repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/image_degradation/__init__.py | extern/ldm_zero123/modules/image_degradation/__init__.py | from extern.ldm_zero123.modules.image_degradation.bsrgan import (
degradation_bsrgan_variant as degradation_fn_bsr,
)
from extern.ldm_zero123.modules.image_degradation.bsrgan_light import (
degradation_bsrgan_variant as degradation_fn_bsr_light,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/image_degradation/utils_image.py | extern/ldm_zero123/modules/image_degradation/utils_image.py | import math
import os
import random
from datetime import datetime
import cv2
import numpy as np
import torch
from torchvision.utils import make_grid
# import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
# Work around "duplicate OpenMP runtime" hard crashes seen when torch and
# OpenCV each bundle their own libomp (common on conda/macOS setups).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
"""
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
# https://github.com/twhui/SRGAN-pyTorch
# https://github.com/xinntao/BasicSR
# --------------------------------------------
"""
# Extensions accepted by is_image_file(). The check is case-sensitive, hence
# both lowercase and uppercase variants are listed explicitly.
# NOTE(review): ".TIF" is missing while ".tif" is present — confirm whether
# uppercase TIFF files should be accepted too.
IMG_EXTENSIONS = [
    ".jpg",
    ".JPG",
    ".jpeg",
    ".JPEG",
    ".png",
    ".PNG",
    ".ppm",
    ".PPM",
    ".bmp",
    ".BMP",
    ".tif",
]
def is_image_file(filename):
    """Return True if *filename* ends with a recognized image extension."""
    # str.endswith accepts a tuple of suffixes, replacing the any() scan.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def get_timestamp():
    """Current local time formatted as 'YYMMDD-HHMMSS'."""
    return format(datetime.now(), "%y%m%d-%H%M%S")
def imshow(x, title=None, cbar=False, figsize=None):
    """Display *x* (squeezed) as a grayscale image; optional title/colorbar.

    FIX: `plt` was undefined at call time because the module-level matplotlib
    import is commented out; import lazily here so the module stays importable
    on headless installs while this debug helper still works when invoked.
    """
    import matplotlib.pyplot as plt

    plt.figure(figsize=figsize)
    plt.imshow(np.squeeze(x), interpolation="nearest", cmap="gray")
    if title:
        plt.title(title)
    if cbar:
        plt.colorbar()
    plt.show()
def surf(Z, cmap="rainbow", figsize=None):
    """Render the 2-D array *Z* as a 3-D surface plot (debug helper).

    FIX: `plt` was undefined because the module-level matplotlib import is
    commented out; import lazily so calling this no longer raises NameError.
    """
    import matplotlib.pyplot as plt

    plt.figure(figsize=figsize)
    ax3 = plt.axes(projection="3d")
    w, h = Z.shape[:2]
    # NOTE(review): for non-square Z the meshgrid axes appear swapped relative
    # to Z's shape — confirm before relying on this for rectangular inputs.
    xx = np.arange(0, w, 1)
    yy = np.arange(0, h, 1)
    X, Y = np.meshgrid(xx, yy)
    ax3.plot_surface(X, Y, Z, cmap=cmap)
    # ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
    plt.show()
"""
# --------------------------------------------
# get image pathes
# --------------------------------------------
"""
def get_image_paths(dataroot):
    """Return the sorted image paths under *dataroot*, or None for a None root."""
    if dataroot is None:
        return None  # preserve the original "no root -> no paths" contract
    return sorted(_get_paths_from_images(dataroot))
def _get_paths_from_images(path):
    """Recursively collect every image file below *path*, in sorted walk order."""
    assert os.path.isdir(path), "{:s} is not a valid directory".format(path)
    images = [
        os.path.join(dirpath, fname)
        for dirpath, _, fnames in sorted(os.walk(path))
        for fname in sorted(fnames)
        if is_image_file(fname)
    ]
    assert images, "{:s} has no valid image file".format(path)
    return images
"""
# --------------------------------------------
# split large images into small images
# --------------------------------------------
"""
def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
    """Split *img* (HxWxC) into overlapping p_size x p_size patches.

    Images whose spatial dims do not both exceed *p_max* are returned whole
    (as a single-element list).
    """
    # NOTE: named w/h by the original author but these are actually rows/cols;
    # kept as-is since both axes are treated symmetrically.
    w, h = img.shape[:2]
    patches = []
    if w > p_max and h > p_max:
        # FIX: `np.int` was removed in NumPy 1.24; plain int is the intended dtype.
        w1 = list(np.arange(0, w - p_size, p_size - p_overlap, dtype=int))
        h1 = list(np.arange(0, h - p_size, p_size - p_overlap, dtype=int))
        # Always include the final window flush with the image border.
        w1.append(w - p_size)
        h1.append(h - p_size)
        for i in w1:
            for j in h1:
                patches.append(img[i : i + p_size, j : j + p_size, :])
    else:
        patches.append(img)
    return patches
def imssave(imgs, img_path):
    """
    imgs: list, N images of size WxHxC

    Save each patch as <stem>_sNNNN.png next to *img_path*.
    """
    base, _ = os.path.splitext(os.path.basename(img_path))
    out_dir = os.path.dirname(img_path)
    for idx, patch in enumerate(imgs):
        if patch.ndim == 3:
            patch = patch[:, :, [2, 1, 0]]  # RGB -> BGR for cv2.imwrite
        target = os.path.join(out_dir, "{}_s{:04d}.png".format(base, idx))
        cv2.imwrite(target, patch)
def split_imageset(
    original_dataroot,
    taget_dataroot,
    n_channels=3,
    p_size=800,
    p_overlap=96,
    p_max=1000,
):
    """
    Split the large images under ``original_dataroot`` into overlapped
    (p_size)x(p_size) patches saved under ``taget_dataroot`` (parameter name
    kept for API compatibility); only images larger than (p_max)x(p_max)
    are actually split.
    Args:
        original_dataroot: directory scanned recursively for images.
        taget_dataroot: output directory for the patch PNGs.
        p_size: size of small images
        p_overlap: patch size in training is a good choice
        p_max: images with smaller size than (p_max)x(p_max) keep unchanged.
    """
    for img_path in get_image_paths(original_dataroot):
        img = imread_uint(img_path, n_channels=n_channels)
        patches = patches_from_image(img, p_size, p_overlap, p_max)
        imssave(patches, os.path.join(taget_dataroot, os.path.basename(img_path)))
"""
# --------------------------------------------
# makedir
# --------------------------------------------
"""
def mkdir(path):
    """Create *path* (including parents) if it does not already exist.

    FIX: the original `if not os.path.exists(path): os.makedirs(path)` is a
    check-then-create race — a concurrent creator makes makedirs raise.
    `exist_ok=True` is atomic with respect to that race.
    """
    os.makedirs(path, exist_ok=True)
def mkdirs(paths):
    """Create one directory (str argument) or every directory in an iterable."""
    if isinstance(paths, str):
        paths = [paths]
    for p in paths:
        mkdir(p)
def mkdir_and_rename(path):
    """Create *path*; if it already exists, archive the old directory first."""
    if os.path.exists(path):
        archived = path + "_archived_" + get_timestamp()
        print("Path already exists. Rename it to [{:s}]".format(archived))
        os.rename(path, archived)
    os.makedirs(path)
"""
# --------------------------------------------
# read image from path
# opencv is fast, but read BGR numpy image
# --------------------------------------------
"""
# --------------------------------------------
# get uint8 image of size HxWxn_channles (RGB)
# --------------------------------------------
def imread_uint(path, n_channels=3):
    """Read an image file as a uint8 array in RGB channel order.

    Args:
        path: image file path readable by OpenCV.
        n_channels: 1 -> HxWx1 grayscale; 3 -> HxWx3 RGB (grayscale inputs
            are replicated across the three channels).

    Returns:
        np.uint8 array of shape HxWx1 or HxWx3.
    """
    # input: path
    # output: HxWx3(RGB or GGG), or HxWx1 (G)
    # NOTE(review): a missing/unreadable file makes cv2.imread return None and
    # the code below raise AttributeError; 4-channel (alpha) inputs go through
    # COLOR_BGR2RGB unchanged — confirm alpha handling upstream.
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    return img
# --------------------------------------------
# matlab's imwrite
# --------------------------------------------
def imsave(img, img_path):
    """Save an RGB (or grayscale) image to *img_path* via OpenCV."""
    out = np.squeeze(img)
    if out.ndim == 3:
        out = out[:, :, [2, 1, 0]]  # RGB -> BGR, as cv2.imwrite expects
    cv2.imwrite(img_path, out)
def imwrite(img, img_path):
    """Duplicate of imsave, kept for API compatibility (RGB -> BGR, cv2 write)."""
    out = np.squeeze(img)
    if out.ndim == 3:
        out = out[:, :, [2, 1, 0]]  # RGB -> BGR for OpenCV
    cv2.imwrite(img_path, out)
# --------------------------------------------
# get single image of size HxWxn_channles (BGR)
# --------------------------------------------
def read_img(path):
    """Read an image with cv2 as float32, HWC, BGR, scaled to [0, 1]."""
    raw = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_GRAYSCALE
    img = np.float32(raw) / 255.0
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    # some images have 4 channels (alpha); keep only the first three
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img
"""
# --------------------------------------------
# image format conversion
# --------------------------------------------
# numpy(single) <---> numpy(unit)
# numpy(single) <---> tensor
# numpy(unit) <---> tensor
# --------------------------------------------
"""
# --------------------------------------------
# numpy(single) [0, 1] <---> numpy(unit)
# --------------------------------------------
def uint2single(img):
    """Map a uint8 image in [0, 255] to float32 in [0, 1]."""
    scaled = img / 255.0
    return np.float32(scaled)
def single2uint(img):
    """Map a float image (clipped to [0, 1]) to uint8 in [0, 255], rounding."""
    clipped = img.clip(0, 1)
    return np.uint8((clipped * 255.0).round())
def uint162single(img):
    """Map a uint16 image in [0, 65535] to float32 in [0, 1]."""
    scaled = img / 65535.0
    return np.float32(scaled)
def single2uint16(img):
    """Map a float image (clipped to [0, 1]) to uint16 in [0, 65535], rounding."""
    clipped = img.clip(0, 1)
    return np.uint16((clipped * 65535.0).round())
# --------------------------------------------
# numpy(unit) (HxWxC or HxW) <---> tensor
# --------------------------------------------
# convert uint to 4-dimensional torch tensor
def uint2tensor4(img):
    """uint8 HxW(xC) image -> float 1xCxHxW tensor scaled to [0, 1]."""
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)  # promote grayscale to HxWx1
    chw = torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1)
    return chw.float().div(255.0).unsqueeze(0)
# convert uint to 3-dimensional torch tensor
def uint2tensor3(img):
    """uint8 HxW(xC) image -> float CxHxW tensor scaled to [0, 1]."""
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    t = torch.from_numpy(np.ascontiguousarray(img))
    return t.permute(2, 0, 1).float().div(255.0)
# convert 2/3/4-dimensional torch tensor to uint
def tensor2uint(img):
    """Clamp a 2/3/4-D tensor to [0, 1] and convert to a uint8 HxW(xC) image."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))  # CHW -> HWC
    return np.uint8((arr * 255.0).round())
# --------------------------------------------
# numpy(single) (HxWxC) <---> tensor
# --------------------------------------------
# convert single (HxWxC) to 3-dimensional torch tensor
def single2tensor3(img):
    """float HxWxC image -> CxHxW float tensor."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1).float()
# convert single (HxWxC) to 4-dimensional torch tensor
def single2tensor4(img):
    """float HxWxC image -> 1xCxHxW float tensor."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1).float().unsqueeze(0)
# convert torch tensor to single
def tensor2single(img):
    """Tensor -> float numpy array; 3-D CHW results become HWC."""
    arr = img.data.squeeze().float().cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))
    return arr
# convert torch tensor to single
def tensor2single3(img):
    """Tensor -> numpy array with an explicit trailing channel axis (HxWxC)."""
    arr = img.data.squeeze().float().cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))
    elif arr.ndim == 2:
        arr = np.expand_dims(arr, axis=2)
    return arr
def single2tensor5(img):
    """float HxWxCxT array -> 1xCxHxWxT float tensor."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1, 3).float().unsqueeze(0)
def single32tensor5(img):
    """float HxWxC array -> 1x1xHxWxC float tensor (two leading batch dims)."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).float().unsqueeze(0).unsqueeze(0)
def single42tensor4(img):
    """float HxWxCxT array -> CxHxWxT float tensor."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1, 3).float()
# from skimage.io import imread, imsave
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    """
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    """
    lo, hi = min_max
    tensor = tensor.squeeze().float().cpu().clamp_(lo, hi)  # squeeze first, then clamp
    tensor = (tensor - lo) / (hi - lo)  # rescale to [0, 1]
    n_dim = tensor.dim()
    if n_dim == 4:
        grid = make_grid(tensor, nrow=int(math.sqrt(len(tensor))), normalize=False)
        img_np = np.transpose(grid.numpy()[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = np.transpose(tensor.numpy()[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            "Only support 4D, 3D and 2D tensor. But received with dimension: {:d}".format(
                n_dim
            )
        )
    if out_type == np.uint8:
        # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)
"""
# --------------------------------------------
# Augmentation, flipe and/or rotate
# --------------------------------------------
# The following two are enough.
# (1) augmet_img: numpy image of WxHxC or WxH
# (2) augment_img_tensor4: tensor image 1xCxWxH
# --------------------------------------------
"""
def augment_img(img, mode=0):
    """Kai Zhang (github: https://github.com/cszn)

    Apply one of the 8 dihedral flip/rotate augmentations to a numpy image.

    FIX: an unknown mode used to fall off the elif chain and return None,
    which surfaced later as a confusing AttributeError; fail fast instead.
    """
    if mode == 0:
        return img
    elif mode == 1:
        return np.flipud(np.rot90(img))
    elif mode == 2:
        return np.flipud(img)
    elif mode == 3:
        return np.rot90(img, k=3)
    elif mode == 4:
        return np.flipud(np.rot90(img, k=2))
    elif mode == 5:
        return np.rot90(img)
    elif mode == 6:
        return np.rot90(img, k=2)
    elif mode == 7:
        return np.flipud(np.rot90(img, k=3))
    raise ValueError("augment_img: mode must be in 0..7, got {}".format(mode))
def augment_img_tensor4(img, mode=0):
    """Kai Zhang (github: https://github.com/cszn)

    Flip/rotate a 4-D NCHW tensor according to one of 8 augmentation modes.
    Unknown modes return None, matching the original fall-through behavior.
    """
    ops = {
        0: lambda t: t,
        1: lambda t: t.rot90(1, [2, 3]).flip([2]),
        2: lambda t: t.flip([2]),
        3: lambda t: t.rot90(3, [2, 3]),
        4: lambda t: t.rot90(2, [2, 3]).flip([2]),
        5: lambda t: t.rot90(1, [2, 3]),
        6: lambda t: t.rot90(2, [2, 3]),
        7: lambda t: t.rot90(3, [2, 3]).flip([2]),
    }
    op = ops.get(mode)
    return op(img) if op is not None else None
def augment_img_tensor(img, mode=0):
    """Kai Zhang (github: https://github.com/cszn)

    Reuse the 8 numpy augmentations of augment_img for CHW / NCHW tensors by
    round-tripping through numpy in HWC layout.
    """
    orig_size = img.size()
    arr = img.data.cpu().numpy()
    if len(orig_size) == 3:
        arr = np.transpose(arr, (1, 2, 0))  # CHW -> HWC
    elif len(orig_size) == 4:
        arr = np.transpose(arr, (2, 3, 1, 0))  # NCHW -> HWCN
    arr = augment_img(arr, mode=mode)
    out = torch.from_numpy(np.ascontiguousarray(arr))
    if len(orig_size) == 3:
        out = out.permute(2, 0, 1)
    elif len(orig_size) == 4:
        out = out.permute(3, 2, 0, 1)
    return out.type_as(img)
def augment_img_np3(img, mode=0):
    """Eight flip/transpose augmentations for an HxWxC numpy image.

    Each mode decomposes into: optional flip along axis 1, then optional flip
    along axis 0, then optional transpose of the spatial axes.
    """
    if mode not in range(8):
        return None  # keep the original elif-chain fall-through behaviour
    if mode in (4, 5, 6, 7):
        img = img[:, ::-1, :]
    if mode in (2, 3, 6, 7):
        img = img[::-1, :, :]
    if mode in (1, 3, 5, 7):
        img = img.transpose(1, 0, 2)
    return img
def augment_imgs(img_list, hflip=True, rot=True):
    # horizontal flip OR rotate: one random draw per transform, shared by the
    # whole batch so every image gets the same augmentation.
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _augment(img):
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            img = img.transpose(1, 0, 2)
        return img

    return [_augment(img) for img in img_list]
"""
# --------------------------------------------
# modcrop and shave
# --------------------------------------------
"""
def modcrop(img_in, scale):
    """Crop *img_in* (HW or HWC) so both spatial dims are multiples of *scale*."""
    img = np.copy(img_in)
    if img.ndim not in (2, 3):
        raise ValueError("Wrong img ndim: [{:d}].".format(img.ndim))
    H, W = img.shape[:2]
    H_new, W_new = H - H % scale, W - W % scale
    if img.ndim == 2:
        return img[:H_new, :W_new]
    return img[:H_new, :W_new, :]
def shave(img_in, border=0):
    """Remove *border* pixels from each spatial edge of an HW or HWC image."""
    img = np.copy(img_in)
    rows, cols = img.shape[:2]
    return img[border : rows - border, border : cols - border]
"""
# --------------------------------------------
# image processing process on numpy image
# channel_convert(in_c, tar_type, img_list):
# rgb2ycbcr(img, only_y=True):
# bgr2ycbcr(img, only_y=True):
# ycbcr2rgb(img):
# --------------------------------------------
"""
def rgb2ycbcr(img, only_y=True):
    """same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]

    FIX: the original discarded the result of img.astype(np.float32) and then
    executed `img *= 255.0`, mutating the CALLER's float array in place.
    Working on the astype() copy removes that side effect.
    """
    in_img_type = img.dtype
    img = img.astype(np.float32)  # copy; never mutate the caller's array
    if in_img_type != np.uint8:
        img *= 255.0  # safe: operates on the local copy only
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(
            img,
            [
                [65.481, -37.797, 112.0],
                [128.553, -74.203, -93.786],
                [24.966, 112.0, -18.214],
            ],
        ) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.0
    return rlt.astype(in_img_type)
def ycbcr2rgb(img):
    """same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]

    FIX: the original discarded the result of img.astype(np.float32) and then
    executed `img *= 255.0`, mutating the CALLER's float array in place.
    """
    in_img_type = img.dtype
    img = img.astype(np.float32)  # copy; never mutate the caller's array
    if in_img_type != np.uint8:
        img *= 255.0
    # convert
    rlt = np.matmul(
        img,
        [
            [0.00456621, 0.00456621, 0.00456621],
            [0, -0.00153632, 0.00791071],
            [0.00625893, -0.00318811, 0],
        ],
    ) * 255.0 + [-222.921, 135.576, -276.836]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.0
    return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True):
    """bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]

    FIX: the original discarded the result of img.astype(np.float32) and then
    executed `img *= 255.0`, mutating the CALLER's float array in place.
    """
    in_img_type = img.dtype
    img = img.astype(np.float32)  # copy; never mutate the caller's array
    if in_img_type != np.uint8:
        img *= 255.0
    # convert
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(
            img,
            [
                [24.966, 112.0, -18.214],
                [128.553, -74.203, -93.786],
                [65.481, -37.797, 112.0],
            ],
        ) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.0
    return rlt.astype(in_img_type)
def channel_convert(in_c, tar_type, img_list):
    """Convert a list of images among BGR, single-channel gray and Y.

    Unsupported (in_c, tar_type) combinations return the list unchanged.
    """
    if in_c == 3 and tar_type == "gray":  # BGR to gray
        return [
            np.expand_dims(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), axis=2)
            for img in img_list
        ]
    if in_c == 3 and tar_type == "y":  # BGR to y
        return [
            np.expand_dims(bgr2ycbcr(img, only_y=True), axis=2) for img in img_list
        ]
    if in_c == 1 and tar_type == "RGB":  # gray/y to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    return img_list
"""
# --------------------------------------------
# metric, PSNR and SSIM
# --------------------------------------------
"""
# --------------------------------------------
# PSNR
# --------------------------------------------
def calculate_psnr(img1, img2, border=0):
    """PSNR in dB between two images with values in [0, 255].

    *border* pixels are shaved from each spatial edge before comparing;
    identical crops yield +inf.
    """
    if img1.shape != img2.shape:
        raise ValueError("Input images must have the same dimensions.")
    h, w = img1.shape[:2]
    crop1 = img1[border : h - border, border : w - border].astype(np.float64)
    crop2 = img2[border : h - border, border : w - border].astype(np.float64)
    mse = np.mean((crop1 - crop2) ** 2)
    if mse == 0:
        return float("inf")
    return 20 * math.log10(255.0 / math.sqrt(mse))
# --------------------------------------------
# SSIM
# --------------------------------------------
def calculate_ssim(img1, img2, border=0):
    """calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]
    """
    if img1.shape != img2.shape:
        raise ValueError("Input images must have the same dimensions.")
    h, w = img1.shape[:2]
    img1 = img1[border : h - border, border : w - border]
    img2 = img2[border : h - border, border : w - border]
    if img1.ndim == 2:
        return ssim(img1, img2)
    if img1.ndim == 3:
        if img1.shape[2] == 3:
            # average the per-channel SSIM scores
            scores = [ssim(img1[:, :, c], img2[:, :, c]) for c in range(3)]
            return np.array(scores).mean()
        if img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
        return None  # mirror the original implicit fall-through for odd channel counts
    raise ValueError("Wrong input image dimensions.")
def ssim(img1, img2):
    """Single-channel SSIM of two images in [0, 255], MATLAB-compatible.

    Uses an 11x11 Gaussian window (sigma=1.5) and the standard stability
    constants; 5-pixel borders are dropped so only fully-overlapping
    ('valid') filter responses contribute to the mean.
    """
    C1 = (0.01 * 255) ** 2
    C2 = (0.03 * 255) ** 2
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())
    # Local means over the Gaussian window.
    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    # Local (co)variances via E[x^2] - E[x]^2.
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
        (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
    )
    return ssim_map.mean()
"""
# --------------------------------------------
# matlab's bicubic imresize (numpy and torch) [0, 1]
# --------------------------------------------
"""
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
    """Keys bicubic convolution kernel (a = -0.5), evaluated elementwise."""
    absx = torch.abs(x)
    absx2 = absx**2
    absx3 = absx**3
    # Piece for |x| <= 1 and piece for 1 < |x| <= 2; zero elsewhere.
    inner = (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx))
    outer = (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (
        ((absx > 1) * (absx <= 2)).type_as(absx)
    )
    return inner + outer
def calculate_weights_indices(
    in_length, out_length, scale, kernel, kernel_width, antialiasing
):
    """Compute bicubic interpolation weights and gather indices for one axis.

    Returns:
        (weights, indices, sym_len_s, sym_len_e): per-output-pixel weight rows,
        the source indices they apply to (in padded-input space), and how many
        pixels of symmetric padding the caller must add at the start / end.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale
    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)
    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)
    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)
    # What is the maximum number of pixels that can be involved in the
    # computation? Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2
    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(
        0, P - 1, P
    ).view(1, P).expand(out_length, P)
    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)
    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # Symmetric padding needed so every index falls inside the padded input.
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------------------
# imresize for tensor image [0, 1]
# --------------------------------------------
def imresize(img, scale, antialiasing=True):
    """MATLAB-style bicubic resize of a torch image tensor.

    Args:
        img: pytorch tensor, CHW or HW, values in [0, 1].
        scale: scale factor applied to both H and W.
        antialiasing: widen the kernel when downscaling (MATLAB behaviour).

    Returns:
        CHW or HW tensor in [0, 1], not rounded.
    """
    # Now the scale should be the same for H and W
    # FIX: use the out-of-place unsqueeze; the original img.unsqueeze_(0)
    # mutated the caller's 2-D tensor into a 3-D one as a side effect.
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img = img.unsqueeze(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = "cubic"
    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing
    )
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing
    )
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[j, i, :] = (
                img_aug[j, idx : idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
            )
    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx : idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2
# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
    """MATLAB-style bicubic resize for a numpy image.

    Args:
        img: Numpy array, HWC or HW, values in [0, 1].
        scale: scale factor applied to both H and W.
        antialiasing: widen the kernel when downscaling (MATLAB behaviour).

    Returns:
        HWC or HW numpy array in [0, 1], not rounded.
    """
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        # In-place unsqueeze only changes this local wrapper tensor's
        # metadata; the caller's numpy array keeps its original shape.
        img.unsqueeze_(2)
    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = "cubic"
    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing
    )
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing
    )
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[i, :, j] = (
                img_aug[idx : idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
            )
    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx : idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2.numpy()
if __name__ == "__main__":
    # Tiny manual smoke test; the commented lines show intended usage of the
    # read / convert / resize helpers above.
    print("---")
    # img = imread_uint('test.bmp', 3)
    # img = uint2single(img)
    # img_bicubic = imresize_np(img, 1/4)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/distributions/distributions.py | extern/ldm_zero123/modules/distributions/distributions.py | import numpy as np
import torch
class AbstractDistribution:
    """Interface for distributions that can be sampled or collapsed to a mode."""

    def sample(self):
        """Draw one sample; subclasses must override."""
        raise NotImplementedError()

    def mode(self):
        """Return the most likely value; subclasses must override."""
        raise NotImplementedError()
class DiracDistribution(AbstractDistribution):
def __init__(self, value):
self.value = value
def sample(self):
return self.value
def mode(self):
return self.value
class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian over feature maps, parameterized by a tensor that
    concatenates mean and log-variance along dim=1."""

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Bound the log-variance so exp() stays finite and non-zero.
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # A deterministic "distribution" collapses to its mean.
            self.var = self.std = torch.zeros_like(self.mean).to(
                device=self.parameters.device
            )

    def sample(self):
        noise = torch.randn(self.mean.shape).to(device=self.parameters.device)
        return self.mean + self.std * noise

    def kl(self, other=None):
        """KL against a standard normal (other=None) or another diagonal Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.0])
        if other is None:
            return 0.5 * torch.sum(
                torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                dim=[1, 2, 3],
            )
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var
            - 1.0
            - self.logvar
            + other.logvar,
            dim=[1, 2, 3],
        )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of *sample*, summed over *dims*."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims,
        )

    def mode(self):
        return self.mean
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    Compute the KL divergence between two gaussians.
    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    # Find a reference tensor to anchor dtype/device for scalar promotion.
    tensor = next(
        (t for t in (mean1, logvar1, mean2, logvar2) if isinstance(t, torch.Tensor)),
        None,
    )
    assert tensor is not None, "at least one argument must be a Tensor"
    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().
    logvar1, logvar2 = (
        x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    )
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/distributions/__init__.py | extern/ldm_zero123/modules/distributions/__init__.py | python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false | |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/losses/vqperceptual.py | extern/ldm_zero123/modules/losses/vqperceptual.py | import torch
import torch.nn.functional as F
from einops import repeat
from taming.modules.discriminator.model import NLayerDiscriminator, weights_init
from taming.modules.losses.lpips import LPIPS
from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss
from torch import nn
def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
    """Hinge GAN discriminator loss with a per-example weighting."""
    assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0]
    # Per-example hinge terms, averaged over all non-batch dims.
    per_ex_real = torch.mean(F.relu(1.0 - logits_real), dim=[1, 2, 3])
    per_ex_fake = torch.mean(F.relu(1.0 + logits_fake), dim=[1, 2, 3])
    # Weighted average over the batch.
    weighted_real = (weights * per_ex_real).sum() / weights.sum()
    weighted_fake = (weights * per_ex_fake).sum() / weights.sum()
    return 0.5 * (weighted_real + weighted_fake)
def adopt_weight(weight, global_step, threshold=0, value=0.0):
    """Return *value* until *global_step* reaches *threshold*, then *weight*."""
    return value if global_step < threshold else weight
def measure_perplexity(predicted_indices, n_embed):
    # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py
    # eval cluster perplexity. when perplexity == num_embeddings then all
    # clusters are used exactly equally
    one_hot = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed)
    avg_probs = one_hot.mean(0)
    # exp of the entropy of the average code distribution (+eps for log(0)).
    entropy = -(avg_probs * torch.log(avg_probs + 1e-10)).sum()
    return entropy.exp(), torch.sum(avg_probs > 0)
def l1(x, y):
    """Elementwise absolute error |x - y|."""
    return (x - y).abs()
def l2(x, y):
    """Elementwise squared error (x - y)^2."""
    return (x - y) ** 2
class VQLPIPSWithDiscriminator(nn.Module):
def __init__(
self,
disc_start,
codebook_weight=1.0,
pixelloss_weight=1.0,
disc_num_layers=3,
disc_in_channels=3,
disc_factor=1.0,
disc_weight=1.0,
perceptual_weight=1.0,
use_actnorm=False,
disc_conditional=False,
disc_ndf=64,
disc_loss="hinge",
n_classes=None,
perceptual_loss="lpips",
pixel_loss="l1",
):
super().__init__()
assert disc_loss in ["hinge", "vanilla"]
assert perceptual_loss in ["lpips", "clips", "dists"]
assert pixel_loss in ["l1", "l2"]
self.codebook_weight = codebook_weight
self.pixel_weight = pixelloss_weight
if perceptual_loss == "lpips":
print(f"{self.__class__.__name__}: Running with LPIPS.")
self.perceptual_loss = LPIPS().eval()
else:
raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<")
self.perceptual_weight = perceptual_weight
if pixel_loss == "l1":
self.pixel_loss = l1
else:
self.pixel_loss = l2
self.discriminator = NLayerDiscriminator(
input_nc=disc_in_channels,
n_layers=disc_num_layers,
use_actnorm=use_actnorm,
ndf=disc_ndf,
).apply(weights_init)
self.discriminator_iter_start = disc_start
if disc_loss == "hinge":
self.disc_loss = hinge_d_loss
elif disc_loss == "vanilla":
self.disc_loss = vanilla_d_loss
else:
raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.")
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
self.disc_conditional = disc_conditional
self.n_classes = n_classes
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
if last_layer is not None:
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
else:
nll_grads = torch.autograd.grad(
nll_loss, self.last_layer[0], retain_graph=True
)[0]
g_grads = torch.autograd.grad(
g_loss, self.last_layer[0], retain_graph=True
)[0]
d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
d_weight = d_weight * self.discriminator_weight
return d_weight
def forward(
self,
codebook_loss,
inputs,
reconstructions,
optimizer_idx,
global_step,
last_layer=None,
cond=None,
split="train",
predicted_indices=None,
):
if not exists(codebook_loss):
codebook_loss = torch.tensor([0.0]).to(inputs.device)
# rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous())
if self.perceptual_weight > 0:
p_loss = self.perceptual_loss(
inputs.contiguous(), reconstructions.contiguous()
)
rec_loss = rec_loss + self.perceptual_weight * p_loss
else:
p_loss = torch.tensor([0.0])
nll_loss = rec_loss
# nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
nll_loss = torch.mean(nll_loss)
# now the GAN part
if optimizer_idx == 0:
# generator update
if cond is None:
assert not self.disc_conditional
logits_fake = self.discriminator(reconstructions.contiguous())
else:
assert self.disc_conditional
logits_fake = self.discriminator(
torch.cat((reconstructions.contiguous(), cond), dim=1)
)
g_loss = -torch.mean(logits_fake)
try:
d_weight = self.calculate_adaptive_weight(
nll_loss, g_loss, last_layer=last_layer
)
except RuntimeError:
assert not self.training
d_weight = torch.tensor(0.0)
disc_factor = adopt_weight(
self.disc_factor, global_step, threshold=self.discriminator_iter_start
)
loss = (
nll_loss
+ d_weight * disc_factor * g_loss
+ self.codebook_weight * codebook_loss.mean()
)
log = {
"{}/total_loss".format(split): loss.clone().detach().mean(),
"{}/quant_loss".format(split): codebook_loss.detach().mean(),
"{}/nll_loss".format(split): nll_loss.detach().mean(),
"{}/rec_loss".format(split): rec_loss.detach().mean(),
"{}/p_loss".format(split): p_loss.detach().mean(),
"{}/d_weight".format(split): d_weight.detach(),
"{}/disc_factor".format(split): torch.tensor(disc_factor),
"{}/g_loss".format(split): g_loss.detach().mean(),
}
if predicted_indices is not None:
assert self.n_classes is not None
with torch.no_grad():
perplexity, cluster_usage = measure_perplexity(
predicted_indices, self.n_classes
)
log[f"{split}/perplexity"] = perplexity
log[f"{split}/cluster_usage"] = cluster_usage
return loss, log
if optimizer_idx == 1:
# second pass for discriminator update
if cond is None:
logits_real = self.discriminator(inputs.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
else:
logits_real = self.discriminator(
torch.cat((inputs.contiguous().detach(), cond), dim=1)
)
logits_fake = self.discriminator(
torch.cat((reconstructions.contiguous().detach(), cond), dim=1)
)
disc_factor = adopt_weight(
self.disc_factor, global_step, threshold=self.discriminator_iter_start
)
d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
log = {
"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
"{}/logits_real".format(split): logits_real.detach().mean(),
"{}/logits_fake".format(split): logits_fake.detach().mean(),
}
return d_loss, log
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/losses/contperceptual.py | extern/ldm_zero123/modules/losses/contperceptual.py | import torch
import torch.nn as nn
from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no?
class LPIPSWithDiscriminator(nn.Module):
def __init__(
self,
disc_start,
logvar_init=0.0,
kl_weight=1.0,
pixelloss_weight=1.0,
disc_num_layers=3,
disc_in_channels=3,
disc_factor=1.0,
disc_weight=1.0,
perceptual_weight=1.0,
use_actnorm=False,
disc_conditional=False,
disc_loss="hinge",
):
super().__init__()
assert disc_loss in ["hinge", "vanilla"]
self.kl_weight = kl_weight
self.pixel_weight = pixelloss_weight
self.perceptual_loss = LPIPS().eval()
self.perceptual_weight = perceptual_weight
# output log variance
self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
self.discriminator = NLayerDiscriminator(
input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm
).apply(weights_init)
self.discriminator_iter_start = disc_start
self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
self.disc_conditional = disc_conditional
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
if last_layer is not None:
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
else:
nll_grads = torch.autograd.grad(
nll_loss, self.last_layer[0], retain_graph=True
)[0]
g_grads = torch.autograd.grad(
g_loss, self.last_layer[0], retain_graph=True
)[0]
d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
d_weight = d_weight * self.discriminator_weight
return d_weight
def forward(
self,
inputs,
reconstructions,
posteriors,
optimizer_idx,
global_step,
last_layer=None,
cond=None,
split="train",
weights=None,
):
rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
if self.perceptual_weight > 0:
p_loss = self.perceptual_loss(
inputs.contiguous(), reconstructions.contiguous()
)
rec_loss = rec_loss + self.perceptual_weight * p_loss
nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
weighted_nll_loss = nll_loss
if weights is not None:
weighted_nll_loss = weights * nll_loss
weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
kl_loss = posteriors.kl()
kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
# now the GAN part
if optimizer_idx == 0:
# generator update
if cond is None:
assert not self.disc_conditional
logits_fake = self.discriminator(reconstructions.contiguous())
else:
assert self.disc_conditional
logits_fake = self.discriminator(
torch.cat((reconstructions.contiguous(), cond), dim=1)
)
g_loss = -torch.mean(logits_fake)
if self.disc_factor > 0.0:
try:
d_weight = self.calculate_adaptive_weight(
nll_loss, g_loss, last_layer=last_layer
)
except RuntimeError:
assert not self.training
d_weight = torch.tensor(0.0)
else:
d_weight = torch.tensor(0.0)
disc_factor = adopt_weight(
self.disc_factor, global_step, threshold=self.discriminator_iter_start
)
loss = (
weighted_nll_loss
+ self.kl_weight * kl_loss
+ d_weight * disc_factor * g_loss
)
log = {
"{}/total_loss".format(split): loss.clone().detach().mean(),
"{}/logvar".format(split): self.logvar.detach(),
"{}/kl_loss".format(split): kl_loss.detach().mean(),
"{}/nll_loss".format(split): nll_loss.detach().mean(),
"{}/rec_loss".format(split): rec_loss.detach().mean(),
"{}/d_weight".format(split): d_weight.detach(),
"{}/disc_factor".format(split): torch.tensor(disc_factor),
"{}/g_loss".format(split): g_loss.detach().mean(),
}
return loss, log
if optimizer_idx == 1:
# second pass for discriminator update
if cond is None:
logits_real = self.discriminator(inputs.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
else:
logits_real = self.discriminator(
torch.cat((inputs.contiguous().detach(), cond), dim=1)
)
logits_fake = self.discriminator(
torch.cat((reconstructions.contiguous().detach(), cond), dim=1)
)
disc_factor = adopt_weight(
self.disc_factor, global_step, threshold=self.discriminator_iter_start
)
d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
log = {
"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
"{}/logits_real".format(split): logits_real.detach().mean(),
"{}/logits_fake".format(split): logits_fake.detach().mean(),
}
return d_loss, log
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/losses/__init__.py | extern/ldm_zero123/modules/losses/__init__.py | from extern.ldm_zero123.modules.losses.contperceptual import LPIPSWithDiscriminator
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/encoders/modules.py | extern/ldm_zero123/modules/encoders/modules.py | from functools import partial
import clip
import kornia
import numpy as np
import torch
import torch.nn as nn
from extern.ldm_zero123.modules.x_transformer import ( # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test
Encoder,
TransformerWrapper,
)
from extern.ldm_zero123.util import default
class AbstractEncoder(nn.Module):
def __init__(self):
super().__init__()
def encode(self, *args, **kwargs):
raise NotImplementedError
class IdentityEncoder(AbstractEncoder):
def encode(self, x):
return x
class FaceClipEncoder(AbstractEncoder):
def __init__(self, augment=True, retreival_key=None):
super().__init__()
self.encoder = FrozenCLIPImageEmbedder()
self.augment = augment
self.retreival_key = retreival_key
def forward(self, img):
encodings = []
with torch.no_grad():
x_offset = 125
if self.retreival_key:
# Assumes retrieved image are packed into the second half of channels
face = img[:, 3:, 190:440, x_offset : (512 - x_offset)]
other = img[:, :3, ...].clone()
else:
face = img[:, :, 190:440, x_offset : (512 - x_offset)]
other = img.clone()
if self.augment:
face = K.RandomHorizontalFlip()(face)
other[:, :, 190:440, x_offset : (512 - x_offset)] *= 0
encodings = [
self.encoder.encode(face),
self.encoder.encode(other),
]
return torch.cat(encodings, dim=1)
def encode(self, img):
if isinstance(img, list):
# Uncondition
return torch.zeros(
(1, 2, 768), device=self.encoder.model.visual.conv1.weight.device
)
return self(img)
class FaceIdClipEncoder(AbstractEncoder):
def __init__(self):
super().__init__()
self.encoder = FrozenCLIPImageEmbedder()
for p in self.encoder.parameters():
p.requires_grad = False
self.id = FrozenFaceEncoder(
"/home/jpinkney/code/stable-diffusion/model_ir_se50.pth", augment=True
)
def forward(self, img):
encodings = []
with torch.no_grad():
face = kornia.geometry.resize(
img, (256, 256), interpolation="bilinear", align_corners=True
)
other = img.clone()
other[:, :, 184:452, 122:396] *= 0
encodings = [
self.id.encode(face),
self.encoder.encode(other),
]
return torch.cat(encodings, dim=1)
def encode(self, img):
if isinstance(img, list):
# Uncondition
return torch.zeros(
(1, 2, 768), device=self.encoder.model.visual.conv1.weight.device
)
return self(img)
class ClassEmbedder(nn.Module):
def __init__(self, embed_dim, n_classes=1000, key="class"):
super().__init__()
self.key = key
self.embedding = nn.Embedding(n_classes, embed_dim)
def forward(self, batch, key=None):
if key is None:
key = self.key
# this is for use in crossattn
c = batch[key][:, None]
c = self.embedding(c)
return c
class TransformerEmbedder(AbstractEncoder):
"""Some transformer encoder layers"""
def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
super().__init__()
self.device = device
self.transformer = TransformerWrapper(
num_tokens=vocab_size,
max_seq_len=max_seq_len,
attn_layers=Encoder(dim=n_embed, depth=n_layer),
)
def forward(self, tokens):
tokens = tokens.to(self.device) # meh
z = self.transformer(tokens, return_embeddings=True)
return z
def encode(self, x):
return self(x)
class BERTTokenizer(AbstractEncoder):
"""Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
def __init__(self, device="cuda", vq_interface=True, max_length=77):
super().__init__()
from transformers import BertTokenizerFast # TODO: add to reuquirements
self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
self.device = device
self.vq_interface = vq_interface
self.max_length = max_length
def forward(self, text):
batch_encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_length,
return_length=True,
return_overflowing_tokens=False,
padding="max_length",
return_tensors="pt",
)
tokens = batch_encoding["input_ids"].to(self.device)
return tokens
@torch.no_grad()
def encode(self, text):
tokens = self(text)
if not self.vq_interface:
return tokens
return None, None, [None, None, tokens]
def decode(self, text):
return text
class BERTEmbedder(AbstractEncoder):
"""Uses the BERT tokenizr model and add some transformer encoder layers"""
def __init__(
self,
n_embed,
n_layer,
vocab_size=30522,
max_seq_len=77,
device="cuda",
use_tokenizer=True,
embedding_dropout=0.0,
):
super().__init__()
self.use_tknz_fn = use_tokenizer
if self.use_tknz_fn:
self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
self.device = device
self.transformer = TransformerWrapper(
num_tokens=vocab_size,
max_seq_len=max_seq_len,
attn_layers=Encoder(dim=n_embed, depth=n_layer),
emb_dropout=embedding_dropout,
)
def forward(self, text):
if self.use_tknz_fn:
tokens = self.tknz_fn(text) # .to(self.device)
else:
tokens = text
z = self.transformer(tokens, return_embeddings=True)
return z
def encode(self, text):
# output of length 77
return self(text)
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
class FrozenT5Embedder(AbstractEncoder):
"""Uses the T5 transformer encoder for text"""
def __init__(
self, version="google/t5-v1_1-large", device="cuda", max_length=77
): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
super().__init__()
self.tokenizer = T5Tokenizer.from_pretrained(version)
self.transformer = T5EncoderModel.from_pretrained(version)
self.device = device
self.max_length = max_length # TODO: typical value?
self.freeze()
def freeze(self):
self.transformer = self.transformer.eval()
# self.train = disabled_train
for param in self.parameters():
param.requires_grad = False
def forward(self, text):
batch_encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_length,
return_length=True,
return_overflowing_tokens=False,
padding="max_length",
return_tensors="pt",
)
tokens = batch_encoding["input_ids"].to(self.device)
outputs = self.transformer(input_ids=tokens)
z = outputs.last_hidden_state
return z
def encode(self, text):
return self(text)
import kornia.augmentation as K
from extern.ldm_zero123.thirdp.psp.id_loss import IDFeatures
class FrozenFaceEncoder(AbstractEncoder):
def __init__(self, model_path, augment=False):
super().__init__()
self.loss_fn = IDFeatures(model_path)
# face encoder is frozen
for p in self.loss_fn.parameters():
p.requires_grad = False
# Mapper is trainable
self.mapper = torch.nn.Linear(512, 768)
p = 0.25
if augment:
self.augment = K.AugmentationSequential(
K.RandomHorizontalFlip(p=0.5),
K.RandomEqualize(p=p),
# K.RandomPlanckianJitter(p=p),
# K.RandomPlasmaBrightness(p=p),
# K.RandomPlasmaContrast(p=p),
# K.ColorJiggle(0.02, 0.2, 0.2, p=p),
)
else:
self.augment = False
def forward(self, img):
if isinstance(img, list):
# Uncondition
return torch.zeros((1, 1, 768), device=self.mapper.weight.device)
if self.augment is not None:
# Transforms require 0-1
img = self.augment((img + 1) / 2)
img = 2 * img - 1
feat = self.loss_fn(img, crop=True)
feat = self.mapper(feat.unsqueeze(1))
return feat
def encode(self, img):
return self(img)
class FrozenCLIPEmbedder(AbstractEncoder):
"""Uses the CLIP transformer encoder for text (from huggingface)"""
def __init__(
self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77
): # clip-vit-base-patch32
super().__init__()
self.tokenizer = CLIPTokenizer.from_pretrained(version)
self.transformer = CLIPTextModel.from_pretrained(version)
self.device = device
self.max_length = max_length # TODO: typical value?
self.freeze()
def freeze(self):
self.transformer = self.transformer.eval()
# self.train = disabled_train
for param in self.parameters():
param.requires_grad = False
def forward(self, text):
batch_encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_length,
return_length=True,
return_overflowing_tokens=False,
padding="max_length",
return_tensors="pt",
)
tokens = batch_encoding["input_ids"].to(self.device)
outputs = self.transformer(input_ids=tokens)
z = outputs.last_hidden_state
return z
def encode(self, text):
return self(text)
import torch.nn.functional as F
from transformers import CLIPVisionModel
class ClipImageProjector(AbstractEncoder):
"""
Uses the CLIP image encoder.
"""
def __init__(
self, version="openai/clip-vit-large-patch14", max_length=77
): # clip-vit-base-patch32
super().__init__()
self.model = CLIPVisionModel.from_pretrained(version)
self.model.train()
self.max_length = max_length # TODO: typical value?
self.antialias = True
self.mapper = torch.nn.Linear(1024, 768)
self.register_buffer(
"mean", torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False
)
self.register_buffer(
"std", torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False
)
null_cond = self.get_null_cond(version, max_length)
self.register_buffer("null_cond", null_cond)
@torch.no_grad()
def get_null_cond(self, version, max_length):
device = self.mean.device
embedder = FrozenCLIPEmbedder(
version=version, device=device, max_length=max_length
)
null_cond = embedder([""])
return null_cond
def preprocess(self, x):
# Expects inputs in the range -1, 1
x = kornia.geometry.resize(
x,
(224, 224),
interpolation="bicubic",
align_corners=True,
antialias=self.antialias,
)
x = (x + 1.0) / 2.0
# renormalize according to clip
x = kornia.enhance.normalize(x, self.mean, self.std)
return x
def forward(self, x):
if isinstance(x, list):
return self.null_cond
# x is assumed to be in range [-1,1]
x = self.preprocess(x)
outputs = self.model(pixel_values=x)
last_hidden_state = outputs.last_hidden_state
last_hidden_state = self.mapper(last_hidden_state)
return F.pad(
last_hidden_state,
[0, 0, 0, self.max_length - last_hidden_state.shape[1], 0, 0],
)
def encode(self, im):
return self(im)
class ProjectedFrozenCLIPEmbedder(AbstractEncoder):
def __init__(
self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77
): # clip-vit-base-patch32
super().__init__()
self.embedder = FrozenCLIPEmbedder(
version=version, device=device, max_length=max_length
)
self.projection = torch.nn.Linear(768, 768)
def forward(self, text):
z = self.embedder(text)
return self.projection(z)
def encode(self, text):
return self(text)
class FrozenCLIPImageEmbedder(AbstractEncoder):
"""
Uses the CLIP image encoder.
Not actually frozen... If you want that set cond_stage_trainable=False in cfg
"""
def __init__(
self,
model="ViT-L/14",
jit=False,
device="cpu",
antialias=False,
):
super().__init__()
self.model, _ = clip.load(name=model, device=device, jit=jit)
# We don't use the text part so delete it
del self.model.transformer
self.antialias = antialias
self.register_buffer(
"mean", torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False
)
self.register_buffer(
"std", torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False
)
def preprocess(self, x):
# Expects inputs in the range -1, 1
x = kornia.geometry.resize(
x,
(224, 224),
interpolation="bicubic",
align_corners=True,
antialias=self.antialias,
)
x = (x + 1.0) / 2.0
# renormalize according to clip
x = kornia.enhance.normalize(x, self.mean, self.std)
return x
def forward(self, x):
# x is assumed to be in range [-1,1]
if isinstance(x, list):
# [""] denotes condition dropout for ucg
device = self.model.visual.conv1.weight.device
return torch.zeros(1, 768, device=device)
return self.model.encode_image(self.preprocess(x)).float()
def encode(self, im):
return self(im).unsqueeze(1)
import random
from torchvision import transforms
class FrozenCLIPImageMutliEmbedder(AbstractEncoder):
"""
Uses the CLIP image encoder.
Not actually frozen... If you want that set cond_stage_trainable=False in cfg
"""
def __init__(
self,
model="ViT-L/14",
jit=False,
device="cpu",
antialias=True,
max_crops=5,
):
super().__init__()
self.model, _ = clip.load(name=model, device=device, jit=jit)
# We don't use the text part so delete it
del self.model.transformer
self.antialias = antialias
self.register_buffer(
"mean", torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False
)
self.register_buffer(
"std", torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False
)
self.max_crops = max_crops
def preprocess(self, x):
# Expects inputs in the range -1, 1
randcrop = transforms.RandomResizedCrop(224, scale=(0.085, 1.0), ratio=(1, 1))
max_crops = self.max_crops
patches = []
crops = [randcrop(x) for _ in range(max_crops)]
patches.extend(crops)
x = torch.cat(patches, dim=0)
x = (x + 1.0) / 2.0
# renormalize according to clip
x = kornia.enhance.normalize(x, self.mean, self.std)
return x
def forward(self, x):
# x is assumed to be in range [-1,1]
if isinstance(x, list):
# [""] denotes condition dropout for ucg
device = self.model.visual.conv1.weight.device
return torch.zeros(1, self.max_crops, 768, device=device)
batch_tokens = []
for im in x:
patches = self.preprocess(im.unsqueeze(0))
tokens = self.model.encode_image(patches).float()
for t in tokens:
if random.random() < 0.1:
t *= 0
batch_tokens.append(tokens.unsqueeze(0))
return torch.cat(batch_tokens, dim=0)
def encode(self, im):
return self(im)
class SpatialRescaler(nn.Module):
def __init__(
self,
n_stages=1,
method="bilinear",
multiplier=0.5,
in_channels=3,
out_channels=None,
bias=False,
):
super().__init__()
self.n_stages = n_stages
assert self.n_stages >= 0
assert method in [
"nearest",
"linear",
"bilinear",
"trilinear",
"bicubic",
"area",
]
self.multiplier = multiplier
self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
self.remap_output = out_channels is not None
if self.remap_output:
print(
f"Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing."
)
self.channel_mapper = nn.Conv2d(in_channels, out_channels, 1, bias=bias)
def forward(self, x):
for stage in range(self.n_stages):
x = self.interpolator(x, scale_factor=self.multiplier)
if self.remap_output:
x = self.channel_mapper(x)
return x
def encode(self, x):
return self(x)
from extern.ldm_zero123.modules.diffusionmodules.util import (
extract_into_tensor,
make_beta_schedule,
noise_like,
)
from extern.ldm_zero123.util import instantiate_from_config
class LowScaleEncoder(nn.Module):
def __init__(
self,
model_config,
linear_start,
linear_end,
timesteps=1000,
max_noise_level=250,
output_size=64,
scale_factor=1.0,
):
super().__init__()
self.max_noise_level = max_noise_level
self.model = instantiate_from_config(model_config)
self.augmentation_schedule = self.register_schedule(
timesteps=timesteps, linear_start=linear_start, linear_end=linear_end
)
self.out_size = output_size
self.scale_factor = scale_factor
def register_schedule(
self,
beta_schedule="linear",
timesteps=1000,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
):
betas = make_beta_schedule(
beta_schedule,
timesteps,
linear_start=linear_start,
linear_end=linear_end,
cosine_s=cosine_s,
)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
(timesteps,) = betas.shape
self.num_timesteps = int(timesteps)
self.linear_start = linear_start
self.linear_end = linear_end
assert (
alphas_cumprod.shape[0] == self.num_timesteps
), "alphas have to be defined for each timestep"
to_torch = partial(torch.tensor, dtype=torch.float32)
self.register_buffer("betas", to_torch(betas))
self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod)))
self.register_buffer(
"sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod))
)
self.register_buffer(
"log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod))
)
self.register_buffer(
"sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod))
)
self.register_buffer(
"sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1))
)
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
* noise
)
def forward(self, x):
z = self.model.encode(x).sample()
z = z * self.scale_factor
noise_level = torch.randint(
0, self.max_noise_level, (x.shape[0],), device=x.device
).long()
z = self.q_sample(z, noise_level)
if self.out_size is not None:
z = torch.nn.functional.interpolate(
z, size=self.out_size, mode="nearest"
) # TODO: experiment with mode
# z = z.repeat_interleave(2, -2).repeat_interleave(2, -1)
return z, noise_level
def decode(self, z):
z = z / self.scale_factor
return self.model.decode(z)
if __name__ == "__main__":
from extern.ldm_zero123.util import count_params
sentences = [
"a hedgehog drinking a whiskey",
"der mond ist aufgegangen",
"Ein Satz mit vielen Sonderzeichen: äöü ß ?! : 'xx-y/@s'",
]
model = FrozenT5Embedder(version="google/t5-v1_1-xl").cuda()
count_params(model, True)
z = model(sentences)
print(z.shape)
model = FrozenCLIPEmbedder().cuda()
count_params(model, True)
z = model(sentences)
print(z.shape)
print("done.")
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/encoders/__init__.py | extern/ldm_zero123/modules/encoders/__init__.py | python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false | |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/evaluate/torch_frechet_video_distance.py | extern/ldm_zero123/modules/evaluate/torch_frechet_video_distance.py | # based on https://github.com/universome/fvd-comparison/blob/master/compare_models.py; huge thanks!
import glob
import hashlib
import html
import io
import multiprocessing as mp
import os
import re
import urllib
import urllib.request
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy as np
import requests
import scipy.linalg
import torch
from torchvision.io import read_video
from tqdm import tqdm
torch.set_grad_enabled(False)
from einops import rearrange
from nitro.util import isvideo
def compute_frechet_distance(mu_sample, sigma_sample, mu_ref, sigma_ref) -> float:
print("Calculate frechet distance...")
m = np.square(mu_sample - mu_ref).sum()
s, _ = scipy.linalg.sqrtm(
np.dot(sigma_sample, sigma_ref), disp=False
) # pylint: disable=no-member
fid = np.real(m + np.trace(sigma_sample + sigma_ref - s * 2))
return float(fid)
def compute_stats(feats: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
mu = feats.mean(axis=0) # [d]
sigma = np.cov(feats, rowvar=False) # [d, d]
return mu, sigma
def open_url(
url: str,
num_attempts: int = 10,
verbose: bool = True,
return_filename: bool = False,
) -> Any:
"""Download the given URL and return a binary-mode file object to access the data."""
assert num_attempts >= 1
# Doesn't look like an URL scheme so interpret it as a local filename.
if not re.match("^[a-z]+://", url):
return url if return_filename else open(url, "rb")
# Handle file URLs. This code handles unusual file:// patterns that
# arise on Windows:
#
# file:///c:/foo.txt
#
# which would translate to a local '/c:/foo.txt' filename that's
# invalid. Drop the forward slash for such pathnames.
#
# If you touch this code path, you should test it on both Linux and
# Windows.
#
# Some internet resources suggest using urllib.request.url2pathname() but
# but that converts forward slashes to backslashes and this causes
# its own set of problems.
if url.startswith("file://"):
filename = urllib.parse.urlparse(url).path
if re.match(r"^/[a-zA-Z]:", filename):
filename = filename[1:]
return filename if return_filename else open(filename, "rb")
url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
# Download.
url_name = None
url_data = None
with requests.Session() as session:
if verbose:
print("Downloading %s ..." % url, end="", flush=True)
for attempts_left in reversed(range(num_attempts)):
try:
with session.get(url) as res:
res.raise_for_status()
if len(res.content) == 0:
raise IOError("No data received")
if len(res.content) < 8192:
content_str = res.content.decode("utf-8")
if "download_warning" in res.headers.get("Set-Cookie", ""):
links = [
html.unescape(link)
for link in content_str.split('"')
if "export=download" in link
]
if len(links) == 1:
url = requests.compat.urljoin(url, links[0])
raise IOError("Google Drive virus checker nag")
if "Google Drive - Quota exceeded" in content_str:
raise IOError(
"Google Drive download quota exceeded -- please try again later"
)
match = re.search(
r'filename="([^"]*)"',
res.headers.get("Content-Disposition", ""),
)
url_name = match[1] if match else url
url_data = res.content
if verbose:
print(" done")
break
except KeyboardInterrupt:
raise
except:
if not attempts_left:
if verbose:
print(" failed")
raise
if verbose:
print(".", end="", flush=True)
# Return data as file object.
assert not return_filename
return io.BytesIO(url_data)
def load_video(ip):
vid, *_ = read_video(ip)
vid = rearrange(vid, "t h w c -> t c h w").to(torch.uint8)
return vid
def get_data_from_str(input_str, nprc=None):
assert os.path.isdir(
input_str
), f'Specified input folder "{input_str}" is not a directory'
vid_filelist = glob.glob(os.path.join(input_str, "*.mp4"))
print(f"Found {len(vid_filelist)} videos in dir {input_str}")
if nprc is None:
try:
nprc = mp.cpu_count()
except NotImplementedError:
print(
"WARNING: cpu_count() not avlailable, using only 1 cpu for video loading"
)
nprc = 1
pool = mp.Pool(processes=nprc)
vids = []
for v in tqdm(
pool.imap_unordered(load_video, vid_filelist),
total=len(vid_filelist),
desc="Loading videos...",
):
vids.append(v)
vids = torch.stack(vids, dim=0).float()
return vids
def get_stats(stats):
    """Load precomputed statistics from an ``.npz`` file into a plain dict of arrays."""
    assert os.path.isfile(stats) and stats.endswith(
        ".npz"
    ), f"no stats found under {stats}"

    print(f"Using precomputed statistics under {stats}")
    archive = np.load(stats)
    return {key: archive[key] for key in archive.files}
@torch.no_grad()
def compute_fvd(
    ref_input, sample_input, bs=32, ref_stats=None, sample_stats=None, nprc_load=None
):
    """Compute the Frechet Video Distance between reference and sample videos.

    Either input may be a directory path (``*.mp4`` files are loaded via
    ``get_data_from_str``) or an in-memory batch.  Precomputed statistics can
    be supplied as ``.npz`` paths through ``ref_stats`` / ``sample_stats`` to
    skip the corresponding activation pass.
    """
    need_activations = ref_stats is None or sample_stats is None

    if need_activations:
        # only_ref: sample stats are precomputed, compute just the reference side.
        only_ref = sample_stats is not None
        only_sample = ref_stats is not None

        if not only_sample and isinstance(ref_input, str):
            ref_input = get_data_from_str(ref_input, nprc_load)
        if not only_ref and isinstance(sample_input, str):
            sample_input = get_data_from_str(sample_input, nprc_load)

        device = "cuda" if torch.cuda.is_available() else "cpu"
        stats = compute_statistics(
            sample_input,
            ref_input,
            device=device,
            bs=bs,
            only_ref=only_ref,
            only_sample=only_sample,
        )

        if only_ref:
            stats.update(get_stats(sample_stats))
        elif only_sample:
            stats.update(get_stats(ref_stats))
    else:
        stats = get_stats(sample_stats)
        stats.update(get_stats(ref_stats))

    return {"FVD": compute_frechet_distance(**stats)}
@torch.no_grad()
def compute_statistics(
    videos_fake,
    videos_real,
    device: str = "cuda",
    bs=32,
    only_ref=False,
    only_sample=False,
) -> Dict:
    """Compute I3D activation statistics (mean + covariance) for FVD.

    :param videos_fake: sample videos; torch tensor, or numpy array of shape
        (n_vids, t, h, w, c) in range [0, 255] (permuted to (n, c, t, h, w)).
    :param videos_real: reference videos, same conventions as ``videos_fake``.
    :param device: device the torchscript I3D detector runs on.
    :param bs: batch size for detector forward passes.
    :param only_ref: compute only reference-side statistics.
    :param only_sample: compute only sample-side statistics.
    :return: dict with ``mu_sample``/``sigma_sample`` and/or ``mu_ref``/``sigma_ref``.
    """
    detector_url = "https://www.dropbox.com/s/ge9e5ujwgetktms/i3d_torchscript.pt?dl=1"
    detector_kwargs = dict(
        rescale=True, resize=True, return_features=True
    )  # Return raw features before the softmax layer.

    with open_url(detector_url, verbose=False) as f:
        detector = torch.jit.load(f).eval().to(device)

    assert not (
        only_sample and only_ref
    ), "only_ref and only_sample arguments are mutually exclusive"

    ref_embed, sample_embed = [], []

    info = f"Computing I3D activations for FVD score with batch size {bs}"

    if only_ref:
        if not isvideo(videos_real):
            # if not a video we assume numpy arrays of shape (n_vids, t, h, w, c) in range [0,255]
            videos_real = torch.from_numpy(videos_real).permute(0, 4, 1, 2, 3).float()
            print(videos_real.shape)

        if videos_real.shape[0] % bs == 0:
            n_secs = videos_real.shape[0] // bs
        else:
            n_secs = videos_real.shape[0] // bs + 1

        videos_real = torch.tensor_split(videos_real, n_secs, dim=0)

        for ref_v in tqdm(videos_real, total=len(videos_real), desc=info):
            feats_ref = (
                detector(ref_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
            )
            ref_embed.append(feats_ref)

    elif only_sample:
        if not isvideo(videos_fake):
            # if not a video we assume numpy arrays of shape (n_vids, t, h, w, c) in range [0,255]
            videos_fake = torch.from_numpy(videos_fake).permute(0, 4, 1, 2, 3).float()
            print(videos_fake.shape)

        if videos_fake.shape[0] % bs == 0:
            n_secs = videos_fake.shape[0] // bs
        else:
            n_secs = videos_fake.shape[0] // bs + 1

        # BUGFIX: the original split and iterated ``videos_real`` here, which
        # in the only_sample path is never converted/split -- chunk the
        # sample videos instead.
        videos_fake = torch.tensor_split(videos_fake, n_secs, dim=0)

        for sample_v in tqdm(videos_fake, total=len(videos_fake), desc=info):
            feats_sample = (
                detector(sample_v.to(device).contiguous(), **detector_kwargs)
                .cpu()
                .numpy()
            )
            sample_embed.append(feats_sample)

    else:
        if not isvideo(videos_real):
            # if not a video we assume numpy arrays of shape (n_vids, t, h, w, c) in range [0,255]
            videos_real = torch.from_numpy(videos_real).permute(0, 4, 1, 2, 3).float()

        if not isvideo(videos_fake):
            videos_fake = torch.from_numpy(videos_fake).permute(0, 4, 1, 2, 3).float()

        if videos_fake.shape[0] % bs == 0:
            n_secs = videos_fake.shape[0] // bs
        else:
            n_secs = videos_fake.shape[0] // bs + 1

        videos_real = torch.tensor_split(videos_real, n_secs, dim=0)
        videos_fake = torch.tensor_split(videos_fake, n_secs, dim=0)

        for ref_v, sample_v in tqdm(
            zip(videos_real, videos_fake), total=len(videos_fake), desc=info
        ):
            feats_sample = (
                detector(sample_v.to(device).contiguous(), **detector_kwargs)
                .cpu()
                .numpy()
            )
            feats_ref = (
                detector(ref_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
            )
            sample_embed.append(feats_sample)
            ref_embed.append(feats_ref)

    out = dict()
    if len(sample_embed) > 0:
        sample_embed = np.concatenate(sample_embed, axis=0)
        mu_sample, sigma_sample = compute_stats(sample_embed)
        out.update({"mu_sample": mu_sample, "sigma_sample": sigma_sample})

    if len(ref_embed) > 0:
        ref_embed = np.concatenate(ref_embed, axis=0)
        mu_ref, sigma_ref = compute_stats(ref_embed)
        out.update({"mu_ref": mu_ref, "sigma_ref": sigma_ref})

    return out
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/evaluate/ssim.py | extern/ldm_zero123/modules/evaluate/ssim.py | # MIT Licence
# Methods to predict the SSIM, taken from
# https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py
from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length *window_size*, normalised to sum to 1."""
    center = window_size // 2
    weights = torch.Tensor(
        [exp(-((i - center) ** 2) / float(2 * sigma**2)) for i in range(window_size)]
    )
    return weights / weights.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, window_size, window_size) separable Gaussian window."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return Variable(
        kernel_2d.expand(channel, 1, window_size, window_size).contiguous()
    )
def _ssim(img1, img2, window, window_size, channel, mask=None, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = (
F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
)
sigma2_sq = (
F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
)
sigma12 = (
F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel)
- mu1_mu2
)
C1 = (0.01) ** 2
C2 = (0.03) ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
)
if not (mask is None):
b = mask.size(0)
ssim_map = ssim_map.mean(dim=1, keepdim=True) * mask
ssim_map = ssim_map.view(b, -1).sum(dim=1) / mask.view(b, -1).sum(dim=1).clamp(
min=1
)
return ssim_map
import pdb
pdb.set_trace
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """Module wrapper around :func:`_ssim` that caches the Gaussian window
    and rebuilds it only when the input's channel count or dtype changes."""

    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2, mask=None):
        (_, channel, _, _) = img1.size()

        if channel != self.channel or self.window.data.type() != img1.data.type():
            # Cached window is stale: rebuild for the new channel count,
            # move/cast it to match img1, and cache it for subsequent calls.
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        else:
            window = self.window

        return _ssim(
            img1,
            img2,
            window,
            self.window_size,
            channel,
            mask,
            self.size_average,
        )
def ssim(img1, img2, window_size=11, mask=None, size_average=True):
    """Functional SSIM: build a window matching *img1* and delegate to :func:`_ssim`."""
    channel = img1.size(1)
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, mask, size_average)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/evaluate/frechet_video_distance.py | extern/ldm_zero123/modules/evaluate/frechet_video_distance.py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Minimal Reference implementation for the Frechet Video Distance (FVD).
FVD is a metric for the quality of video generation models. It is inspired by
the FID (Frechet Inception Distance) used for images, but uses a different
embedding to be better suitable for videos.
"""
from __future__ import absolute_import, division, print_function
import six
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
import tensorflow_hub as hub
def preprocess(videos, target_resolution):
    """Runs some preprocessing on the videos for I3D model.

    Args:
      videos: <T>[batch_size, num_frames, height, width, depth] The videos to be
        preprocessed. We don't care about the specific dtype of the videos, it can
        be anything that tf.image.resize_bilinear accepts. Values are expected to
        be in the range 0-255.
      target_resolution: (width, height): target video resolution

    Returns:
      videos: <float32>[batch_size, num_frames, height, width, depth]
    """
    shape = list(videos.shape)
    # Collapse (batch, frames) so every frame is resized in one pass.
    frames = tf.reshape(videos, [-1] + shape[-3:])
    resized = tf.image.resize_bilinear(frames, size=target_resolution)
    restored = tf.reshape(resized, [shape[0], -1] + list(target_resolution) + [3])
    # Map [0, 255] -> [-1, 1] as expected by the I3D embedding.
    return 2.0 * tf.cast(restored, tf.float32) / 255.0 - 1
def _is_in_graph(tensor_name):
    """Checks whether a given tensor does exists in the graph."""
    graph = tf.get_default_graph()
    try:
        graph.get_tensor_by_name(tensor_name)
        return True
    except KeyError:
        return False
def create_id3_embedding(videos, warmup=False, batch_size=16):
    """Embeds the given videos using the Inflated 3D Convolution network.

    Downloads the graph of the I3D from tf.hub and adds it to the graph on the
    first call.

    Args:
      videos: <float32>[batch_size, num_frames, height=224, width=224, depth=3].
        Expected range is [-1, 1].
      warmup: if True, additionally assert that the static batch size of
        *videos* matches ``batch_size`` (or is unknown) before importing.
      batch_size: expected per-step batch size, enforced by a graph assert.

    Returns:
      embedding: <float32>[batch_size, embedding_size]. embedding_size depends
                 on the model used.

    Raises:
      ValueError: when a provided embedding_layer is not supported.
    """

    # batch_size = 16
    module_spec = "https://tfhub.dev/deepmind/i3d-kinetics-400/1"

    # Making sure that we import the graph separately for
    # each different input video tensor.
    module_name = "fvd_kinetics-400_id3_module_" + six.ensure_str(videos.name).replace(
        ":", "_"
    )

    # Runtime sanity checks: values must be in [-1, 1] and the frame batch
    # must match the declared batch_size.
    assert_ops = [
        tf.Assert(
            tf.reduce_max(videos) <= 1.001, ["max value in frame is > 1", videos]
        ),
        tf.Assert(
            tf.reduce_min(videos) >= -1.001, ["min value in frame is < -1", videos]
        ),
        tf.assert_equal(
            tf.shape(videos)[0],
            batch_size,
            ["invalid frame batch size: ", tf.shape(videos)],
            summarize=6,
        ),
    ]
    with tf.control_dependencies(assert_ops):
        videos = tf.identity(videos)

    module_scope = "%s_apply_default/" % module_name

    # To check whether the module has already been loaded into the graph, we look
    # for a given tensor name. If this tensor name exists, we assume the function
    # has been called before and the graph was imported. Otherwise we import it.
    # Note: in theory, the tensor could exist, but have wrong shapes.
    # This will happen if create_id3_embedding is called with a frames_placehoder
    # of wrong size/batch size, because even though that will throw a tf.Assert
    # on graph-execution time, it will insert the tensor (with wrong shape) into
    # the graph. This is why we need the following assert.
    if warmup:
        video_batch_size = int(videos.shape[0])
        assert video_batch_size in [
            batch_size,
            -1,
            None,
        ], f"Invalid batch size {video_batch_size}"
    tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
    if not _is_in_graph(tensor_name):
        i3d_model = hub.Module(module_spec, name=module_name)
        i3d_model(videos)

    # gets the kinetics-i3d-400-logits layer
    tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
    tensor = tf.get_default_graph().get_tensor_by_name(tensor_name)
    return tensor
def calculate_fvd(real_activations, generated_activations):
    """Returns a list of ops that compute metrics as funcs of activations.

    Args:
      real_activations: <float32>[num_samples, embedding_size]
      generated_activations: <float32>[num_samples, embedding_size]

    Returns:
      A scalar that contains the requested FVD.
    """
    # FVD is the classifier Frechet distance over I3D embeddings; delegate
    # to TF-GAN's reference implementation.
    return tfgan.eval.frechet_classifier_distance_from_activations(
        real_activations, generated_activations
    )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/evaluate/adm_evaluator.py | extern/ldm_zero123/modules/evaluate/adm_evaluator.py | import argparse
import io
import os
import random
import warnings
import zipfile
from abc import ABC, abstractmethod
from contextlib import contextmanager
from functools import partial
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from typing import Iterable, Optional, Tuple
import numpy as np
import requests
import tensorflow.compat.v1 as tf
import yaml
from scipy import linalg
from tqdm.auto import tqdm
# Pretrained InceptionV3 graph-def (OpenAI mirror) and the local filename it
# is cached under by _download_inception_model().
INCEPTION_V3_URL = "https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/classify_image_graph_def.pb"
INCEPTION_V3_PATH = "classify_image_graph_def.pb"

# Tensor names inside the imported graph: pooled features feed FID, the
# intermediate conv features feed sFID.
FID_POOL_NAME = "pool_3:0"
FID_SPATIAL_NAME = "mixed_6/conv:0"

REQUIREMENTS = (
    f"This script has the following requirements: \n"
    "tensorflow-gpu>=2.0" + "\n" + "scipy" + "\n" + "requests" + "\n" + "tqdm"
)
def main():
    """CLI entry point: compare a sample npz batch against a reference batch.

    Computes Inception Score, FID, sFID, precision and recall, prints each,
    and writes an ``evaluation_metrics.yaml`` next to the sample batch.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--ref_batch", help="path to reference batch npz file")
    parser.add_argument("--sample_batch", help="path to sample batch npz file")
    args = parser.parse_args()

    config = tf.ConfigProto(
        allow_soft_placement=True  # allows DecodeJpeg to run on CPU in Inception graph
    )
    config.gpu_options.allow_growth = True
    evaluator = Evaluator(tf.Session(config=config))

    print("warming up TensorFlow...")
    # This will cause TF to print a bunch of verbose stuff now rather
    # than after the next print(), to help prevent confusion.
    evaluator.warmup()

    print("computing reference batch activations...")
    ref_acts = evaluator.read_activations(args.ref_batch)
    print("computing/reading reference batch statistics...")
    ref_stats, ref_stats_spatial = evaluator.read_statistics(args.ref_batch, ref_acts)

    print("computing sample batch activations...")
    sample_acts = evaluator.read_activations(args.sample_batch)
    print("computing/reading sample batch statistics...")
    sample_stats, sample_stats_spatial = evaluator.read_statistics(
        args.sample_batch, sample_acts
    )

    print("Computing evaluations...")
    is_ = evaluator.compute_inception_score(sample_acts[0])
    print("Inception Score:", is_)
    fid = sample_stats.frechet_distance(ref_stats)
    print("FID:", fid)
    sfid = sample_stats_spatial.frechet_distance(ref_stats_spatial)
    print("sFID:", sfid)
    prec, recall = evaluator.compute_prec_recall(ref_acts[0], sample_acts[0])
    print("Precision:", prec)
    print("Recall:", recall)

    # Persist the metrics next to the sample batch for later inspection.
    # NOTE(review): the "Precision:" key carries a stray colon; kept as-is
    # since downstream consumers may already parse it that way.
    savepath = "/".join(args.sample_batch.split("/")[:-1])
    results_file = os.path.join(savepath, "evaluation_metrics.yaml")
    print(f'Saving evaluation results to "{results_file}"')

    results = {
        "IS": is_,
        "FID": fid,
        "sFID": sfid,
        "Precision:": prec,
        "Recall": recall,
    }
    with open(results_file, "w") as f:
        yaml.dump(results, f, default_flow_style=False)
class InvalidFIDException(Exception):
    """Exception type reserved for FID evaluation errors."""

    pass
class FIDStatistics:
    """Gaussian summary (mean + covariance) of a batch of activations."""

    def __init__(self, mu: np.ndarray, sigma: np.ndarray):
        self.mu = mu
        self.sigma = sigma

    def frechet_distance(self, other, eps=1e-6):
        """
        Compute the Frechet distance between two sets of statistics.
        """
        # https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L132
        mu1, sigma1 = np.atleast_1d(self.mu), np.atleast_2d(self.sigma)
        mu2, sigma2 = np.atleast_1d(other.mu), np.atleast_2d(other.sigma)

        assert (
            mu1.shape == mu2.shape
        ), f"Training and test mean vectors have different lengths: {mu1.shape}, {mu2.shape}"
        assert (
            sigma1.shape == sigma2.shape
        ), f"Training and test covariances have different dimensions: {sigma1.shape}, {sigma2.shape}"

        diff = mu1 - mu2

        # The covariance product might be almost singular; retry with a small
        # diagonal offset if the matrix square root came back non-finite.
        covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
        if not np.isfinite(covmean).all():
            warnings.warn(
                "fid calculation produces singular product; adding %s to diagonal of cov estimates"
                % eps
            )
            offset = np.eye(sigma1.shape[0]) * eps
            covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

        # Numerical error might give a slight imaginary component.
        if np.iscomplexobj(covmean):
            if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
                m = np.max(np.abs(covmean.imag))
                raise ValueError("Imaginary component {}".format(m))
            covmean = covmean.real

        return (
            diff.dot(diff)
            + np.trace(sigma1)
            + np.trace(sigma2)
            - 2 * np.trace(covmean)
        )
class Evaluator:
    """Computes Inception-based metrics for image batches.

    Owns the InceptionV3 feature/softmax graphs inside the given TF session,
    plus a ManifoldEstimator used for precision/recall.
    """

    def __init__(
        self,
        session,
        batch_size=64,
        softmax_batch_size=512,
    ):
        # batch_size: images per InceptionV3 forward pass.
        # softmax_batch_size: pooled activations per softmax forward pass.
        self.sess = session
        self.batch_size = batch_size
        self.softmax_batch_size = softmax_batch_size
        self.manifold_estimator = ManifoldEstimator(session)
        with self.sess.graph.as_default():
            # NHWC image input; 2048-d pooled features feed the softmax head.
            self.image_input = tf.placeholder(tf.float32, shape=[None, None, None, 3])
            self.softmax_input = tf.placeholder(tf.float32, shape=[None, 2048])
            self.pool_features, self.spatial_features = _create_feature_graph(
                self.image_input
            )
            self.softmax = _create_softmax_graph(self.softmax_input)

    def warmup(self):
        """Run one dummy forward pass so TF does its one-time setup up front.

        Iterating the 5-D zeros array yields a single (8, 64, 64, 3) batch,
        which matches the NHWC image placeholder.
        """
        self.compute_activations(np.zeros([1, 8, 64, 64, 3]))

    def read_activations(self, npz_path: str) -> Tuple[np.ndarray, np.ndarray]:
        """Stream ``arr_0`` from the npz at *npz_path* and compute its activations."""
        with open_npz_array(npz_path, "arr_0") as reader:
            return self.compute_activations(reader.read_batches(self.batch_size))

    def compute_activations(
        self, batches: Iterable[np.ndarray], silent=False
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Compute image features for downstream evals.

        :param batches: a iterator over NHWC numpy arrays in [0, 255].
        :return: a tuple of numpy arrays of shape [N x X], where X is a feature
                 dimension. The tuple is (pool_3, spatial).
        """
        preds = []
        spatial_preds = []
        it = batches if silent else tqdm(batches)
        for batch in it:
            batch = batch.astype(np.float32)
            pred, spatial_pred = self.sess.run(
                [self.pool_features, self.spatial_features], {self.image_input: batch}
            )
            # Flatten per-image features to a single feature vector.
            preds.append(pred.reshape([pred.shape[0], -1]))
            spatial_preds.append(spatial_pred.reshape([spatial_pred.shape[0], -1]))
        return (
            np.concatenate(preds, axis=0),
            np.concatenate(spatial_preds, axis=0),
        )

    def read_statistics(
        self, npz_path: str, activations: Tuple[np.ndarray, np.ndarray]
    ) -> Tuple[FIDStatistics, FIDStatistics]:
        """Return (pooled, spatial) FID statistics for a batch.

        If the npz already stores precomputed mu/sigma (and mu_s/sigma_s),
        those are reused; otherwise statistics are fit to *activations*.
        """
        obj = np.load(npz_path)
        if "mu" in list(obj.keys()):
            return FIDStatistics(obj["mu"], obj["sigma"]), FIDStatistics(
                obj["mu_s"], obj["sigma_s"]
            )
        return tuple(self.compute_statistics(x) for x in activations)

    def compute_statistics(self, activations: np.ndarray) -> FIDStatistics:
        """Fit a Gaussian (mean + covariance) to a set of activations."""
        mu = np.mean(activations, axis=0)
        sigma = np.cov(activations, rowvar=False)
        return FIDStatistics(mu, sigma)

    def compute_inception_score(
        self, activations: np.ndarray, split_size: int = 5000
    ) -> float:
        """Compute the Inception Score: softmax the pooled activations in
        chunks, then average the exp-KL estimator over splits of *split_size*."""
        softmax_out = []
        for i in range(0, len(activations), self.softmax_batch_size):
            acts = activations[i : i + self.softmax_batch_size]
            softmax_out.append(
                self.sess.run(self.softmax, feed_dict={self.softmax_input: acts})
            )
        preds = np.concatenate(softmax_out, axis=0)
        # https://github.com/openai/improved-gan/blob/4f5d1ec5c16a7eceb206f42bfc652693601e1d5c/inception_score/model.py#L46
        scores = []
        for i in range(0, len(preds), split_size):
            part = preds[i : i + split_size]
            kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
            kl = np.mean(np.sum(kl, 1))
            scores.append(np.exp(kl))
        return float(np.mean(scores))

    def compute_prec_recall(
        self, activations_ref: np.ndarray, activations_sample: np.ndarray
    ) -> Tuple[float, float]:
        """Return (precision, recall) between reference and sample activations."""
        radii_1 = self.manifold_estimator.manifold_radii(activations_ref)
        radii_2 = self.manifold_estimator.manifold_radii(activations_sample)
        pr = self.manifold_estimator.evaluate_pr(
            activations_ref, radii_1, activations_sample, radii_2
        )
        return (float(pr[0][0]), float(pr[1][0]))
class ManifoldEstimator:
    """
    A helper for comparing manifolds of feature vectors.

    Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L57
    """

    def __init__(
        self,
        session,
        row_batch_size=10000,
        col_batch_size=10000,
        nhood_sizes=(3,),
        clamp_to_percentile=None,
        eps=1e-5,
    ):
        """
        Estimate the manifold of given feature vectors.

        :param session: the TensorFlow session.
        :param row_batch_size: row batch size to compute pairwise distances
            (parameter to trade-off between memory usage and performance).
        :param col_batch_size: column batch size to compute pairwise distances.
        :param nhood_sizes: number of neighbors used to estimate the manifold.
        :param clamp_to_percentile: prune hyperspheres that have radius larger than
            the given percentile.
        :param eps: small number for numerical stability.
        """
        self.distance_block = DistanceBlock(session)
        self.row_batch_size = row_batch_size
        self.col_batch_size = col_batch_size
        self.nhood_sizes = nhood_sizes
        self.num_nhoods = len(nhood_sizes)
        self.clamp_to_percentile = clamp_to_percentile
        self.eps = eps

    def warmup(self):
        """Run evaluate_pr once on dummy data so TF builds its ops up front."""
        feats, radii = (
            np.zeros([1, 2048], dtype=np.float32),
            np.zeros([1, 1], dtype=np.float32),
        )
        self.evaluate_pr(feats, radii, feats, radii)

    def manifold_radii(self, features: np.ndarray) -> np.ndarray:
        """Return per-sample k-NN radii, shape [num_images, num_nhoods]."""
        num_images = len(features)

        # Estimate manifold of features by calculating distances to k-NN of each sample.
        radii = np.zeros([num_images, self.num_nhoods], dtype=np.float32)
        distance_batch = np.zeros([self.row_batch_size, num_images], dtype=np.float32)
        seq = np.arange(max(self.nhood_sizes) + 1, dtype=np.int32)

        for begin1 in range(0, num_images, self.row_batch_size):
            end1 = min(begin1 + self.row_batch_size, num_images)
            row_batch = features[begin1:end1]

            for begin2 in range(0, num_images, self.col_batch_size):
                end2 = min(begin2 + self.col_batch_size, num_images)
                col_batch = features[begin2:end2]

                # Compute distances between batches.
                distance_batch[
                    0 : end1 - begin1, begin2:end2
                ] = self.distance_block.pairwise_distances(row_batch, col_batch)

            # Find the k-nearest neighbor from the current batch.
            radii[begin1:end1, :] = np.concatenate(
                [
                    x[:, self.nhood_sizes]
                    for x in _numpy_partition(
                        distance_batch[0 : end1 - begin1, :], seq, axis=1
                    )
                ],
                axis=0,
            )

        if self.clamp_to_percentile is not None:
            max_distances = np.percentile(radii, self.clamp_to_percentile, axis=0)
            radii[radii > max_distances] = 0
        return radii

    def evaluate(
        self, features: np.ndarray, radii: np.ndarray, eval_features: np.ndarray
    ):
        """
        Evaluate if new feature vectors are at the manifold.
        """
        num_eval_images = eval_features.shape[0]
        num_ref_images = radii.shape[0]
        distance_batch = np.zeros(
            [self.row_batch_size, num_ref_images], dtype=np.float32
        )
        batch_predictions = np.zeros([num_eval_images, self.num_nhoods], dtype=np.int32)
        max_realism_score = np.zeros([num_eval_images], dtype=np.float32)
        nearest_indices = np.zeros([num_eval_images], dtype=np.int32)

        for begin1 in range(0, num_eval_images, self.row_batch_size):
            end1 = min(begin1 + self.row_batch_size, num_eval_images)
            feature_batch = eval_features[begin1:end1]

            for begin2 in range(0, num_ref_images, self.col_batch_size):
                end2 = min(begin2 + self.col_batch_size, num_ref_images)
                ref_batch = features[begin2:end2]

                distance_batch[
                    0 : end1 - begin1, begin2:end2
                ] = self.distance_block.pairwise_distances(feature_batch, ref_batch)

            # From the minibatch of new feature vectors, determine if they are in the estimated manifold.
            # If a feature vector is inside a hypersphere of some reference sample, then
            # the new sample lies at the estimated manifold.
            # The radii of the hyperspheres are determined from distances of neighborhood size k.
            samples_in_manifold = distance_batch[0 : end1 - begin1, :, None] <= radii
            batch_predictions[begin1:end1] = np.any(samples_in_manifold, axis=1).astype(
                np.int32
            )

            max_realism_score[begin1:end1] = np.max(
                radii[:, 0] / (distance_batch[0 : end1 - begin1, :] + self.eps), axis=1
            )
            nearest_indices[begin1:end1] = np.argmin(
                distance_batch[0 : end1 - begin1, :], axis=1
            )

        return {
            "fraction": float(np.mean(batch_predictions)),
            "batch_predictions": batch_predictions,
            # Key keeps the historical "realisim" typo; callers may depend on it.
            "max_realisim_score": max_realism_score,
            "nearest_indices": nearest_indices,
        }

    def evaluate_pr(
        self,
        features_1: np.ndarray,
        radii_1: np.ndarray,
        features_2: np.ndarray,
        radii_2: np.ndarray,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Evaluate precision and recall efficiently.

        :param features_1: [N1 x D] feature vectors for reference batch.
        :param radii_1: [N1 x K1] radii for reference vectors.
        :param features_2: [N2 x D] feature vectors for the other batch.
        :param radii_2: [N x K2] radii for other vectors.
        :return: a tuple of arrays for (precision, recall):
                 - precision: an np.ndarray of length K1
                 - recall: an np.ndarray of length K2
        """
        # BUGFIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``bool`` is the documented replacement.
        features_1_status = np.zeros([len(features_1), radii_2.shape[1]], dtype=bool)
        features_2_status = np.zeros([len(features_2), radii_1.shape[1]], dtype=bool)
        for begin_1 in range(0, len(features_1), self.row_batch_size):
            end_1 = begin_1 + self.row_batch_size
            batch_1 = features_1[begin_1:end_1]
            for begin_2 in range(0, len(features_2), self.col_batch_size):
                end_2 = begin_2 + self.col_batch_size
                batch_2 = features_2[begin_2:end_2]

                batch_1_in, batch_2_in = self.distance_block.less_thans(
                    batch_1, radii_1[begin_1:end_1], batch_2, radii_2[begin_2:end_2]
                )
                features_1_status[begin_1:end_1] |= batch_1_in
                features_2_status[begin_2:end_2] |= batch_2_in
        return (
            np.mean(features_2_status.astype(np.float64), axis=0),
            np.mean(features_1_status.astype(np.float64), axis=0),
        )
class DistanceBlock:
    """
    Calculate pairwise distances between vectors.

    Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L34
    """

    def __init__(self, session):
        self.session = session

        # Initialize TF graph to calculate pairwise distances.
        with session.graph.as_default():
            self._features_batch1 = tf.placeholder(tf.float32, shape=[None, None])
            self._features_batch2 = tf.placeholder(tf.float32, shape=[None, None])
            # Compute in float16 first; fall back to full float32 whenever
            # the half-precision result contains non-finite entries.
            distance_block_16 = _batch_pairwise_distances(
                tf.cast(self._features_batch1, tf.float16),
                tf.cast(self._features_batch2, tf.float16),
            )
            self.distance_block = tf.cond(
                tf.reduce_all(tf.math.is_finite(distance_block_16)),
                lambda: tf.cast(distance_block_16, tf.float32),
                lambda: _batch_pairwise_distances(
                    self._features_batch1, self._features_batch2
                ),
            )

            # Extra logic for less thans.
            self._radii1 = tf.placeholder(tf.float32, shape=[None, None])
            self._radii2 = tf.placeholder(tf.float32, shape=[None, None])
            dist32 = tf.cast(self.distance_block, tf.float32)[..., None]
            # _batch_1_in[i, k]: some batch-2 hypersphere of neighborhood k
            # contains row i of batch 1 (and symmetrically for _batch_2_in).
            self._batch_1_in = tf.math.reduce_any(dist32 <= self._radii2, axis=1)
            self._batch_2_in = tf.math.reduce_any(
                dist32 <= self._radii1[:, None], axis=0
            )

    def pairwise_distances(self, U, V):
        """
        Evaluate pairwise distances between two batches of feature vectors.
        """
        return self.session.run(
            self.distance_block,
            feed_dict={self._features_batch1: U, self._features_batch2: V},
        )

    def less_thans(self, batch_1, radii_1, batch_2, radii_2):
        # Returns boolean membership masks for both batches in one session run.
        return self.session.run(
            [self._batch_1_in, self._batch_2_in],
            feed_dict={
                self._features_batch1: batch_1,
                self._features_batch2: batch_2,
                self._radii1: radii_1,
                self._radii2: radii_2,
            },
        )
def _batch_pairwise_distances(U, V):
    """
    Compute pairwise squared Euclidean distances between rows of U and rows of V.
    """
    with tf.variable_scope("pairwise_dist_block"):
        # Squared row norms, shaped as a column (for U) and a row (for V).
        norm_u = tf.reshape(tf.reduce_sum(tf.square(U), 1), [-1, 1])
        norm_v = tf.reshape(tf.reduce_sum(tf.square(V), 1), [1, -1])

        # ||u - v||^2 = ||u||^2 - 2 u.v + ||v||^2, clamped at zero against
        # tiny negative values from floating-point cancellation.
        return tf.maximum(norm_u - 2 * tf.matmul(U, V, False, True) + norm_v, 0.0)
class NpzArrayReader(ABC):
    """Abstract batched reader over a single array stored in an npz file."""

    @abstractmethod
    def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
        """Return the next batch of at most *batch_size* rows, or None at EOF."""

    @abstractmethod
    def remaining(self) -> int:
        """Return how many rows have not been read yet."""

    def read_batches(self, batch_size: int) -> Iterable[np.ndarray]:
        """Return a length-aware iterable over all remaining batches."""

        def gen_fn():
            while True:
                chunk = self.read_batch(batch_size)
                if chunk is None:
                    return
                yield chunk

        full, partial = divmod(self.remaining(), batch_size)
        return BatchIterator(gen_fn, full + (1 if partial else 0))
class BatchIterator:
    """Iterable with a known length, wrapping a generator factory.

    ``gen_fn`` is called afresh on every ``iter()`` so the iterable can be
    traversed more than once if the factory permits it.
    """

    def __init__(self, gen_fn, length):
        self.gen_fn = gen_fn
        self.length = length

    def __len__(self):
        return self.length

    def __iter__(self):
        return self.gen_fn()
class StreamingNpzArrayReader(NpzArrayReader):
    """NpzArrayReader that streams rows straight from an open npy file handle."""

    def __init__(self, arr_f, shape, dtype):
        # arr_f: file object positioned just past the npy header.
        # shape/dtype: parsed from the npy header; shape[0] is the row count.
        self.arr_f = arr_f
        self.shape = shape
        self.dtype = dtype
        # Number of rows consumed so far.
        self.idx = 0

    def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
        """Read up to *batch_size* rows from the file; None once exhausted."""
        if self.idx >= self.shape[0]:
            return None

        bs = min(batch_size, self.shape[0] - self.idx)
        self.idx += bs

        if self.dtype.itemsize == 0:
            # Zero-byte items (e.g. empty-string dtype): nothing to read,
            # just produce an array of the right shape.
            return np.ndarray([bs, *self.shape[1:]], dtype=self.dtype)

        read_count = bs * np.prod(self.shape[1:])
        read_size = int(read_count * self.dtype.itemsize)
        data = _read_bytes(self.arr_f, read_size, "array data")
        return np.frombuffer(data, dtype=self.dtype).reshape([bs, *self.shape[1:]])

    def remaining(self) -> int:
        return max(0, self.shape[0] - self.idx)
class MemoryNpzArrayReader(NpzArrayReader):
    """NpzArrayReader over an array fully materialised in memory."""

    def __init__(self, arr):
        self.arr = arr
        self.idx = 0  # index of the first unread row

    @classmethod
    def load(cls, path: str, arr_name: str):
        """Read *arr_name* from the npz at *path* into memory and wrap it."""
        with open(path, "rb") as f:
            return cls(np.load(f)[arr_name])

    def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
        if self.idx >= self.arr.shape[0]:
            return None

        start = self.idx
        self.idx = start + batch_size
        return self.arr[start : start + batch_size]

    def remaining(self) -> int:
        return max(0, self.arr.shape[0] - self.idx)
@contextmanager
def open_npz_array(path: str, arr_name: str) -> NpzArrayReader:
    """Yield an NpzArrayReader for *arr_name* inside the npz at *path*.

    Streams directly from the zip member when the npy header is a known
    version and the data is neither Fortran-ordered nor object-typed;
    otherwise falls back to loading the whole array into memory.
    """
    with _open_npy_file(path, arr_name) as arr_f:
        version = np.lib.format.read_magic(arr_f)
        if version == (1, 0):
            header = np.lib.format.read_array_header_1_0(arr_f)
        elif version == (2, 0):
            header = np.lib.format.read_array_header_2_0(arr_f)
        else:
            # Unknown npy format version: let numpy handle it in memory.
            yield MemoryNpzArrayReader.load(path, arr_name)
            return
        shape, fortran, dtype = header
        if fortran or dtype.hasobject:
            yield MemoryNpzArrayReader.load(path, arr_name)
        else:
            yield StreamingNpzArrayReader(arr_f, shape, dtype)
def _read_bytes(fp, size, error_template="ran out of data"):
"""
Copied from: https://github.com/numpy/numpy/blob/fb215c76967739268de71aa4bda55dd1b062bc2e/numpy/lib/format.py#L788-L886
Read from file-like object until size bytes are read.
Raises ValueError if not EOF is encountered before size bytes are read.
Non-blocking objects only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on
# would-block, python2 file will truncate, probably nothing can be
# done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data
@contextmanager
def _open_npy_file(path: str, arr_name: str):
with open(path, "rb") as f:
with zipfile.ZipFile(f, "r") as zip_f:
if f"{arr_name}.npy" not in zip_f.namelist():
raise ValueError(f"missing {arr_name} in npz file")
with zip_f.open(f"{arr_name}.npy", "r") as arr_f:
yield arr_f
def _download_inception_model():
    """Fetch the InceptionV3 graph file to ``INCEPTION_V3_PATH`` (no-op when cached).

    Downloads to a ``.tmp`` sibling first and renames at the end, so a partial
    download never masquerades as a complete model file.
    """
    if os.path.exists(INCEPTION_V3_PATH):
        return
    print("downloading InceptionV3 model...")
    tmp_path = INCEPTION_V3_PATH + ".tmp"
    with requests.get(INCEPTION_V3_URL, stream=True) as resp:
        resp.raise_for_status()
        with open(tmp_path, "wb") as out:
            for chunk in tqdm(resp.iter_content(chunk_size=8192)):
                out.write(chunk)
    os.rename(tmp_path, INCEPTION_V3_PATH)
def _create_feature_graph(input_batch):
    """Import the frozen InceptionV3 graph and return (pool3, spatial) features for ``input_batch``."""
    _download_inception_model()
    # Unique name prefix so repeated imports never collide in the default graph.
    prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}"
    with open(INCEPTION_V3_PATH, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    pool3, spatial = tf.import_graph_def(
        graph_def,
        input_map={"ExpandDims:0": input_batch},
        return_elements=[FID_POOL_NAME, FID_SPATIAL_NAME],
        name=prefix,
    )
    _update_shapes(pool3)
    # Keep only the first 7 channels of the spatial features.
    spatial = spatial[..., :7]
    return pool3, spatial
def _create_softmax_graph(input_batch):
    """Build the InceptionV3 classifier head: softmax over logits for ``input_batch``."""
    _download_inception_model()
    prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}"
    with open(INCEPTION_V3_PATH, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    (matmul,) = tf.import_graph_def(
        graph_def, return_elements=["softmax/logits/MatMul"], name=prefix
    )
    # Reuse the pretrained classifier weights (second MatMul input) on our activations.
    weights = matmul.inputs[1]
    logits = tf.matmul(input_batch, weights)
    return tf.nn.softmax(logits)
def _update_shapes(pool3):
    """Make size-1 leading dimensions dynamic in the imported graph's static shapes.

    Replaces any leading dimension equal to 1 in each op output's static shape
    with ``None``, so batches of arbitrary size can be fed through the graph.
    Adapted from:
    https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L50-L63
    """
    ops = pool3.graph.get_operations()
    for op in ops:
        for o in op.outputs:
            shape = o.get_shape()
            if shape._dims is not None:  # pylint: disable=protected-access
                # shape = [s.value for s in shape] TF 1.x
                shape = [s for s in shape]  # TF 2.x
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        # Leading (batch) dim of 1 becomes dynamic.
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                # NOTE(review): writes a private TF attribute — relies on TF internals.
                o.__dict__["_shape_val"] = tf.TensorShape(new_shape)
    return pool3
def _numpy_partition(arr, kth, **kwargs):
num_workers = min(cpu_count(), len(arr))
chunk_size = len(arr) // num_workers
extra = len(arr) % num_workers
start_idx = 0
batches = []
for i in range(num_workers):
size = chunk_size + (1 if i < extra else 0)
batches.append(arr[start_idx : start_idx + size])
start_idx += size
with ThreadPool(num_workers) as pool:
return list(pool.map(partial(np.partition, kth=kth, **kwargs), batches))
if __name__ == "__main__":
    # Script entry point: print the dependency list, then run the evaluation
    # driver (REQUIREMENTS and main are defined earlier, outside this excerpt).
    print(REQUIREMENTS)
    main()
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/evaluate/evaluate_perceptualsim.py | extern/ldm_zero123/modules/evaluate/evaluate_perceptualsim.py | import argparse
import glob
import os
from collections import namedtuple
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
from torchvision import models
from tqdm import tqdm
from extern.ldm_zero123.modules.evaluate.ssim import ssim
# Shared preprocessing: PIL image -> float CHW tensor scaled to [0, 1].
transform = transforms.Compose([transforms.ToTensor()])
def normalize_tensor(in_feat, eps=1e-10):
    """L2-normalize ``in_feat`` across its channel dimension (dim 1)."""
    n, _, h, w = in_feat.size()
    norm = torch.sqrt((in_feat**2).sum(dim=1)).view(n, 1, h, w)
    return in_feat / (norm.expand_as(in_feat) + eps)
def cos_sim(in0, in1):
    """Mean spatial cosine similarity between two feature maps, one scalar per batch item."""
    a = normalize_tensor(in0)
    b = normalize_tensor(in1)
    n, _, h, w = in0.size()
    # Channel-wise dot product per pixel, then average over rows and columns.
    per_pixel = torch.sum(a * b, dim=1).view(n, 1, h, w)
    per_column = torch.mean(per_pixel, dim=2).view(n, 1, 1, w)
    return torch.mean(per_column, dim=3).view(n)
class squeezenet(torch.nn.Module):
    """SqueezeNet1.1 feature extractor exposing 7 intermediate activations."""

    def __init__(self, requires_grad=False, pretrained=True):
        super(squeezenet, self).__init__()
        feats = models.squeezenet1_1(pretrained=pretrained).features
        self.N_slices = 7
        # Layer-index boundaries of the seven slices (slice1..slice7).
        bounds = [(0, 2), (2, 5), (5, 8), (8, 10), (10, 11), (11, 12), (12, 13)]
        for slice_no, (lo, hi) in enumerate(bounds, start=1):
            seq = torch.nn.Sequential()
            for idx in range(lo, hi):
                seq.add_module(str(idx), feats[idx])
            setattr(self, "slice%d" % slice_no, seq)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        acts = []
        h = X
        for slice_no in range(1, self.N_slices + 1):
            h = getattr(self, "slice%d" % slice_no)(h)
            acts.append(h)
        vgg_outputs = namedtuple(
            "SqueezeOutputs",
            ["relu1", "relu2", "relu3", "relu4", "relu5", "relu6", "relu7"],
        )
        return vgg_outputs(*acts)
class alexnet(torch.nn.Module):
    """AlexNet feature extractor exposing 5 intermediate activations."""

    def __init__(self, requires_grad=False, pretrained=True):
        super(alexnet, self).__init__()
        feats = models.alexnet(pretrained=pretrained).features
        self.N_slices = 5
        # Layer-index boundaries of the five slices (slice1..slice5).
        bounds = [(0, 2), (2, 5), (5, 8), (8, 10), (10, 12)]
        for slice_no, (lo, hi) in enumerate(bounds, start=1):
            seq = torch.nn.Sequential()
            for idx in range(lo, hi):
                seq.add_module(str(idx), feats[idx])
            setattr(self, "slice%d" % slice_no, seq)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        acts = []
        h = X
        for slice_no in range(1, self.N_slices + 1):
            h = getattr(self, "slice%d" % slice_no)(h)
            acts.append(h)
        alexnet_outputs = namedtuple(
            "AlexnetOutputs", ["relu1", "relu2", "relu3", "relu4", "relu5"]
        )
        return alexnet_outputs(*acts)
class vgg16(torch.nn.Module):
    """VGG-16 feature extractor exposing 5 intermediate ReLU activations."""

    def __init__(self, requires_grad=False, pretrained=True):
        super(vgg16, self).__init__()
        feats = models.vgg16(pretrained=pretrained).features
        self.N_slices = 5
        # Layer-index boundaries of the five slices (slice1..slice5).
        bounds = [(0, 4), (4, 9), (9, 16), (16, 23), (23, 30)]
        for slice_no, (lo, hi) in enumerate(bounds, start=1):
            seq = torch.nn.Sequential()
            for idx in range(lo, hi):
                seq.add_module(str(idx), feats[idx])
            setattr(self, "slice%d" % slice_no, seq)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        acts = []
        h = X
        for slice_no in range(1, self.N_slices + 1):
            h = getattr(self, "slice%d" % slice_no)(h)
            acts.append(h)
        vgg_outputs = namedtuple(
            "VggOutputs",
            ["relu1_2", "relu2_2", "relu3_3", "relu4_3", "relu5_3"],
        )
        return vgg_outputs(*acts)
class resnet(torch.nn.Module):
    """Torchvision ResNet backbone exposing 5 intermediate feature maps.

    :param requires_grad: freeze all parameters when False (matches the
        sibling squeezenet/alexnet/vgg16 wrappers).
    :param pretrained: load ImageNet weights.
    :param num: ResNet depth: 18, 34, 50, 101 or 152.
    """

    def __init__(self, requires_grad=False, pretrained=True, num=18):
        super(resnet, self).__init__()
        if num == 18:
            self.net = models.resnet18(pretrained=pretrained)
        elif num == 34:
            self.net = models.resnet34(pretrained=pretrained)
        elif num == 50:
            self.net = models.resnet50(pretrained=pretrained)
        elif num == 101:
            self.net = models.resnet101(pretrained=pretrained)
        elif num == 152:
            self.net = models.resnet152(pretrained=pretrained)
        self.N_slices = 5
        # Alias the stem and the four residual stages for the staged forward().
        self.conv1 = self.net.conv1
        self.bn1 = self.net.bn1
        self.relu = self.net.relu
        self.maxpool = self.net.maxpool
        self.layer1 = self.net.layer1
        self.layer2 = self.net.layer2
        self.layer3 = self.net.layer3
        self.layer4 = self.net.layer4
        # FIX: requires_grad was previously ignored; freeze parameters like
        # the other backbone wrappers in this file do.
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h = self.conv1(X)
        h = self.bn1(h)
        h = self.relu(h)
        h_relu1 = h
        h = self.maxpool(h)
        h = self.layer1(h)
        h_conv2 = h
        h = self.layer2(h)
        h_conv3 = h
        h = self.layer3(h)
        h_conv4 = h
        h = self.layer4(h)
        h_conv5 = h
        outputs = namedtuple("Outputs", ["relu1", "conv2", "conv3", "conv4", "conv5"])
        return outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
# Off-the-shelf deep network
class PNet(torch.nn.Module):
    """Pre-trained network with all channels equally weighted by default"""

    def __init__(self, pnet_type="vgg", pnet_rand=False, use_gpu=True):
        super(PNet, self).__init__()
        self.use_gpu = use_gpu
        self.pnet_type = pnet_type
        self.pnet_rand = pnet_rand
        # Per-channel input shift/scale applied before the backbone.
        self.shift = torch.Tensor([-0.030, -0.088, -0.188]).view(1, 3, 1, 1)
        self.scale = torch.Tensor([0.458, 0.448, 0.450]).view(1, 3, 1, 1)
        pretrained = not self.pnet_rand
        if self.pnet_type in ["vgg", "vgg16"]:
            self.net = vgg16(pretrained=pretrained, requires_grad=False)
        elif self.pnet_type == "alex":
            self.net = alexnet(pretrained=pretrained, requires_grad=False)
        elif self.pnet_type[:-2] == "resnet":
            self.net = resnet(
                pretrained=pretrained,
                requires_grad=False,
                num=int(self.pnet_type[-2:]),
            )
        elif self.pnet_type == "squeeze":
            self.net = squeezenet(pretrained=pretrained, requires_grad=False)
        self.L = self.net.N_slices
        if use_gpu:
            self.net.cuda()
            self.shift = self.shift.cuda()
            self.scale = self.scale.cuda()

    def forward(self, in0, in1, retPerLayer=False):
        # NOTE(review): in1 is normalized with in0's broadcast shape, exactly
        # as the original code did — inputs are assumed to share a shape.
        in0_sc = (in0 - self.shift.expand_as(in0)) / self.scale.expand_as(in0)
        in1_sc = (in1 - self.shift.expand_as(in0)) / self.scale.expand_as(in0)
        outs0 = self.net.forward(in0_sc)
        outs1 = self.net.forward(in1_sc)
        # Per-layer dissimilarity: 1 - mean cosine similarity of features.
        layer_scores = [1.0 - cos_sim(f0, f1) for f0, f1 in zip(outs0, outs1)]
        total = 1.0 * layer_scores[0]
        for score in layer_scores[1:]:
            total = total + score
        if retPerLayer:
            return (total, layer_scores)
        return total
# The SSIM metric
def ssim_metric(img1, img2, mask=None):
    # Per-sample SSIM between two image batches (size_average=False keeps one
    # value per batch element instead of a scalar mean).
    return ssim(img1, img2, mask=mask, size_average=False)
# The PSNR metric
def psnr(img1, img2, mask=None, reshape=False):
    """PSNR per batch element, with images assumed scaled to [0, 1] (peak = 1).

    When ``mask`` is given, the squared error is averaged only over masked
    pixels (3 color channels assumed). ``reshape=True`` uses ``Tensor.reshape``
    instead of ``view`` so non-contiguous inputs are accepted.
    """
    batch = img1.size(0)
    if reshape:
        flatten = lambda t: t.reshape(batch, -1)
    else:
        flatten = lambda t: t.view(batch, -1)
    sq_err = (img1 - img2).pow(2)
    if mask is not None:
        masked_sum = flatten(sq_err * mask).sum(dim=1)
        # clamp(min=1) avoids division by zero for all-zero masks.
        denom = 3 * flatten(mask).sum(dim=1).clamp(min=1)
        mse_err = masked_sum / denom
    else:
        mse_err = flatten(sq_err).mean(dim=1)
    return 10 * (1 / mse_err).log10()
# The perceptual similarity metric
def perceptual_sim(img1, img2, vgg16):
    """Perceptual distance between two [0, 1] image batches under ``vgg16``.

    Inputs are remapped to [-1, 1] before being scored by the metric network.
    """
    scaled1 = img1 * 2 - 1
    scaled2 = img2 * 2 - 1
    return vgg16(scaled1, scaled2)
def load_img(img_name, size=None):
    """Load an image file as a 1x3xHxW CUDA tensor in [0, 1].

    :param img_name: path to the image file.
    :param size: optional int (square) or (H, W) pair to resize to.
    :raises: re-raises any loading/decoding error after logging it.
    """
    try:
        img = Image.open(img_name)
        if type(size) == int:
            img = img.resize((size, size))
        elif size is not None:
            # PIL's resize expects (width, height).
            img = img.resize((size[1], size[0]))
        img = transform(img).cuda()
        img = img.unsqueeze(0)
    except Exception as e:
        print("Failed at loading %s " % img_name)
        print(e)
        # FIX: the original allocated a dummy zeros tensor here, but it was
        # dead code — the exception is re-raised immediately, so the dummy
        # could never be returned.
        raise
    return img
def compute_perceptual_similarity(folder, pred_img, tgt_img, take_every_other):
    """Aggregate perceptual distance, SSIM and PSNR over a results folder.

    Each subfolder of ``folder`` must contain exactly one target image
    matching ``tgt_img`` and any number of predictions matching ``pred_img``;
    the best prediction per subfolder is kept (min perceptual distance,
    max SSIM/PSNR).

    :param take_every_other: if True, further reduce each consecutive pair of
        subfolders to its better member before averaging.
    :return: dict mapping metric name to (mean, std).
    """
    # Load VGG16 for feature similarity
    metric_net = PNet().to("cuda")
    metric_net.eval()
    metric_net.cuda()

    values_percsim = []
    values_ssim = []
    values_psnr = []
    for i, sub in tqdm(enumerate(sorted(os.listdir(folder)))):
        pred_paths = glob.glob(folder + sub + "/" + pred_img)
        tgt_paths = glob.glob(folder + sub + "/" + tgt_img)
        assert len(tgt_paths) == 1

        best_perc, best_ssim, best_psnr = 10000, -10, -10
        for pred_path in pred_paths:
            t_img = load_img(tgt_paths[0])
            p_img = load_img(pred_path, size=t_img.shape[2:])
            best_perc = min(best_perc, perceptual_sim(p_img, t_img, metric_net).item())
            best_ssim = max(best_ssim, ssim_metric(p_img, t_img).item())
            best_psnr = max(best_psnr, psnr(p_img, t_img).item())

        values_percsim.append(best_perc)
        values_ssim.append(best_ssim)
        values_psnr.append(best_psnr)

    if take_every_other:
        # Keep the better member of each consecutive pair of subfolders.
        half = len(values_percsim) // 2
        reduced_percsim = [min(values_percsim[2 * k], values_percsim[2 * k + 1]) for k in range(half)]
        reduced_psnr = [max(values_psnr[2 * k], values_psnr[2 * k + 1]) for k in range(half)]
        reduced_ssim = [max(values_ssim[2 * k], values_ssim[2 * k + 1]) for k in range(half)]
        values_percsim = reduced_percsim
        values_psnr = reduced_psnr
        values_ssim = reduced_ssim

    def _stats(vals):
        arr = np.array(vals)
        return (np.mean(arr), np.std(arr))

    return {
        "Perceptual similarity": _stats(values_percsim),
        "PSNR": _stats(values_psnr),
        "SSIM": _stats(values_ssim),
    }
def compute_perceptual_similarity_from_list(
    pred_imgs_list, tgt_imgs_list, take_every_other, simple_format=True
):
    """Aggregate perceptual distance, SSIM and PSNR over explicit path lists.

    For each target, the best score over its prediction list is kept (min
    perceptual distance, max SSIM/PSNR). Predictions with infinite PSNR
    (exact or numerically ambiguous matches) are counted and excluded from
    the PSNR average.

    :param pred_imgs_list: per-target prediction path(s); scalars are wrapped.
    :param tgt_imgs_list: list of target image paths.
    :param take_every_other: reduce each consecutive pair of targets to its
        better member before averaging.
    :param simple_format: emit YAML-friendly [float, float] pairs instead of
        numpy tuples.
    :return: dict mapping metric name to (mean, std).
    """
    # Load VGG16 for feature similarity
    vgg16 = PNet().to("cuda")
    vgg16.eval()
    vgg16.cuda()

    values_percsim = []
    values_ssim = []
    values_psnr = []
    equal_count = 0
    ambig_count = 0
    for i, tgt_img in enumerate(tqdm(tgt_imgs_list)):
        pred_imgs = pred_imgs_list[i]
        tgt_imgs = [tgt_img]
        assert len(tgt_imgs) == 1
        if type(pred_imgs) != list:
            pred_imgs = [pred_imgs]
        perc_sim = 10000
        ssim_sim = -10
        psnr_sim = -10
        assert len(pred_imgs) > 0
        for p_img in pred_imgs:
            t_img = load_img(tgt_imgs[0])
            p_img = load_img(p_img, size=t_img.shape[2:])
            t_perc_sim = perceptual_sim(p_img, t_img, vgg16).item()
            perc_sim = min(perc_sim, t_perc_sim)
            ssim_sim = max(ssim_sim, ssim_metric(p_img, t_img).item())
            psnr_sim = max(psnr_sim, psnr(p_img, t_img).item())
        values_percsim += [perc_sim]
        values_ssim += [ssim_sim]
        # FIX: np.float was removed in NumPy 1.20 and raised AttributeError
        # here; the builtin float("inf") is the correct comparison value.
        if psnr_sim != float("inf"):
            values_psnr += [psnr_sim]
        else:
            # Infinite PSNR: last prediction matched the target exactly (or
            # ambiguously); count it instead of polluting the average.
            if torch.allclose(p_img, t_img):
                equal_count += 1
                print("{} equal src and wrp images.".format(equal_count))
            else:
                ambig_count += 1
                print("{} ambiguous src and wrp images.".format(ambig_count))

    if take_every_other:
        n_valuespercsim = []
        n_valuesssim = []
        n_valuespsnr = []
        for i in range(0, len(values_percsim) // 2):
            n_valuespercsim += [min(values_percsim[2 * i], values_percsim[2 * i + 1])]
            n_valuespsnr += [max(values_psnr[2 * i], values_psnr[2 * i + 1])]
            n_valuesssim += [max(values_ssim[2 * i], values_ssim[2 * i + 1])]
        values_percsim = n_valuespercsim
        values_ssim = n_valuesssim
        values_psnr = n_valuespsnr

    avg_percsim = np.mean(np.array(values_percsim))
    std_percsim = np.std(np.array(values_percsim))
    avg_psnr = np.mean(np.array(values_psnr))
    std_psnr = np.std(np.array(values_psnr))
    avg_ssim = np.mean(np.array(values_ssim))
    std_ssim = np.std(np.array(values_ssim))

    if simple_format:
        # just to make yaml formatting readable
        return {
            "Perceptual similarity": [float(avg_percsim), float(std_percsim)],
            "PSNR": [float(avg_psnr), float(std_psnr)],
            "SSIM": [float(avg_ssim), float(std_ssim)],
        }
    else:
        return {
            "Perceptual similarity": (avg_percsim, std_percsim),
            "PSNR": (avg_psnr, std_psnr),
            "SSIM": (avg_ssim, std_ssim),
        }
def compute_perceptual_similarity_from_list_topk(
    pred_imgs_list, tgt_imgs_list, take_every_other, resize=False
):
    """Like :func:`compute_perceptual_similarity_from_list`, but also records
    every per-prediction score so top-k statistics can be computed later.

    :param pred_imgs_list: per-target *list* of prediction paths (lists only).
    :param tgt_imgs_list: list of target image paths.
    :param take_every_other: unsupported here; asserts if True.
    :param resize: load targets at 256x256 instead of native resolution.
    :return: dict with "avg_of_best" summary stats and "individual" per-sample
        score arrays (n_targets x n_predictions).
    """
    # Load VGG16 for feature similarity
    vgg16 = PNet().to("cuda")
    vgg16.eval()
    vgg16.cuda()

    values_percsim = []
    values_ssim = []
    values_psnr = []
    individual_percsim = []
    individual_ssim = []
    individual_psnr = []
    for i, tgt_img in enumerate(tqdm(tgt_imgs_list)):
        pred_imgs = pred_imgs_list[i]
        tgt_imgs = [tgt_img]
        assert len(tgt_imgs) == 1
        # FIX: was `if type(...) != list: assert False` followed by an
        # unreachable wrap statement — the topk variant requires real lists.
        assert type(pred_imgs) == list, "topk variant requires a list of predictions"
        perc_sim = 10000
        ssim_sim = -10
        psnr_sim = -10
        sample_percsim = list()
        sample_ssim = list()
        sample_psnr = list()
        for p_img in pred_imgs:
            if resize:
                t_img = load_img(tgt_imgs[0], size=(256, 256))
            else:
                t_img = load_img(tgt_imgs[0])
            p_img = load_img(p_img, size=t_img.shape[2:])
            t_perc_sim = perceptual_sim(p_img, t_img, vgg16).item()
            sample_percsim.append(t_perc_sim)
            perc_sim = min(perc_sim, t_perc_sim)
            t_ssim = ssim_metric(p_img, t_img).item()
            sample_ssim.append(t_ssim)
            ssim_sim = max(ssim_sim, t_ssim)
            t_psnr = psnr(p_img, t_img).item()
            sample_psnr.append(t_psnr)
            psnr_sim = max(psnr_sim, t_psnr)
        values_percsim.append(perc_sim)
        values_ssim.append(ssim_sim)
        values_psnr.append(psnr_sim)
        individual_percsim.append(sample_percsim)
        individual_ssim.append(sample_ssim)
        individual_psnr.append(sample_psnr)

    # FIX: the pair-reduction code that followed this assert was dead
    # (unreachable after `assert False`); keep only the guard.
    assert not take_every_other, "Do this later, after specifying topk to get proper results"

    avg_percsim = np.mean(np.array(values_percsim))
    std_percsim = np.std(np.array(values_percsim))
    avg_psnr = np.mean(np.array(values_psnr))
    std_psnr = np.std(np.array(values_psnr))
    avg_ssim = np.mean(np.array(values_ssim))
    std_ssim = np.std(np.array(values_ssim))

    individual_percsim = np.array(individual_percsim)
    individual_psnr = np.array(individual_psnr)
    individual_ssim = np.array(individual_ssim)

    return {
        "avg_of_best": {
            "Perceptual similarity": [float(avg_percsim), float(std_percsim)],
            "PSNR": [float(avg_psnr), float(std_psnr)],
            "SSIM": [float(avg_ssim), float(std_ssim)],
        },
        "individual": {
            "PSIM": individual_percsim,
            "PSNR": individual_psnr,
            "SSIM": individual_ssim,
        },
    }
if __name__ == "__main__":
    # CLI: compare predictions against targets per subfolder and report metrics.
    args = argparse.ArgumentParser()
    args.add_argument("--folder", type=str, default="")
    args.add_argument("--pred_image", type=str, default="")
    args.add_argument("--target_image", type=str, default="")
    args.add_argument("--take_every_other", action="store_true", default=False)
    args.add_argument("--output_file", type=str, default="")

    opts = args.parse_args()

    folder = opts.folder
    pred_img = opts.pred_image
    tgt_img = opts.target_image

    results = compute_perceptual_similarity(
        folder, pred_img, tgt_img, opts.take_every_other
    )

    # FIX: use a context manager so the report file is closed even if a
    # formatting error occurs mid-loop.
    with open(opts.output_file, "w") as f:
        for key in results:
            print("%s for %s: \n" % (key, opts.folder))
            print("\t {:0.4f} | {:0.4f} \n".format(results[key][0], results[key][1]))

            f.write("%s for %s: \n" % (key, opts.folder))
            f.write("\t {:0.4f} | {:0.4f} \n".format(results[key][0], results[key][1]))
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/diffusionmodules/util.py | extern/ldm_zero123/modules/diffusionmodules/util.py | # adopted from
# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
# and
# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
# and
# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
#
# thanks!
import math
import os
import numpy as np
import torch
import torch.nn as nn
from einops import repeat
from extern.ldm_zero123.util import instantiate_from_config
def make_beta_schedule(
    schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3
):
    """Build a diffusion beta schedule as a float64 numpy array of length ``n_timestep``.

    Supported schedules: "linear" (interpolated in sqrt-space, then squared),
    "cosine", "sqrt_linear" (plain linear) and "sqrt".
    """
    if schedule == "linear":
        sqrt_betas = torch.linspace(
            linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64
        )
        betas = sqrt_betas**2
    elif schedule == "cosine":
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        )
        alphas = torch.cos(timesteps / (1 + cosine_s) * np.pi / 2).pow(2)
        alphas = alphas / alphas[0]
        # Betas are the per-step decay ratios, clipped for numerical safety.
        betas = 1 - alphas[1:] / alphas[:-1]
        betas = np.clip(betas, a_min=0, a_max=0.999)
    elif schedule == "sqrt_linear":
        betas = torch.linspace(
            linear_start, linear_end, n_timestep, dtype=torch.float64
        )
    elif schedule == "sqrt":
        betas = (
            torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
            ** 0.5
        )
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    return betas.numpy()
def make_ddim_timesteps(
    ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True
):
    """Select which DDPM timesteps a DDIM sampler will visit.

    "uniform" strides evenly through the DDPM steps; "quad" spaces them
    quadratically (denser near zero).
    """
    if ddim_discr_method == "uniform":
        stride = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, stride)))
    elif ddim_discr_method == "quad":
        ddim_timesteps = (
            (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2
        ).astype(int)
    else:
        raise NotImplementedError(
            f'There is no ddim discretization method called "{ddim_discr_method}"'
        )
    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
    # add one to get the final alpha values right (the ones from first scale to data during sampling)
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f"Selected timesteps for ddim sampler: {steps_out}")
    return steps_out
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    """Compute the DDIM variance schedule (sigmas, alphas, alphas_prev).

    sigma_t follows the formula from https://arxiv.org/abs/2010.02502,
    scaled by ``eta`` (eta=0 yields deterministic DDIM sampling).
    """
    # Cumulative alphas at the selected timesteps.
    alphas = alphacums[ddim_timesteps]
    # Previous-step alphas: shift right by one, seeded with the first alphacum.
    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
    sigmas = eta * np.sqrt(
        (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)
    )
    if verbose:
        print(
            f"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}"
        )
        print(
            f"For the chosen value of eta, which is {eta}, "
            f"this results in the following sigma_t schedule for ddim sampler {sigmas}"
        )
    return sigmas, alphas, alphas_prev
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].
    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    steps = num_diffusion_timesteps
    return np.array(
        [
            min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
            for i in range(steps)
        ]
    )
def extract_into_tensor(a, t, x_shape):
    """Gather per-timestep values ``a[t]`` and reshape to broadcast against ``x_shape``."""
    batch = t.shape[0]
    gathered = a.gather(-1, t)
    # Append singleton dims so the result broadcasts over the non-batch axes.
    return gathered.reshape(batch, *((1,) * (len(x_shape) - 1)))
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        return func(*inputs)
    args = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *args)
class CheckpointFunction(torch.autograd.Function):
    """Gradient checkpointing: forward runs without grad, backward re-runs the
    wrapped function under grad to recompute activations on demand."""

    @staticmethod
    def forward(ctx, run_function, length, *args):
        # Split the flat argument tuple back into actual inputs vs. extra
        # parameters the function depends on (see `checkpoint`).
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        # Detach + require grad so the recomputation builds a fresh graph.
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with torch.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # Leading Nones align with the non-tensor forward args (run_function, length).
        return (None, None) + input_grads
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings (cos half first, then sin half).
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :param repeat_only: if True, tile the raw timestep value across dim instead.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if repeat_only:
        return repeat(timesteps, "b -> b d", d=dim)
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period)
        * torch.arange(start=0, end=half, dtype=torch.float32)
        / half
    ).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
    if dim % 2:
        # Odd dims: pad one zero column so the output width is exactly `dim`.
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
def zero_module(module):
    """Zero all parameters of ``module`` in place and return it."""
    for param in module.parameters():
        # detach() so the in-place zeroing does not touch the autograd graph.
        param.detach().zero_()
    return module
def scale_module(module, scale):
    """Multiply all parameters of ``module`` by ``scale`` in place and return it."""
    for param in module.parameters():
        # detach() so the in-place scaling does not touch the autograd graph.
        param.detach().mul_(scale)
    return module
def mean_flat(tensor):
    """Mean over every dimension except the leading batch dimension."""
    non_batch_dims = list(range(1, len(tensor.shape)))
    return tensor.mean(dim=non_batch_dims)
def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    # 32 groups; GroupNorm32 (defined below) computes in fp32 then restores dtype.
    return GroupNorm32(32, channels)
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
    """Sigmoid-weighted linear unit: x * sigmoid(x)."""

    def forward(self, x):
        return torch.sigmoid(x).mul(x)
class GroupNorm32(nn.GroupNorm):
    """GroupNorm that always normalizes in float32, then restores the input dtype."""

    def forward(self, x):
        normalized = super().forward(x.float())
        return normalized.type(x.dtype)
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    conv_types = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dims not in conv_types:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_types[dims](*args, **kwargs)
def linear(*args, **kwargs):
    """
    Create a linear module.
    """
    # Thin alias kept for parity with the conv_nd/avg_pool_nd factory helpers.
    return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    pool_types = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    if dims not in pool_types:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_types[dims](*args, **kwargs)
class HybridConditioner(nn.Module):
    """Pairs a concat-conditioning encoder with a cross-attention one,
    both built from configs via ``instantiate_from_config``."""

    def __init__(self, c_concat_config, c_crossattn_config):
        super().__init__()
        self.concat_conditioner = instantiate_from_config(c_concat_config)
        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)

    def forward(self, c_concat, c_crossattn):
        # Each conditioning is wrapped in a single-element list, matching the
        # dict format consumed downstream.
        return {
            "c_concat": [self.concat_conditioner(c_concat)],
            "c_crossattn": [self.crossattn_conditioner(c_crossattn)],
        }
def noise_like(shape, device, repeat=False):
    """Gaussian noise of ``shape``; with ``repeat`` one sample is tiled over the batch."""
    if repeat:
        single = torch.randn((1, *shape[1:]), device=device)
        return single.repeat(shape[0], *((1,) * (len(shape) - 1)))
    return torch.randn(shape, device=device)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/diffusionmodules/model.py | extern/ldm_zero123/modules/diffusionmodules/model.py | # pytorch_diffusion + derived encoder decoder
import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from extern.ldm_zero123.modules.attention import LinearAttention
from extern.ldm_zero123.util import instantiate_from_config
def get_timestep_embedding(timesteps, embedding_dim):
    """
    Build sinusoidal timestep embeddings (sin half first, then cos half).

    This matches the implementation in Denoising Diffusion Probabilistic
    Models / tensor2tensor (from Fairseq), differing slightly from
    Section 3.5 of "Attention Is All You Need". Odd dims are zero-padded.
    """
    assert len(timesteps.shape) == 1
    half_dim = embedding_dim // 2
    log_scale = math.log(10000) / (half_dim - 1)
    freqs = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -log_scale)
    freqs = freqs.to(device=timesteps.device)
    angles = timesteps.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
def nonlinearity(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def Normalize(in_channels, num_groups=32):
    # GroupNorm over `num_groups` groups with learnable affine params;
    # eps=1e-6 (tighter than the torch default of 1e-5).
    return torch.nn.GroupNorm(
        num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True
    )
class Upsample(nn.Module):
    """2x nearest-neighbor upsampling, optionally followed by a 3x3 conv."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if with_conv:
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=3, stride=1, padding=1
            )

    def forward(self, x):
        upsampled = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        if self.with_conv:
            upsampled = self.conv(upsampled)
        return upsampled
class Downsample(nn.Module):
    """2x spatial downsampling: strided 3x3 conv (asymmetric pad) or average pooling."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if with_conv:
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=3, stride=2, padding=0
            )

    def forward(self, x):
        if not self.with_conv:
            return torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        # Pad right/bottom by one so the strided conv halves H and W exactly.
        padded = torch.nn.functional.pad(x, (0, 1, 0, 1), mode="constant", value=0)
        return self.conv(padded)
class ResnetBlock(nn.Module):
    """Pre-activation residual block with optional timestep conditioning.

    When input and output channel counts differ, the skip path is projected
    with either a 3x3 conv (``conv_shortcut=True``) or a 1x1 conv.
    """

    def __init__(
        self,
        *,
        in_channels,
        out_channels=None,
        conv_shortcut=False,
        dropout,
        temb_channels=512,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = in_channels if out_channels is None else out_channels
        self.use_conv_shortcut = conv_shortcut
        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(
            in_channels, self.out_channels, kernel_size=3, stride=1, padding=1
        )
        if temb_channels > 0:
            # Projects the timestep embedding into the block's channel space.
            self.temb_proj = torch.nn.Linear(temb_channels, self.out_channels)
        self.norm2 = Normalize(self.out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(
            self.out_channels, self.out_channels, kernel_size=3, stride=1, padding=1
        )
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(
                    in_channels, self.out_channels, kernel_size=3, stride=1, padding=1
                )
            else:
                self.nin_shortcut = torch.nn.Conv2d(
                    in_channels, self.out_channels, kernel_size=1, stride=1, padding=0
                )

    def forward(self, x, temb):
        hidden = self.conv1(nonlinearity(self.norm1(x)))
        if temb is not None:
            # Broadcast the projected timestep embedding over spatial dims.
            hidden = hidden + self.temb_proj(nonlinearity(temb))[:, :, None, None]
        hidden = self.conv2(self.dropout(nonlinearity(self.norm2(hidden))))
        skip = x
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                skip = self.conv_shortcut(x)
            else:
                skip = self.nin_shortcut(x)
        return skip + hidden
class LinAttnBlock(LinearAttention):
    """to match AttnBlock usage"""

    def __init__(self, in_channels):
        # Single head with full channel width, so the constructor signature
        # mirrors AttnBlock(in_channels).
        super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
class AttnBlock(nn.Module):
    """Single-head self-attention over spatial positions (h*w tokens), with residual."""

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels
        self.norm = Normalize(in_channels)

        def make_proj():
            # 1x1 conv acting as a per-pixel linear projection.
            return torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=1, stride=1, padding=0
            )

        self.q = make_proj()
        self.k = make_proj()
        self.v = make_proj()
        self.proj_out = make_proj()

    def forward(self, x):
        normed = self.norm(x)
        q = self.q(normed)
        k = self.k(normed)
        v = self.v(normed)

        b, c, h, w = q.shape
        tokens = h * w
        # Attention weights: softmax over keys of the scaled q·k products.
        q_flat = q.reshape(b, c, tokens).permute(0, 2, 1)  # b, hw, c
        k_flat = k.reshape(b, c, tokens)  # b, c, hw
        attn = torch.bmm(q_flat, k_flat) * (int(c) ** (-0.5))
        attn = torch.nn.functional.softmax(attn, dim=2)

        # Aggregate values with the transposed weights, back to b,c,h,w.
        v_flat = v.reshape(b, c, tokens)
        out = torch.bmm(v_flat, attn.permute(0, 2, 1)).reshape(b, c, h, w)
        return x + self.proj_out(out)
def make_attn(in_channels, attn_type="vanilla"):
    """Factory for attention blocks.

    :param in_channels: channel count of the feature map the block will see.
    :param attn_type: "vanilla" (full softmax attention), "linear"
        (LinAttnBlock) or "none" (identity).
    :return: an nn.Module implementing the requested attention variant.
    """
    assert attn_type in ["vanilla", "linear", "none"], f"attn_type {attn_type} unknown"
    print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
    if attn_type == "vanilla":
        return AttnBlock(in_channels)
    if attn_type == "none":
        # nn.Identity ignores its constructor arguments
        return nn.Identity(in_channels)
    return LinAttnBlock(in_channels)
class Model(nn.Module):
    """UNet-style diffusion backbone: conv stem, down path with ResnetBlocks
    (+ optional attention at the resolutions listed in attn_resolutions),
    a middle block, and a symmetric up path fed by skip connections.
    Optionally conditioned on a sinusoidal timestep embedding."""

    def __init__(
        self,
        *,
        ch,
        out_ch,
        ch_mult=(1, 2, 4, 8),
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        in_channels,
        resolution,
        use_timestep=True,
        use_linear_attn=False,
        attn_type="vanilla",
    ):
        super().__init__()
        if use_linear_attn:
            attn_type = "linear"
        self.ch = ch
        # timestep-embedding width is 4x the base channel count
        self.temb_ch = self.ch * 4
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        self.use_timestep = use_timestep
        if self.use_timestep:
            # timestep embedding
            self.temb = nn.Module()
            self.temb.dense = nn.ModuleList(
                [
                    torch.nn.Linear(self.ch, self.temb_ch),
                    torch.nn.Linear(self.temb_ch, self.temb_ch),
                ]
            )

        # downsampling
        self.conv_in = torch.nn.Conv2d(
            in_channels, self.ch, kernel_size=3, stride=1, padding=1
        )

        curr_res = resolution
        in_ch_mult = (1,) + tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                # attach attention after each res block at selected resolutions
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            skip_in = ch * ch_mult[i_level]
            # one extra block per level to consume the stored skip activation
            for i_block in range(self.num_res_blocks + 1):
                if i_block == self.num_res_blocks:
                    skip_in = ch * in_ch_mult[i_level]
                block.append(
                    ResnetBlock(
                        in_channels=block_in + skip_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(
            block_in, out_ch, kernel_size=3, stride=1, padding=1
        )

    def forward(self, x, t=None, context=None):
        """Run the UNet.

        :param x: input tensor [B, in_channels, H, W].
        :param t: timestep tensor; required when use_timestep is True.
        :param context: optional conditioning, concatenated to x on the
            channel axis (assumed spatially aligned).
        :return: output tensor [B, out_ch, H, W].
        """
        # assert x.shape[2] == x.shape[3] == self.resolution
        if context is not None:
            # assume aligned context, cat along channel axis
            x = torch.cat((x, context), dim=1)
        if self.use_timestep:
            # timestep embedding
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)
            temb = self.temb.dense[1](temb)
        else:
            temb = None

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling: each block consumes one stored skip activation (LIFO)
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](
                    torch.cat([h, hs.pop()], dim=1), temb
                )
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h

    def get_last_layer(self):
        """Return the weight of the final conv (used e.g. for adaptive loss
        balancing)."""
        return self.conv_out.weight
class Encoder(nn.Module):
    """VAE-style encoder: conv stem, downsampling ResnetBlock stack with
    optional attention, middle block, then a conv to z_channels
    (2*z_channels when double_z, for mean and logvar). No timestep
    conditioning (temb_ch == 0)."""

    def __init__(
        self,
        *,
        ch,
        out_ch,
        ch_mult=(1, 2, 4, 8),
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        in_channels,
        resolution,
        z_channels,
        double_z=True,
        use_linear_attn=False,
        attn_type="vanilla",
        **ignore_kwargs,
    ):
        super().__init__()
        if use_linear_attn:
            attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        # downsampling
        self.conv_in = torch.nn.Conv2d(
            in_channels, self.ch, kernel_size=3, stride=1, padding=1
        )

        curr_res = resolution
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(
            block_in,
            2 * z_channels if double_z else z_channels,
            kernel_size=3,
            stride=1,
            padding=1,
        )

    def forward(self, x):
        """Encode x [B, in_channels, H, W] to the latent
        [B, (2*)z_channels, H/2**(levels-1), W/2**(levels-1)]."""
        # timestep embedding (unused in the encoder)
        temb = None

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class Decoder(nn.Module):
    """VAE-style decoder: conv from z_channels, middle block, then an
    upsampling ResnetBlock stack (no encoder skip connections) ending in a
    conv to out_ch. No timestep conditioning (temb_ch == 0)."""

    def __init__(
        self,
        *,
        ch,
        out_ch,
        ch_mult=(1, 2, 4, 8),
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        in_channels,
        resolution,
        z_channels,
        give_pre_end=False,
        tanh_out=False,
        use_linear_attn=False,
        attn_type="vanilla",
        **ignorekwargs,
    ):
        super().__init__()
        if use_linear_attn:
            attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # give_pre_end: skip the final norm/act/conv and return raw features
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out

        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,) + tuple(ch_mult)  # kept for parity with Encoder (unused)
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)
        print(
            "Working with z of shape {} = {} dimensions.".format(
                self.z_shape, np.prod(self.z_shape)
            )
        )

        # z to block_in
        self.conv_in = torch.nn.Conv2d(
            z_channels, block_in, kernel_size=3, stride=1, padding=1
        )

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(
            block_in, out_ch, kernel_size=3, stride=1, padding=1
        )

    def forward(self, z):
        """Decode latent z to an image-space tensor [B, out_ch, H, W]
        (or raw pre-end features when give_pre_end is set)."""
        # assert z.shape[1:] == self.z_shape[1:]
        self.last_z_shape = z.shape

        # timestep embedding (unused in the decoder)
        temb = None

        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        if self.give_pre_end:
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        if self.tanh_out:
            h = torch.tanh(h)
        return h
class SimpleDecoder(nn.Module):
    """Minimal decoder: 1x1 conv, three ResnetBlocks (expand then contract
    channels), 1x1 conv back to in_channels, a 2x upsample, and finally
    norm + nonlinearity + 3x3 conv to out_channels."""

    def __init__(self, in_channels, out_channels, *args, **kwargs):
        super().__init__()
        # indices 1-3 of this list are ResnetBlocks and take a temb argument
        self.model = nn.ModuleList(
            [
                nn.Conv2d(in_channels, in_channels, 1),
                ResnetBlock(
                    in_channels=in_channels,
                    out_channels=2 * in_channels,
                    temb_channels=0,
                    dropout=0.0,
                ),
                ResnetBlock(
                    in_channels=2 * in_channels,
                    out_channels=4 * in_channels,
                    temb_channels=0,
                    dropout=0.0,
                ),
                ResnetBlock(
                    in_channels=4 * in_channels,
                    out_channels=2 * in_channels,
                    temb_channels=0,
                    dropout=0.0,
                ),
                nn.Conv2d(2 * in_channels, in_channels, 1),
                Upsample(in_channels, with_conv=True),
            ]
        )
        # end
        self.norm_out = Normalize(in_channels)
        self.conv_out = torch.nn.Conv2d(
            in_channels, out_channels, kernel_size=3, stride=1, padding=1
        )

    def forward(self, x):
        """Decode x through the layer stack and the output head."""
        resnet_indices = {1, 2, 3}
        for idx, layer in enumerate(self.model):
            # ResnetBlocks expect (x, temb); everything else just (x)
            x = layer(x, None) if idx in resnet_indices else layer(x)
        out = self.norm_out(x)
        out = nonlinearity(out)
        return self.conv_out(out)
class UpsampleDecoder(nn.Module):
    """Decoder made only of ResnetBlock stacks with an Upsample between
    resolution levels (no attention, no skip connections)."""

    def __init__(
        self,
        in_channels,
        out_channels,
        ch,
        num_res_blocks,
        resolution,
        ch_mult=(2, 2),
        dropout=0.0,
    ):
        super().__init__()
        # upsampling
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        block_in = in_channels
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.res_blocks = nn.ModuleList()
        self.upsample_blocks = nn.ModuleList()
        for level in range(self.num_resolutions):
            level_blocks = []
            block_out = ch * ch_mult[level]
            # one extra block per level, mirroring the other decoders
            for _ in range(self.num_res_blocks + 1):
                level_blocks.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
            self.res_blocks.append(nn.ModuleList(level_blocks))
            if level != self.num_resolutions - 1:
                self.upsample_blocks.append(Upsample(block_in, True))
                curr_res = curr_res * 2
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(
            block_in, out_channels, kernel_size=3, stride=1, padding=1
        )

    def forward(self, x):
        """Run all res blocks per level, upsampling between levels."""
        h = x
        for level in range(self.num_resolutions):
            for res_block in self.res_blocks[level]:
                h = res_block(h, None)
            if level != self.num_resolutions - 1:
                h = self.upsample_blocks[level](h)
        h = self.norm_out(h)
        h = nonlinearity(h)
        return self.conv_out(h)
class LatentRescaler(nn.Module):
    """Rescale a latent spatially by `factor` via interpolation, sandwiched
    between two ResnetBlock stacks with an attention block in the middle."""

    def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
        super().__init__()
        # residual block, interpolate, residual block
        self.factor = factor
        self.conv_in = nn.Conv2d(
            in_channels, mid_channels, kernel_size=3, stride=1, padding=1
        )

        def _res_stack():
            # `depth` identity-width ResnetBlocks at mid_channels
            return nn.ModuleList(
                ResnetBlock(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    temb_channels=0,
                    dropout=0.0,
                )
                for _ in range(depth)
            )

        self.res_block1 = _res_stack()
        self.attn = AttnBlock(mid_channels)
        self.res_block2 = _res_stack()

        self.conv_out = nn.Conv2d(
            mid_channels,
            out_channels,
            kernel_size=1,
        )

    def forward(self, x):
        """conv_in -> res stack -> rescale -> attention -> res stack -> conv_out."""
        h = self.conv_in(x)
        for blk in self.res_block1:
            h = blk(h, None)
        new_h = int(round(h.shape[2] * self.factor))
        new_w = int(round(h.shape[3] * self.factor))
        h = torch.nn.functional.interpolate(h, size=(new_h, new_w))
        h = self.attn(h)
        for blk in self.res_block2:
            h = blk(h, None)
        return self.conv_out(h)
class MergedRescaleEncoder(nn.Module):
    """An Encoder followed by a LatentRescaler, fused into one module."""

    def __init__(
        self,
        in_channels,
        ch,
        resolution,
        out_ch,
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        ch_mult=(1, 2, 4, 8),
        rescale_factor=1.0,
        rescale_module_depth=1,
    ):
        super().__init__()
        # encoder output width at the deepest level
        intermediate_chn = ch * ch_mult[-1]
        self.encoder = Encoder(
            in_channels=in_channels,
            num_res_blocks=num_res_blocks,
            ch=ch,
            ch_mult=ch_mult,
            z_channels=intermediate_chn,
            double_z=False,
            resolution=resolution,
            attn_resolutions=attn_resolutions,
            dropout=dropout,
            resamp_with_conv=resamp_with_conv,
            out_ch=None,
        )
        self.rescaler = LatentRescaler(
            factor=rescale_factor,
            in_channels=intermediate_chn,
            mid_channels=intermediate_chn,
            out_channels=out_ch,
            depth=rescale_module_depth,
        )

    def forward(self, x):
        """Encode, then spatially rescale the latent."""
        return self.rescaler(self.encoder(x))
class MergedRescaleDecoder(nn.Module):
    """A LatentRescaler followed by a Decoder, fused into one module."""

    def __init__(
        self,
        z_channels,
        out_ch,
        resolution,
        num_res_blocks,
        attn_resolutions,
        ch,
        ch_mult=(1, 2, 4, 8),
        dropout=0.0,
        resamp_with_conv=True,
        rescale_factor=1.0,
        rescale_module_depth=1,
    ):
        super().__init__()
        # channel width handed from the rescaler to the decoder
        tmp_chn = z_channels * ch_mult[-1]
        self.decoder = Decoder(
            out_ch=out_ch,
            z_channels=tmp_chn,
            attn_resolutions=attn_resolutions,
            dropout=dropout,
            resamp_with_conv=resamp_with_conv,
            in_channels=None,
            num_res_blocks=num_res_blocks,
            ch_mult=ch_mult,
            resolution=resolution,
            ch=ch,
        )
        self.rescaler = LatentRescaler(
            factor=rescale_factor,
            in_channels=z_channels,
            mid_channels=tmp_chn,
            out_channels=tmp_chn,
            depth=rescale_module_depth,
        )

    def forward(self, x):
        """Rescale the latent, then decode it."""
        return self.decoder(self.rescaler(x))
class Upsampler(nn.Module):
    """Upsample latents from in_size to out_size: a LatentRescaler handles the
    fractional remainder, a Decoder handles the power-of-two doubling."""

    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
        super().__init__()
        assert out_size >= in_size
        # number of 2x decoder levels needed for the integral part
        num_blocks = int(np.log2(out_size // in_size)) + 1
        # fractional remainder handled by the rescaler
        factor_up = 1.0 + (out_size % in_size)
        print(
            f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}"
        )
        self.rescaler = LatentRescaler(
            factor=factor_up,
            in_channels=in_channels,
            mid_channels=2 * in_channels,
            out_channels=in_channels,
        )
        self.decoder = Decoder(
            out_ch=out_channels,
            resolution=out_size,
            z_channels=in_channels,
            num_res_blocks=2,
            attn_resolutions=[],
            in_channels=None,
            ch=in_channels,
            ch_mult=[ch_mult for _ in range(num_blocks)],
        )

    def forward(self, x):
        """Rescale, then decode to the target size."""
        return self.decoder(self.rescaler(x))
class Resize(nn.Module):
    """Resize a feature map by an arbitrary scale factor via interpolation.

    :param in_channels: only needed for the (unimplemented) learned path.
    :param learned: if True, a learned conv resampler was intended; this path
        is not implemented and raises NotImplementedError.
    :param mode: interpolation mode passed to F.interpolate.
    """

    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
        super().__init__()
        self.with_conv = learned
        self.mode = mode
        if self.with_conv:
            # bugfix: was `self.__class__.__name` (no trailing underscores),
            # which raised AttributeError instead of printing the note.
            print(
                f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode"
            )
            raise NotImplementedError()
            # dead code below the raise, kept from the original sketch:
            assert in_channels is not None
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=4, stride=2, padding=1
            )

    def forward(self, x, scale_factor=1.0):
        """Return x unchanged for scale_factor 1.0, else interpolate by it."""
        if scale_factor == 1.0:
            return x
        else:
            x = torch.nn.functional.interpolate(
                x, mode=self.mode, align_corners=False, scale_factor=scale_factor
            )
        return x
class FirstStagePostProcessor(nn.Module):
    """Encode inputs with a frozen pretrained first-stage model, then project
    and downsample the resulting latent through a small ResnetBlock stack.

    Exactly one of pretrained_model / pretrained_config must be provided.
    """

    def __init__(
        self,
        ch_mult: list,
        in_channels,
        pretrained_model: nn.Module = None,
        reshape=False,
        n_channels=None,
        dropout=0.0,
        pretrained_config=None,
    ):
        super().__init__()
        if pretrained_config is None:
            assert (
                pretrained_model is not None
            ), 'Either "pretrained_model" or "pretrained_config" must not be None'
            self.pretrained_model = pretrained_model
        else:
            assert (
                pretrained_config is not None
            ), 'Either "pretrained_model" or "pretrained_config" must not be None'
            self.instantiate_pretrained(pretrained_config)

        self.do_reshape = reshape

        if n_channels is None:
            n_channels = self.pretrained_model.encoder.ch

        self.proj_norm = Normalize(in_channels, num_groups=in_channels // 2)
        self.proj = nn.Conv2d(
            in_channels, n_channels, kernel_size=3, stride=1, padding=1
        )

        blocks = []
        downs = []
        ch_in = n_channels
        for m in ch_mult:
            # NOTE(review): ResnetBlock is built without temb_channels here;
            # forward() passes temb=None, so this relies on ResnetBlock's
            # default temb handling — confirm against ResnetBlock's signature.
            blocks.append(
                ResnetBlock(
                    in_channels=ch_in, out_channels=m * n_channels, dropout=dropout
                )
            )
            ch_in = m * n_channels
            downs.append(Downsample(ch_in, with_conv=False))

        self.model = nn.ModuleList(blocks)
        self.downsampler = nn.ModuleList(downs)

    def instantiate_pretrained(self, config):
        """Build the pretrained model from config and freeze all parameters."""
        model = instantiate_from_config(config)
        self.pretrained_model = model.eval()
        # self.pretrained_model.train = False
        for param in self.pretrained_model.parameters():
            param.requires_grad = False

    @torch.no_grad()
    def encode_with_pretrained(self, x):
        """Encode x with the frozen model; take the distribution mode if the
        encoder returns a DiagonalGaussianDistribution."""
        c = self.pretrained_model.encode(x)
        if isinstance(c, DiagonalGaussianDistribution):
            c = c.mode()
        return c

    def forward(self, x):
        """Encode, project, then run the ResnetBlock/Downsample pairs.
        When do_reshape is set, flatten spatial dims to [B, H*W, C]."""
        z_fs = self.encode_with_pretrained(x)
        z = self.proj_norm(z_fs)
        z = self.proj(z)
        z = nonlinearity(z)

        for submodel, downmodel in zip(self.model, self.downsampler):
            z = submodel(z, temb=None)
            z = downmodel(z)

        if self.do_reshape:
            z = rearrange(z, "b c h w -> b (h w) c")
        return z
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/diffusionmodules/openaimodel.py | extern/ldm_zero123/modules/diffusionmodules/openaimodel.py | import math
from abc import abstractmethod
from functools import partial
from typing import Iterable
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from extern.ldm_zero123.modules.attention import SpatialTransformer
from extern.ldm_zero123.modules.diffusionmodules.util import (
avg_pool_nd,
checkpoint,
conv_nd,
linear,
normalization,
timestep_embedding,
zero_module,
)
from extern.ldm_zero123.util import exists
# dummy replace
def convert_module_to_f16(x):
    """Intentional no-op placeholder for fp16 module conversion
    (see the "dummy replace" note above)."""
    pass
def convert_module_to_f32(x):
    """Intentional no-op placeholder for fp32 module conversion
    (see the "dummy replace" note above)."""
    pass
## go
class AttentionPool2d(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py

    Pools a spatial feature map to a single vector: the spatial mean is
    prepended as a query-like token, learned positional embeddings are added,
    QKV attention is applied, and the first token's output is returned.
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        # one position per spatial location, plus one for the mean token
        self.positional_embedding = nn.Parameter(
            th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5
        )
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        """Pool x [N, C, *spatial] to [N, output_dim or C]."""
        b, c, *_spatial = x.shape
        x = x.reshape(b, c, -1)  # NC(HW)
        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
        x = self.qkv_proj(x)
        x = self.attention(x)
        x = self.c_proj(x)
        # return only the pooled (mean) token
        return x[:, :, 0]
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.

    Used by TimestepEmbedSequential to decide which children receive `emb`.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential container that routes the timestep embedding to TimestepBlock
    children and the cross-attention context to SpatialTransformer children;
    all other children receive only the activations.
    """

    def forward(self, x, emb, context=None):
        h = x
        for module in self:
            if isinstance(module, TimestepBlock):
                h = module(h, emb)
            elif isinstance(module, SpatialTransformer):
                h = module(h, context)
            else:
                h = module(h)
        return h
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(
                dims, self.channels, self.out_channels, 3, padding=padding
            )

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # a 3D signal is only upsampled in its inner two dimensions
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            x = F.interpolate(x, target, mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x
class TransposedUpsample(nn.Module):
    """Learned 2x upsampling via a stride-2 transposed convolution
    (no padding)."""

    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.up = nn.ConvTranspose2d(
            self.channels,
            self.out_channels,
            kernel_size=ks,
            stride=2,
        )

    def forward(self, x):
        return self.up(x)
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D signals keep their first (depth) dimension
        stride = (1, 2, 2) if dims == 3 else 2
        if use_conv:
            self.op = conv_nd(
                dims,
                self.channels,
                self.out_channels,
                3,
                stride=stride,
                padding=padding,
            )
        else:
            # pooling cannot change the channel count
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                # scale-shift norm needs two vectors (scale and shift)
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            # zero-initialized so the block starts as an identity residual
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )

    def _forward(self, x, emb):
        if self.updown:
            # apply the resample between norm/act and the conv so both the
            # residual branch and the skip path are resampled consistently
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # broadcast the embedding over all spatial dimensions
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM-style conditioning: norm, then scale and shift by emb
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        # zero-initialized projection: block starts as an identity residual
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # NOTE: checkpointing is hard-coded to True here, ignoring
        # self.use_checkpoint (existing TODO below).
        return checkpoint(
            self._forward, (x,), self.parameters(), True
        )  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
        # return pt_checkpoint(self._forward, x)  # pytorch

    def _forward(self, x):
        # flatten all spatial dims, attend over positions, restore shape
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, channels, *spatial_dims = y[0].shape
    spatial_size = int(np.prod(spatial_dims))
    # Two matmuls of identical cost: one for the attention-weight matrix,
    # one for the weighted combination of the value vectors.
    flops = 2 * batch * (spatial_size**2) * channels
    model.total_ops += th.DoubleTensor([flops])
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        # fold heads into the batch dim first, then split q/k/v per head
        q, k, v = qkv.reshape(batch * self.n_heads, head_dim * 3, length).split(
            head_dim, dim=1
        )
        # scale q and k each by head_dim**-0.25 (together head_dim**-0.5);
        # more stable with f16 than dividing afterwards
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        logits = th.einsum("bct,bcs->bts", q * scale, k * scale)
        weight = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", weight, v)
        return out.reshape(batch, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        # split q/k/v first, then fold heads into the batch dim via view
        q, k, v = qkv.chunk(3, dim=1)
        # scale q and k each by head_dim**-0.25 (together head_dim**-0.5);
        # more stable with f16 than dividing afterwards
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        logits = th.einsum(
            "bct,bcs->bts",
            (q * scale).view(batch * self.n_heads, head_dim, length),
            (k * scale).view(batch * self.n_heads, head_dim, length),
        )
        weight = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum(
            "bts,bcs->bct", weight, v.reshape(batch * self.n_heads, head_dim, length)
        )
        return out.reshape(batch, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param num_classes: if specified (as an int), then this model will be
class-conditional with `num_classes` classes.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
:param num_heads_channels: if specified, ignore num_heads and instead use
a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,  # custom transformer support
        transformer_depth=1,  # custom transformer support
        context_dim=None,  # custom transformer support
        n_embed=None,  # custom support for prediction of discrete ids into codebook of first stage vq model
        legacy=True,
        disable_self_attentions=None,
        num_attention_blocks=None,
    ):
        """Build the UNet: timestep embedding MLP, encoder (``input_blocks``),
        bottleneck (``middle_block``), decoder (``output_blocks``) and the
        final output projection. See the class docstring for the meaning of
        each parameter.
        """
        super().__init__()
        # Cross-attention conditioning and the spatial transformer must be
        # enabled together: each asserts the presence of the other.
        if use_spatial_transformer:
            assert (
                context_dim is not None
            ), "Fool!! You forgot to include the dimension of your cross-attention conditioning..."

        if context_dim is not None:
            assert (
                use_spatial_transformer
            ), "Fool!! You forgot to use the spatial transformer for your cross-attention conditioning..."
            from omegaconf.listconfig import ListConfig

            # Normalize an OmegaConf ListConfig to a plain Python list so
            # downstream code sees an ordinary sequence.
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        # Exactly one of num_heads / num_head_channels must be supplied; the
        # other is derived per resolution further below.
        if num_heads == -1:
            assert (
                num_head_channels != -1
            ), "Either num_heads or num_head_channels has to be set"

        if num_head_channels == -1:
            assert (
                num_heads != -1
            ), "Either num_heads or num_head_channels has to be set"

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        # num_res_blocks may be a single int (same count at every level) or a
        # per-level list/tuple matching channel_mult; store it as a list.
        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError(
                    "provide num_res_blocks either as an int (globally constant) or "
                    "as a list/tuple (per-level) with the same length as channel_mult"
                )
            self.num_res_blocks = num_res_blocks
        # self.num_res_blocks = num_res_blocks

        if disable_self_attentions is not None:
            # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
            assert len(disable_self_attentions) == len(channel_mult)
        if num_attention_blocks is not None:
            # Per-level cap on how many res blocks get attention appended;
            # must never exceed the number of res blocks at that level.
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(
                map(
                    lambda i: self.num_res_blocks[i] >= num_attention_blocks[i],
                    range(len(num_attention_blocks)),
                )
            )
            print(
                f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
                f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
                f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
                f"attention will still not be set."
            )  # todo: convert to warning

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        # Timestep embedding MLP: model_channels -> 4 * model_channels.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            # Class-conditional: a label embedding is added to the time
            # embedding in forward().
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        # ---------------- encoder (downsampling path) ----------------
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        # Channel widths of every encoder stage, consumed (popped) by the
        # decoder for skip connections.
        input_block_chans = [model_channels]
        ch = model_channels  # current channel width
        ds = 1  # current downsampling factor (compared against attention_resolutions)
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    # Derive head count / head width for this channel width.
                    # NOTE: num_heads is rebound here when num_head_channels
                    # is set, which also affects later iterations.
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        # num_heads = 1
                        dim_head = (
                            ch // num_heads
                            if use_spatial_transformer
                            else num_head_channels
                        )
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    # Append attention only for the first num_attention_blocks
                    # res blocks at this level (all of them when unset).
                    if (
                        not exists(num_attention_blocks)
                        or nr < num_attention_blocks[level]
                    ):
                        layers.append(
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            )
                            if not use_spatial_transformer
                            else SpatialTransformer(
                                ch,
                                num_heads,
                                dim_head,
                                depth=transformer_depth,
                                context_dim=context_dim,
                                disable_self_attn=disabled_sa,
                            )
                        )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # Downsample between levels (skipped after the last level).
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        # ---------------- bottleneck ----------------
        # Same head derivation as above, at the deepest channel width.
        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            # num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            )
            if not use_spatial_transformer
            else SpatialTransformer(  # always uses a self-attn
                ch,
                num_heads,
                dim_head,
                depth=transformer_depth,
                context_dim=context_dim,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        # ---------------- decoder (upsampling path) ----------------
        # Mirrors the encoder in reverse, with one extra res block per level
        # to consume the downsample stage's skip connection.
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(self.num_res_blocks[level] + 1):
                ich = input_block_chans.pop()  # skip-connection channels
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        # num_heads = 1
                        dim_head = (
                            ch // num_heads
                            if use_spatial_transformer
                            else num_head_channels
                        )
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False
                    if (
                        not exists(num_attention_blocks)
                        or i < num_attention_blocks[level]
                    ):
                        layers.append(
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads_upsample,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            )
                            if not use_spatial_transformer
                            else SpatialTransformer(
                                ch,
                                num_heads,
                                dim_head,
                                depth=transformer_depth,
                                context_dim=context_dim,
                                disable_self_attn=disabled_sa,
                            )
                        )
                # Upsample at the end of each level except the outermost.
                if level and i == self.num_res_blocks[level]:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        # Final projection to out_channels; zero-initialized conv.
        # NOTE(review): uses model_channels (not ch) as input width — assumes
        # channel_mult[0] == 1 so ch == model_channels here; confirm for
        # non-standard channel_mult.
        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            # Optional head predicting discrete codebook ids (n_embed logits).
            self.id_predictor = nn.Sequential(
                normalization(ch),
                conv_nd(dims, model_channels, n_embed, 1),
                # nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
            )
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param context: conditioning plugged in via crossattn
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
hs = []
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
emb = self.time_embed(t_emb)
if self.num_classes is not None:
assert y.shape == (x.shape[0],)
emb = emb + self.label_emb(y)
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb, context)
hs.append(h)
h = self.middle_block(h, emb, context)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb, context)
h = h.type(x.dtype)
if self.predict_codebook_ids:
return self.id_predictor(h)
else:
return self.out(h)
class EncoderUNetModel(nn.Module):
"""
The half UNet model with attention and timestep embedding.
For usage, see UNet.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
pool="adaptive",
*args,
**kwargs,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
self.input_blocks = nn.ModuleList(
[
TimestepEmbedSequential(
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | true |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/extern/ldm_zero123/modules/diffusionmodules/__init__.py | extern/ldm_zero123/modules/diffusionmodules/__init__.py | python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false | |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/__init__.py | threestudio/__init__.py | __modules__ = {}
def register(name):
def decorator(cls):
__modules__[name] = cls
return cls
return decorator
def find(name):
return __modules__[name]
### grammar sugar for logging utilities ###
import logging
logger = logging.getLogger("pytorch_lightning")
from pytorch_lightning.utilities.rank_zero import (
rank_zero_debug,
rank_zero_info,
rank_zero_only,
)
debug = rank_zero_debug
info = rank_zero_info
@rank_zero_only
def warn(*args, **kwargs):
logger.warn(*args, **kwargs)
from . import data, models, systems
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/scripts/zero123_demo.py | threestudio/scripts/zero123_demo.py | # 1. Generate using StableDiffusionXL https://clipdrop.co/stable-diffusion
# 2. Remove background https://clipdrop.co/remove-background
# 3. Resize to 512x512 https://www.iloveimg.com/resize-image
# (OPTIONAL)
# 4. Estimate depth and normal https://omnidata.vision/demo/ (I used Omnidata Normal (with X-TC & 3DCC), and MiDaS Depth)
# (OPTIONAL)
# 5. Convert depth image from RGB to greyscale
def depth_rgb_to_grey(depth_filename):
# depth_filename = "image_depth.png"
import cv2
import numpy as np
# import shutil
# shutil.copyfile(depth_filename, depth_filename.replace("_depth", "_depth_orig"))
depth = cv2.imread(depth_filename)
depth = cv2.cvtColor(depth, cv2.COLOR_BGR2GRAY)
mask = (
cv2.resize(
cv2.imread(depth_filename.replace("_depth", "_rgba"), cv2.IMREAD_UNCHANGED)[
:, :, -1
],
depth.shape,
)
> 0
)
# depth[mask] = (depth[mask] - depth.min()) / (depth.max() - depth.min() + 1e-9)
depth = (depth - depth.min()) / (depth.max() - depth.min() + 1e-9)
depth[~mask] = 0
depth = (depth * 255).astype(np.uint8)
cv2.imwrite(depth_filename, depth)
# (OPTIONAL)
# 6. Mask normal
def normal_mask(normal_filename):
# filename = "image_normal.png"
import cv2
# import shutil
# shutil.copyfile(normal_filename, normal_filename.replace("_normal", "_normal_orig"))
normal = cv2.imread(normal_filename)
mask = (
cv2.resize(
cv2.imread(
normal_filename.replace("_normal", "_rgba"), cv2.IMREAD_UNCHANGED
)[:, :, -1],
normal.shape[:2],
)
> 0
)
normal[~mask] = 0
cv2.imwrite(normal_filename, normal)
# 5. Run Zero123
# python launch.py --config configs/zero123.yaml --train data.image_path=./load/images/grootplant_rgba.png
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/scripts/make_training_vid.py | threestudio/scripts/make_training_vid.py | # make_training_vid("outputs/zero123/64_teddy_rgba.png@20230627-195615", frames_per_vid=30, fps=20, max_iters=200)
import argparse
import glob
import os
import imageio
import numpy as np
from PIL import Image, ImageDraw
from tqdm import tqdm
def draw_text_in_image(img, texts):
img = Image.fromarray(img)
draw = ImageDraw.Draw(img)
black, white = (0, 0, 0), (255, 255, 255)
for i, text in enumerate(texts):
draw.text((2, (img.size[1] // len(texts)) * i + 1), f"{text}", white)
draw.text((0, (img.size[1] // len(texts)) * i + 1), f"{text}", white)
draw.text((2, (img.size[1] // len(texts)) * i - 1), f"{text}", white)
draw.text((0, (img.size[1] // len(texts)) * i - 1), f"{text}", white)
draw.text((1, (img.size[1] // len(texts)) * i), f"{text}", black)
return np.asarray(img)
def make_training_vid(exp, frames_per_vid=1, fps=3, max_iters=None, max_vids=None):
# exp = "/admin/home-vikram/git/threestudio/outputs/zero123/64_teddy_rgba.png@20230627-195615"
files = glob.glob(os.path.join(exp, "save", "*.mp4"))
if os.path.join(exp, "save", "training_vid.mp4") in files:
files.remove(os.path.join(exp, "save", "training_vid.mp4"))
its = [int(os.path.basename(file).split("-")[0].split("it")[-1]) for file in files]
it_sort = np.argsort(its)
files = list(np.array(files)[it_sort])
its = list(np.array(its)[it_sort])
max_vids = max_iters // its[0] if max_iters is not None else max_vids
files, its = files[:max_vids], its[:max_vids]
frames, i = [], 0
for it, file in tqdm(zip(its, files), total=len(files)):
vid = imageio.mimread(file)
for _ in range(frames_per_vid):
frame = vid[i % len(vid)]
frame = draw_text_in_image(frame, [str(it)])
frames.append(frame)
i += 1
# Save
imageio.mimwrite(os.path.join(exp, "save", "training_vid.mp4"), frames, fps=fps)
def join(file1, file2, name):
# file1 = "/admin/home-vikram/git/threestudio/outputs/zero123/OLD_64_dragon2_rgba.png@20230629-023028/save/it200-val.mp4"
# file2 = "/admin/home-vikram/git/threestudio/outputs/zero123/64_dragon2_rgba.png@20230628-152734/save/it200-val.mp4"
vid1 = imageio.mimread(file1)
vid2 = imageio.mimread(file2)
frames = []
for f1, f2 in zip(vid1, vid2):
frames.append(
np.concatenate([f1[:, : f1.shape[0]], f2[:, : f2.shape[0]]], axis=1)
)
imageio.mimwrite(name, frames)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--exp", help="directory of experiment")
parser.add_argument(
"--frames_per_vid", type=int, default=1, help="# of frames from each val vid"
)
parser.add_argument("--fps", type=int, help="max # of iters to save")
parser.add_argument("--max_iters", type=int, help="max # of iters to save")
parser.add_argument(
"--max_vids",
type=int,
help="max # of val videos to save. Will be overridden by max_iters",
)
args = parser.parse_args()
make_training_vid(
args.exp, args.frames_per_vid, args.fps, args.max_iters, args.max_vids
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/scripts/run_zero123_sbatch.py | threestudio/scripts/run_zero123_sbatch.py | import os
import time
files = [
"~/git/threestudio/load/images/dog1_rgba.png",
"~/git/threestudio/load/images/dragon2_rgba.png",
]
for file in files:
name = os.path.basename(file).split("_rgba.png")[0]
with open(
os.path.expanduser("~/git/threestudio/threestudio/scripts/zero123_sbatch.sh"),
"w",
) as f:
f.write("#!/bin/bash\n")
f.write(f"#SBATCH --job-name=vikky_{name}\n")
f.write("#SBATCH --account=mod3d\n")
f.write("#SBATCH --partition=g40\n")
f.write("#SBATCH --gpus=1\n")
f.write("#SBATCH --time=0-00:07:00\n")
f.write("conda activate three\n")
f.write("cd ~/git/threestudio/\n")
f.write(f"NAME={name}\n")
# Phase 1
f.write(
"python launch.py --config configs/zero123.yaml --train data.image_path=./load/images/${NAME}_rgba.png use_timestamp=true name=${NAME} tag=Phase1 system.loggers.wandb.enable=false system.loggers.wandb.project='zero123' system.loggers.wandb.name=${NAME}_Phase1\n"
)
# # Phase 1.5
# f.write(
# "python launch.py --config configs/zero123-geometry.yaml --train data.image_path=./load/images/${NAME}_rgba.png system.geometry_convert_from=./outputs/${NAME}/Phase1/ckpts/last.ckpt use_timestamp=False name=${NAME} tag=Phase1p5 system.loggers.wandb.enable=true system.loggers.wandb.project='zero123' system.loggers.wandb.name=${NAME}_Phase1p5\n"
# )
os.system("sbatch ~/git/threestudio/threestudio/scripts/zero123_sbatch.sh")
time.sleep(1)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/estimators.py | threestudio/models/estimators.py | from typing import Callable, List, Optional, Tuple
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import torch
from nerfacc.data_specs import RayIntervals
from nerfacc.estimators.base import AbstractEstimator
from nerfacc.pdf import importance_sampling, searchsorted
from nerfacc.volrend import render_transmittance_from_density
from torch import Tensor
class ImportanceEstimator(AbstractEstimator):
def __init__(
self,
) -> None:
super().__init__()
@torch.no_grad()
def sampling(
self,
prop_sigma_fns: List[Callable],
prop_samples: List[int],
num_samples: int,
# rendering options
n_rays: int,
near_plane: float,
far_plane: float,
sampling_type: Literal["uniform", "lindisp"] = "uniform",
# training options
stratified: bool = False,
requires_grad: bool = False,
) -> Tuple[Tensor, Tensor]:
"""Sampling with CDFs from proposal networks.
Args:
prop_sigma_fns: Proposal network evaluate functions. It should be a list
of functions that take in samples {t_starts (n_rays, n_samples),
t_ends (n_rays, n_samples)} and returns the post-activation densities
(n_rays, n_samples).
prop_samples: Number of samples to draw from each proposal network. Should
be the same length as `prop_sigma_fns`.
num_samples: Number of samples to draw in the end.
n_rays: Number of rays.
near_plane: Near plane.
far_plane: Far plane.
sampling_type: Sampling type. Either "uniform" or "lindisp". Default to
"lindisp".
stratified: Whether to use stratified sampling. Default to `False`.
Returns:
A tuple of {Tensor, Tensor}:
- **t_starts**: The starts of the samples. Shape (n_rays, num_samples).
- **t_ends**: The ends of the samples. Shape (n_rays, num_samples).
"""
assert len(prop_sigma_fns) == len(prop_samples), (
"The number of proposal networks and the number of samples "
"should be the same."
)
cdfs = torch.cat(
[
torch.zeros((n_rays, 1), device=self.device),
torch.ones((n_rays, 1), device=self.device),
],
dim=-1,
)
intervals = RayIntervals(vals=cdfs)
for level_fn, level_samples in zip(prop_sigma_fns, prop_samples):
intervals, _ = importance_sampling(
intervals, cdfs, level_samples, stratified
)
t_vals = _transform_stot(
sampling_type, intervals.vals, near_plane, far_plane
)
t_starts = t_vals[..., :-1]
t_ends = t_vals[..., 1:]
with torch.set_grad_enabled(requires_grad):
sigmas = level_fn(t_starts, t_ends)
assert sigmas.shape == t_starts.shape
trans, _ = render_transmittance_from_density(t_starts, t_ends, sigmas)
cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[:, :1])], dim=-1)
intervals, _ = importance_sampling(intervals, cdfs, num_samples, stratified)
t_vals_fine = _transform_stot(
sampling_type, intervals.vals, near_plane, far_plane
)
t_vals = torch.cat([t_vals, t_vals_fine], dim=-1)
t_vals, _ = torch.sort(t_vals, dim=-1)
t_starts_ = t_vals[..., :-1]
t_ends_ = t_vals[..., 1:]
return t_starts_, t_ends_
def _transform_stot(
transform_type: Literal["uniform", "lindisp"],
s_vals: torch.Tensor,
t_min: torch.Tensor,
t_max: torch.Tensor,
) -> torch.Tensor:
if transform_type == "uniform":
_contract_fn, _icontract_fn = lambda x: x, lambda x: x
elif transform_type == "lindisp":
_contract_fn, _icontract_fn = lambda x: 1 / x, lambda x: 1 / x
else:
raise ValueError(f"Unknown transform_type: {transform_type}")
s_min, s_max = _contract_fn(t_min), _contract_fn(t_max)
icontract_fn = lambda s: _icontract_fn(s * s_max + (1 - s) * s_min)
return icontract_fn(s_vals)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/isosurface.py | threestudio/models/isosurface.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.mesh import Mesh
from threestudio.utils.typing import *
class IsosurfaceHelper(nn.Module):
points_range: Tuple[float, float] = (0, 1)
@property
def grid_vertices(self) -> Float[Tensor, "N 3"]:
raise NotImplementedError
class MarchingCubeCPUHelper(IsosurfaceHelper):
def __init__(self, resolution: int) -> None:
super().__init__()
self.resolution = resolution
import mcubes
self.mc_func: Callable = mcubes.marching_cubes
self._grid_vertices: Optional[Float[Tensor, "N3 3"]] = None
self._dummy: Float[Tensor, "..."]
self.register_buffer(
"_dummy", torch.zeros(0, dtype=torch.float32), persistent=False
)
@property
def grid_vertices(self) -> Float[Tensor, "N3 3"]:
if self._grid_vertices is None:
# keep the vertices on CPU so that we can support very large resolution
x, y, z = (
torch.linspace(*self.points_range, self.resolution),
torch.linspace(*self.points_range, self.resolution),
torch.linspace(*self.points_range, self.resolution),
)
x, y, z = torch.meshgrid(x, y, z, indexing="ij")
verts = torch.cat(
[x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)], dim=-1
).reshape(-1, 3)
self._grid_vertices = verts
return self._grid_vertices
def forward(
self,
level: Float[Tensor, "N3 1"],
deformation: Optional[Float[Tensor, "N3 3"]] = None,
) -> Mesh:
if deformation is not None:
threestudio.warn(
f"{self.__class__.__name__} does not support deformation. Ignoring."
)
level = -level.view(self.resolution, self.resolution, self.resolution)
v_pos, t_pos_idx = self.mc_func(
level.detach().cpu().numpy(), 0.0
) # transform to numpy
v_pos, t_pos_idx = (
torch.from_numpy(v_pos).float().to(self._dummy.device),
torch.from_numpy(t_pos_idx.astype(np.int64)).long().to(self._dummy.device),
) # transform back to torch tensor on CUDA
v_pos = v_pos / (self.resolution - 1.0)
return Mesh(v_pos=v_pos, t_pos_idx=t_pos_idx)
class MarchingTetrahedraHelper(IsosurfaceHelper):
def __init__(self, resolution: int, tets_path: str):
super().__init__()
self.resolution = resolution
self.tets_path = tets_path
self.triangle_table: Float[Tensor, "..."]
self.register_buffer(
"triangle_table",
torch.as_tensor(
[
[-1, -1, -1, -1, -1, -1],
[1, 0, 2, -1, -1, -1],
[4, 0, 3, -1, -1, -1],
[1, 4, 2, 1, 3, 4],
[3, 1, 5, -1, -1, -1],
[2, 3, 0, 2, 5, 3],
[1, 4, 0, 1, 5, 4],
[4, 2, 5, -1, -1, -1],
[4, 5, 2, -1, -1, -1],
[4, 1, 0, 4, 5, 1],
[3, 2, 0, 3, 5, 2],
[1, 3, 5, -1, -1, -1],
[4, 1, 2, 4, 3, 1],
[3, 0, 4, -1, -1, -1],
[2, 0, 1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
],
dtype=torch.long,
),
persistent=False,
)
self.num_triangles_table: Integer[Tensor, "..."]
self.register_buffer(
"num_triangles_table",
torch.as_tensor(
[0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long
),
persistent=False,
)
self.base_tet_edges: Integer[Tensor, "..."]
self.register_buffer(
"base_tet_edges",
torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),
persistent=False,
)
tets = np.load(self.tets_path)
self._grid_vertices: Float[Tensor, "..."]
self.register_buffer(
"_grid_vertices",
torch.from_numpy(tets["vertices"]).float(),
persistent=False,
)
self.indices: Integer[Tensor, "..."]
self.register_buffer(
"indices", torch.from_numpy(tets["indices"]).long(), persistent=False
)
self._all_edges: Optional[Integer[Tensor, "Ne 2"]] = None
def normalize_grid_deformation(
self, grid_vertex_offsets: Float[Tensor, "Nv 3"]
) -> Float[Tensor, "Nv 3"]:
return (
(self.points_range[1] - self.points_range[0])
/ (self.resolution) # half tet size is approximately 1 / self.resolution
* torch.tanh(grid_vertex_offsets)
) # FIXME: hard-coded activation
@property
def grid_vertices(self) -> Float[Tensor, "Nv 3"]:
return self._grid_vertices
@property
def all_edges(self) -> Integer[Tensor, "Ne 2"]:
if self._all_edges is None:
# compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)
edges = torch.tensor(
[0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],
dtype=torch.long,
device=self.indices.device,
)
_all_edges = self.indices[:, edges].reshape(-1, 2)
_all_edges_sorted = torch.sort(_all_edges, dim=1)[0]
_all_edges = torch.unique(_all_edges_sorted, dim=0)
self._all_edges = _all_edges
return self._all_edges
def sort_edges(self, edges_ex2):
with torch.no_grad():
order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()
order = order.unsqueeze(dim=1)
a = torch.gather(input=edges_ex2, index=order, dim=1)
b = torch.gather(input=edges_ex2, index=1 - order, dim=1)
return torch.stack([a, b], -1)
def _forward(self, pos_nx3, sdf_n, tet_fx4):
with torch.no_grad():
occ_n = sdf_n > 0
occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)
occ_sum = torch.sum(occ_fx4, -1)
valid_tets = (occ_sum > 0) & (occ_sum < 4)
occ_sum = occ_sum[valid_tets]
# find all vertices
all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)
all_edges = self.sort_edges(all_edges)
unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)
unique_edges = unique_edges.long()
mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1
mapping = (
torch.ones(
(unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device
)
* -1
)
mapping[mask_edges] = torch.arange(
mask_edges.sum(), dtype=torch.long, device=pos_nx3.device
)
idx_map = mapping[idx_map] # map edges to verts
interp_v = unique_edges[mask_edges]
edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)
edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)
edges_to_interp_sdf[:, -1] *= -1
denominator = edges_to_interp_sdf.sum(1, keepdim=True)
edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator
verts = (edges_to_interp * edges_to_interp_sdf).sum(1)
idx_map = idx_map.reshape(-1, 6)
v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))
tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)
num_triangles = self.num_triangles_table[tetindex]
# Generate triangle indices
faces = torch.cat(
(
torch.gather(
input=idx_map[num_triangles == 1],
dim=1,
index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],
).reshape(-1, 3),
torch.gather(
input=idx_map[num_triangles == 2],
dim=1,
index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],
).reshape(-1, 3),
),
dim=0,
)
return verts, faces
def forward(
self,
level: Float[Tensor, "N3 1"],
deformation: Optional[Float[Tensor, "N3 3"]] = None,
) -> Mesh:
if deformation is not None:
grid_vertices = self.grid_vertices + self.normalize_grid_deformation(
deformation
)
else:
grid_vertices = self.grid_vertices
v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)
mesh = Mesh(
v_pos=v_pos,
t_pos_idx=t_pos_idx,
# extras
grid_vertices=grid_vertices,
tet_edges=self.all_edges,
grid_level=level,
grid_deformation=deformation,
)
return mesh
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/__init__.py | threestudio/models/__init__.py | from . import (
background,
exporters,
geometry,
guidance,
materials,
prompt_processors,
renderers,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/networks.py | threestudio/models/networks.py | import math
import tinycudann as tcnn
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.utils.base import Updateable
from threestudio.utils.config import config_to_primitive
from threestudio.utils.misc import get_rank
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import *
class ProgressiveBandFrequency(nn.Module, Updateable):
def __init__(self, in_channels: int, config: dict):
super().__init__()
self.N_freqs = config["n_frequencies"]
self.in_channels, self.n_input_dims = in_channels, in_channels
self.funcs = [torch.sin, torch.cos]
self.freq_bands = 2 ** torch.linspace(0, self.N_freqs - 1, self.N_freqs)
self.n_output_dims = self.in_channels * (len(self.funcs) * self.N_freqs)
self.n_masking_step = config.get("n_masking_step", 0)
self.update_step(
None, None
) # mask should be updated at the beginning each step
def forward(self, x):
out = []
for freq, mask in zip(self.freq_bands, self.mask):
for func in self.funcs:
out += [func(freq * x) * mask]
return torch.cat(out, -1)
def update_step(self, epoch, global_step, on_load_weights=False):
if self.n_masking_step <= 0 or global_step is None:
self.mask = torch.ones(self.N_freqs, dtype=torch.float32)
else:
self.mask = (
1.0
- torch.cos(
math.pi
* (
global_step / self.n_masking_step * self.N_freqs
- torch.arange(0, self.N_freqs)
).clamp(0, 1)
)
) / 2.0
threestudio.debug(
f"Update mask: {global_step}/{self.n_masking_step} {self.mask}"
)
class TCNNEncoding(nn.Module):
def __init__(self, in_channels, config, dtype=torch.float32) -> None:
super().__init__()
self.n_input_dims = in_channels
with torch.cuda.device(get_rank()):
self.encoding = tcnn.Encoding(in_channels, config, dtype=dtype)
self.n_output_dims = self.encoding.n_output_dims
def forward(self, x):
return self.encoding(x)
class ProgressiveBandHashGrid(nn.Module, Updateable):
def __init__(self, in_channels, config, dtype=torch.float32):
super().__init__()
self.n_input_dims = in_channels
encoding_config = config.copy()
encoding_config["otype"] = "Grid"
encoding_config["type"] = "Hash"
with torch.cuda.device(get_rank()):
self.encoding = tcnn.Encoding(in_channels, encoding_config, dtype=dtype)
self.n_output_dims = self.encoding.n_output_dims
self.n_level = config["n_levels"]
self.n_features_per_level = config["n_features_per_level"]
self.start_level, self.start_step, self.update_steps = (
config["start_level"],
config["start_step"],
config["update_steps"],
)
self.current_level = self.start_level
self.mask = torch.zeros(
self.n_level * self.n_features_per_level,
dtype=torch.float32,
device=get_rank(),
)
def forward(self, x):
enc = self.encoding(x)
enc = enc * self.mask
return enc
def update_step(self, epoch, global_step, on_load_weights=False):
current_level = min(
self.start_level
+ max(global_step - self.start_step, 0) // self.update_steps,
self.n_level,
)
if current_level > self.current_level:
threestudio.debug(f"Update current level to {current_level}")
self.current_level = current_level
self.mask[: self.current_level * self.n_features_per_level] = 1.0
class CompositeEncoding(nn.Module, Updateable):
    """Wrap an encoding and optionally prepend the (affinely remapped) xyz."""

    def __init__(self, encoding, include_xyz=False, xyz_scale=2.0, xyz_offset=-1.0):
        super(CompositeEncoding, self).__init__()
        self.encoding = encoding
        self.include_xyz = include_xyz
        self.xyz_scale = xyz_scale
        self.xyz_offset = xyz_offset
        # Raw coordinates (if included) add n_input_dims output channels.
        extra_dims = self.encoding.n_input_dims if self.include_xyz else 0
        self.n_output_dims = extra_dims + self.encoding.n_output_dims

    def forward(self, x, *args):
        encoded = self.encoding(x, *args)
        if not self.include_xyz:
            return encoded
        # Prepend remapped coordinates (default maps [0, 1] -> [-1, 1]).
        return torch.cat([x * self.xyz_scale + self.xyz_offset, encoded], dim=-1)
def get_encoding(n_input_dims: int, config) -> nn.Module:
    """Build a coordinate encoding from an ``otype``-keyed config.

    Inputs are assumed to lie in [0, 1]. The result is always wrapped in a
    CompositeEncoding so raw coordinates can optionally be appended via
    ``config.include_xyz``.
    """
    encoding: nn.Module
    if config.otype == "ProgressiveBandFrequency":
        encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))
    elif config.otype == "ProgressiveBandHashGrid":
        encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))
    else:
        # Everything else is delegated to tinycudann.
        encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))
    return CompositeEncoding(
        encoding,
        include_xyz=config.get("include_xyz", False),
        xyz_scale=2.0,  # FIXME: hard coded
        xyz_offset=-1.0,
    )
class VanillaMLP(nn.Module):
    """Plain PyTorch MLP: bias-free Linear + ReLU stacks with a configurable
    output activation. Runs with autocast disabled (see forward)."""

    def __init__(self, dim_in: int, dim_out: int, config: dict):
        super().__init__()
        self.n_neurons = config["n_neurons"]
        self.n_hidden_layers = config["n_hidden_layers"]
        layers = [
            self.make_linear(dim_in, self.n_neurons, is_first=True, is_last=False),
            self.make_activation(),
        ]
        for _ in range(self.n_hidden_layers - 1):
            layers.append(
                self.make_linear(
                    self.n_neurons, self.n_neurons, is_first=False, is_last=False
                )
            )
            layers.append(self.make_activation())
        layers.append(
            self.make_linear(self.n_neurons, dim_out, is_first=False, is_last=True)
        )
        self.layers = nn.Sequential(*layers)
        self.output_activation = get_activation(config.get("output_activation", None))

    def forward(self, x):
        # Disable autocast: parameters can end up with empty gradients when
        # AMP autocast is enabled here (observed quirk noted by the authors).
        with torch.cuda.amp.autocast(enabled=False):
            return self.output_activation(self.layers(x))

    def make_linear(self, dim_in, dim_out, is_first, is_last):
        # No bias anywhere in this MLP variant.
        return nn.Linear(dim_in, dim_out, bias=False)

    def make_activation(self):
        return nn.ReLU(inplace=True)
class SphereInitVanillaMLP(nn.Module):
    """MLP with geometric ("sphere") initialization for SDF-style networks.

    Weights/biases are initialized so the network's initial output
    approximates the signed distance to a sphere of radius
    ``sphere_init_radius`` (signs flipped when ``inside_out`` is set).
    NOTE(review): this mirrors the geometric initialization used by
    IDR/NeuS-style SDF models — confirm against those references.
    Weight normalization is always applied to every linear layer.
    """

    def __init__(self, dim_in, dim_out, config):
        super().__init__()
        self.n_neurons, self.n_hidden_layers = (
            config["n_neurons"],
            config["n_hidden_layers"],
        )
        # Both behaviors are unconditionally enabled for this variant.
        self.sphere_init, self.weight_norm = True, True
        self.sphere_init_radius = config["sphere_init_radius"]
        self.sphere_init_inside_out = config["inside_out"]

        self.layers = [
            self.make_linear(dim_in, self.n_neurons, is_first=True, is_last=False),
            self.make_activation(),
        ]
        for i in range(self.n_hidden_layers - 1):
            self.layers += [
                self.make_linear(
                    self.n_neurons, self.n_neurons, is_first=False, is_last=False
                ),
                self.make_activation(),
            ]
        self.layers += [
            self.make_linear(self.n_neurons, dim_out, is_first=False, is_last=True)
        ]
        self.layers = nn.Sequential(*self.layers)
        self.output_activation = get_activation(config.get("output_activation", None))

    def forward(self, x):
        # disable autocast
        # strange that the parameters will have empty gradients if autocast is enabled in AMP
        with torch.cuda.amp.autocast(enabled=False):
            x = self.layers(x)
            x = self.output_activation(x)
        return x

    def make_linear(self, dim_in, dim_out, is_first, is_last):
        """Create a Linear layer with sphere-aware initialization.

        Last layer: constant bias (+/- sphere_init_radius) plus a
        near-constant weight so the output starts as a sphere SDF.
        First layer: only the first 3 input dims start non-zero
        (assumed to be the raw xyz channels — TODO confirm callers
        prepend xyz via CompositeEncoding).
        """
        layer = nn.Linear(dim_in, dim_out, bias=True)

        if is_last:
            if not self.sphere_init_inside_out:
                # Outward sphere: negative bias, positive mean weight.
                torch.nn.init.constant_(layer.bias, -self.sphere_init_radius)
                torch.nn.init.normal_(
                    layer.weight,
                    mean=math.sqrt(math.pi) / math.sqrt(dim_in),
                    std=0.0001,
                )
            else:
                # Inside-out sphere: signs flipped.
                torch.nn.init.constant_(layer.bias, self.sphere_init_radius)
                torch.nn.init.normal_(
                    layer.weight,
                    mean=-math.sqrt(math.pi) / math.sqrt(dim_in),
                    std=0.0001,
                )
        elif is_first:
            torch.nn.init.constant_(layer.bias, 0.0)
            # Zero out weights for all inputs beyond the first three.
            torch.nn.init.constant_(layer.weight[:, 3:], 0.0)
            torch.nn.init.normal_(
                layer.weight[:, :3], 0.0, math.sqrt(2) / math.sqrt(dim_out)
            )
        else:
            torch.nn.init.constant_(layer.bias, 0.0)
            torch.nn.init.normal_(layer.weight, 0.0, math.sqrt(2) / math.sqrt(dim_out))

        if self.weight_norm:
            layer = nn.utils.weight_norm(layer)
        return layer

    def make_activation(self):
        # Softplus with large beta ~ smooth ReLU, keeps the output smooth.
        return nn.Softplus(beta=100)
class TCNNNetwork(nn.Module):
    """Wrapper around a tinycudann fully-fused network."""

    def __init__(self, dim_in: int, dim_out: int, config: dict) -> None:
        super().__init__()
        # tcnn allocates CUDA buffers at construction time; pin to this rank.
        with torch.cuda.device(get_rank()):
            self.network = tcnn.Network(dim_in, dim_out, config)

    def forward(self, x):
        # tcnn outputs half precision; promote for downstream float32 code.
        return self.network(x).float()  # transform to float32
def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:
    """Build an MLP from an ``otype``-keyed config (tcnn unless vanilla)."""
    otype = config.otype
    if otype == "VanillaMLP":
        return VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))
    if otype == "SphereInitVanillaMLP":
        return SphereInitVanillaMLP(
            n_input_dims, n_output_dims, config_to_primitive(config)
        )
    # tcnn path: sphere initialization is not available there.
    assert (
        config.get("sphere_init", False) is False
    ), "sphere_init=True only supported by VanillaMLP"
    return TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))
class NetworkWithInputEncoding(nn.Module, Updateable):
    """Compose an encoding with a network: ``y = network(encoding(x))``."""

    def __init__(self, encoding, network):
        super().__init__()
        self.encoding = encoding
        self.network = network

    def forward(self, x):
        return self.network(self.encoding(x))
class TCNNNetworkWithInputEncoding(nn.Module):
    """Fully-fused tinycudann encoding + network as a single module."""

    def __init__(
        self,
        n_input_dims: int,
        n_output_dims: int,
        encoding_config: dict,
        network_config: dict,
    ) -> None:
        super().__init__()
        # tcnn allocates CUDA buffers at construction time; pin to this rank.
        with torch.cuda.device(get_rank()):
            self.network_with_input_encoding = tcnn.NetworkWithInputEncoding(
                n_input_dims=n_input_dims,
                n_output_dims=n_output_dims,
                encoding_config=encoding_config,
                network_config=network_config,
            )

    def forward(self, x):
        # tcnn outputs half precision; promote for downstream float32 code.
        return self.network_with_input_encoding(x).float()  # transform to float32
def create_network_with_input_encoding(
    n_input_dims: int, n_output_dims: int, encoding_config, network_config
) -> nn.Module:
    """Build encoding + network, fusing into one tcnn module when possible.

    Inputs are assumed to lie in [0, 1]. The fused tcnn path is only taken
    when neither the encoding nor the network requires a pure-PyTorch
    ("vanilla") implementation.
    """
    needs_vanilla = encoding_config.otype in [
        "VanillaFrequency",
        "ProgressiveBandHashGrid",
    ] or network_config.otype in ["VanillaMLP", "SphereInitVanillaMLP"]
    if needs_vanilla:
        encoding = get_encoding(n_input_dims, encoding_config)
        network = get_mlp(encoding.n_output_dims, n_output_dims, network_config)
        return NetworkWithInputEncoding(encoding, network)
    return TCNNNetworkWithInputEncoding(
        n_input_dims=n_input_dims,
        n_output_dims=n_output_dims,
        encoding_config=config_to_primitive(encoding_config),
        network_config=config_to_primitive(network_config),
    )
class ToDTypeWrapper(nn.Module):
    """Cast the wrapped module's output to a fixed dtype."""

    def __init__(self, module: nn.Module, dtype: torch.dtype):
        super().__init__()
        self.module = module
        self.dtype = dtype

    def forward(self, x: Float[Tensor, "..."]) -> Float[Tensor, "..."]:
        return self.module(x).to(self.dtype)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/mesh.py | threestudio/models/mesh.py | from __future__ import annotations
import numpy as np
import torch
import torch.nn.functional as F
import threestudio
from threestudio.utils.ops import dot
from threestudio.utils.typing import *
class Mesh:
    """Triangle mesh with lazily-computed, cached derived attributes.

    Vertex normals, tangents, UV coordinates and unique edges are computed
    on first access and cached. Arbitrary extra tensors can be attached via
    keyword arguments / :meth:`add_extra`.
    """

    def __init__(
        self, v_pos: Float[Tensor, "Nv 3"], t_pos_idx: Integer[Tensor, "Nf 3"], **kwargs
    ) -> None:
        self.v_pos: Float[Tensor, "Nv 3"] = v_pos
        self.t_pos_idx: Integer[Tensor, "Nf 3"] = t_pos_idx
        # Caches for lazily-computed attributes (filled by the properties).
        self._v_nrm: Optional[Float[Tensor, "Nv 3"]] = None
        self._v_tng: Optional[Float[Tensor, "Nv 3"]] = None
        self._v_tex: Optional[Float[Tensor, "Nt 3"]] = None
        self._t_tex_idx: Optional[Float[Tensor, "Nf 3"]] = None
        self._v_rgb: Optional[Float[Tensor, "Nv 3"]] = None
        self._edges: Optional[Integer[Tensor, "Ne 2"]] = None
        self.extras: Dict[str, Any] = {}
        for k, v in kwargs.items():
            self.add_extra(k, v)

    def add_extra(self, k, v) -> None:
        """Attach an arbitrary named extra attribute to the mesh."""
        self.extras[k] = v

    def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:
        """Remove small connected components ("floaters") from the mesh.

        ``outlier_n_faces_threshold`` is either an absolute face count (int)
        or a fraction (float) of the largest component's face count. Returns
        a new Mesh; extras are carried over unchanged (NOT re-indexed). No-op
        for differentiable meshes.
        """
        if self.requires_grad:
            threestudio.debug("Mesh is differentiable, not removing outliers")
            return self

        # use trimesh to first split the mesh into connected components
        # then remove the components with less than n_face_threshold faces
        import trimesh

        # construct a trimesh object
        mesh = trimesh.Trimesh(
            vertices=self.v_pos.detach().cpu().numpy(),
            faces=self.t_pos_idx.detach().cpu().numpy(),
        )

        # split the mesh into connected components
        components = mesh.split(only_watertight=False)
        # log the number of faces in each component
        threestudio.debug(
            "Mesh has {} components, with faces: {}".format(
                len(components), [c.faces.shape[0] for c in components]
            )
        )

        n_faces_threshold: int
        if isinstance(outlier_n_faces_threshold, float):
            # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold
            n_faces_threshold = int(
                max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold
            )
        else:
            # set the threshold directly to outlier_n_faces_threshold
            n_faces_threshold = outlier_n_faces_threshold

        # log the threshold
        threestudio.debug(
            "Removing components with less than {} faces".format(n_faces_threshold)
        )

        # remove the components with less than n_face_threshold faces
        components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]

        # log the number of faces in each component after removing outliers
        threestudio.debug(
            "Mesh has {} components after removing outliers, with faces: {}".format(
                len(components), [c.faces.shape[0] for c in components]
            )
        )

        # merge the components
        mesh = trimesh.util.concatenate(components)

        # convert back to our mesh format
        v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)
        t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)

        clean_mesh = Mesh(v_pos, t_pos_idx)
        # keep the extras unchanged
        if len(self.extras) > 0:
            clean_mesh.extras = self.extras
            threestudio.debug(
                f"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}"
            )
        return clean_mesh

    @property
    def requires_grad(self):
        # The mesh counts as differentiable iff vertex positions do.
        return self.v_pos.requires_grad

    @property
    def v_nrm(self):
        """Per-vertex normals (lazily computed)."""
        if self._v_nrm is None:
            self._v_nrm = self._compute_vertex_normal()
        return self._v_nrm

    @property
    def v_tng(self):
        """Per-vertex tangents (lazily computed; triggers UV unwrap)."""
        if self._v_tng is None:
            self._v_tng = self._compute_vertex_tangent()
        return self._v_tng

    @property
    def v_tex(self):
        """UV coordinates (lazily unwrapped via xatlas)."""
        if self._v_tex is None:
            self._v_tex, self._t_tex_idx = self._unwrap_uv()
        return self._v_tex

    @property
    def t_tex_idx(self):
        """Per-face UV indices (lazily unwrapped via xatlas)."""
        if self._t_tex_idx is None:
            self._v_tex, self._t_tex_idx = self._unwrap_uv()
        return self._t_tex_idx

    @property
    def v_rgb(self):
        """Optional per-vertex colors; None until set_vertex_color is called."""
        return self._v_rgb

    @property
    def edges(self):
        """Unique undirected edges as sorted (i, j) index pairs."""
        if self._edges is None:
            self._edges = self._compute_edges()
        return self._edges

    def _compute_vertex_normal(self):
        """Accumulate (area-weighted) face normals onto vertices."""
        i0 = self.t_pos_idx[:, 0]
        i1 = self.t_pos_idx[:, 1]
        i2 = self.t_pos_idx[:, 2]

        v0 = self.v_pos[i0, :]
        v1 = self.v_pos[i1, :]
        v2 = self.v_pos[i2, :]

        # Explicit dim=-1: without it, torch.cross uses the *first* dim of
        # size 3, which silently computes the wrong axis for a mesh with
        # exactly 3 faces (and the dim-less form is deprecated in PyTorch).
        face_normals = torch.cross(v1 - v0, v2 - v0, dim=-1)

        # Splat face normals to vertices
        v_nrm = torch.zeros_like(self.v_pos)
        v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)
        v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)
        v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)

        # Normalize, replace zero (degenerated) normals with some default value
        v_nrm = torch.where(
            dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)
        )
        v_nrm = F.normalize(v_nrm, dim=1)

        if torch.is_anomaly_enabled():
            assert torch.all(torch.isfinite(v_nrm))

        return v_nrm

    def _compute_vertex_tangent(self):
        """Per-vertex tangents from UV gradients (for normal mapping)."""
        vn_idx = [None] * 3
        pos = [None] * 3
        tex = [None] * 3
        for i in range(0, 3):
            pos[i] = self.v_pos[self.t_pos_idx[:, i]]
            tex[i] = self.v_tex[self.t_tex_idx[:, i]]
            # t_nrm_idx is always the same as t_pos_idx
            vn_idx[i] = self.t_pos_idx[:, i]

        tangents = torch.zeros_like(self.v_nrm)
        tansum = torch.zeros_like(self.v_nrm)

        # Compute tangent space for each triangle
        uve1 = tex[1] - tex[0]
        uve2 = tex[2] - tex[0]
        pe1 = pos[1] - pos[0]
        pe2 = pos[2] - pos[0]

        nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]
        denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]

        # Avoid division by zero for degenerated texture coordinates
        tang = nom / torch.where(
            denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)
        )

        # Update all 3 vertices
        for i in range(0, 3):
            idx = vn_idx[i][:, None].repeat(1, 3)
            tangents.scatter_add_(0, idx, tang)  # tangents[n_i] = tangents[n_i] + tang
            tansum.scatter_add_(
                0, idx, torch.ones_like(tang)
            )  # tansum[n_i] = tansum[n_i] + 1
        tangents = tangents / tansum

        # Normalize and make sure tangent is perpendicular to normal
        tangents = F.normalize(tangents, dim=1)
        tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)

        if torch.is_anomaly_enabled():
            assert torch.all(torch.isfinite(tangents))

        return tangents

    def _unwrap_uv(
        self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}
    ):
        """UV-unwrap via xatlas; returns (uvs, per-face uv indices)."""
        threestudio.info("Using xatlas to perform UV unwrapping, may take a while ...")

        import xatlas

        atlas = xatlas.Atlas()
        atlas.add_mesh(
            self.v_pos.detach().cpu().numpy(),
            self.t_pos_idx.cpu().numpy(),
        )
        co = xatlas.ChartOptions()
        po = xatlas.PackOptions()
        for k, v in xatlas_chart_options.items():
            setattr(co, k, v)
        for k, v in xatlas_pack_options.items():
            setattr(po, k, v)
        atlas.generate(co, po)
        vmapping, indices, uvs = atlas.get_mesh(0)
        # xatlas returns unsigned integer arrays; reinterpret as int64 so
        # they can be used as torch indices.
        vmapping = (
            torch.from_numpy(
                vmapping.astype(np.uint64, casting="same_kind").view(np.int64)
            )
            .to(self.v_pos.device)
            .long()
        )
        uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()
        indices = (
            torch.from_numpy(
                indices.astype(np.uint64, casting="same_kind").view(np.int64)
            )
            .to(self.v_pos.device)
            .long()
        )
        return uvs, indices

    def unwrap_uv(
        self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}
    ):
        """Force (re-)computation of the UV atlas with the given options."""
        self._v_tex, self._t_tex_idx = self._unwrap_uv(
            xatlas_chart_options, xatlas_pack_options
        )

    def set_vertex_color(self, v_rgb):
        """Attach per-vertex colors; row count must match the vertex count."""
        assert v_rgb.shape[0] == self.v_pos.shape[0]
        self._v_rgb = v_rgb

    def _compute_edges(self):
        # Each triangle contributes three undirected edges; sorting each
        # index pair and deduplicating rows yields the unique edge set.
        edges = torch.cat(
            [
                self.t_pos_idx[:, [0, 1]],
                self.t_pos_idx[:, [1, 2]],
                self.t_pos_idx[:, [2, 0]],
            ],
            dim=0,
        )
        edges = edges.sort()[0]
        edges = torch.unique(edges, dim=0)
        return edges

    def normal_consistency(self) -> Float[Tensor, ""]:
        """Mean (1 - cos) of vertex normals across each edge; 0 == smooth."""
        edge_nrm: Float[Tensor, "Ne 2 3"] = self.v_nrm[self.edges]
        nc = (
            1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)
        ).mean()
        return nc

    def _laplacian_uniform(self):
        """Sparse uniform (combinatorial) Laplacian of the mesh graph."""
        # from stable-dreamfusion
        # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224
        verts, faces = self.v_pos, self.t_pos_idx
        V = verts.shape[0]

        # Neighbor indices
        ii = faces[:, [1, 2, 0]].flatten()
        jj = faces[:, [2, 0, 1]].flatten()
        adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(
            dim=1
        )
        adj_values = torch.ones(adj.shape[1]).to(verts)

        # Diagonal indices
        diag_idx = adj[0]

        # Build the sparse matrix
        idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)
        values = torch.cat((-adj_values, adj_values))

        # The coalesce operation sums the duplicate indices, resulting in the
        # correct diagonal
        return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()

    def laplacian(self) -> Float[Tensor, ""]:
        """Mean norm of the uniform-Laplacian residual (smoothness loss).

        The Laplacian matrix itself is built without gradients; gradients
        flow only through the vertex positions it is applied to.
        """
        with torch.no_grad():
            L = self._laplacian_uniform()
        loss = L.mm(self.v_pos)
        loss = loss.norm(dim=1)
        loss = loss.mean()
        return loss
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/background/neural_environment_map_background.py | threestudio/models/background/neural_environment_map_background.py | import random
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import *
@threestudio.register("neural-environment-map-background")
class NeuralEnvironmentMapBackground(BaseBackground):
    """Background color predicted from the view direction by a small MLP."""

    @dataclass
    class Config(BaseBackground.Config):
        n_output_dims: int = 3
        color_activation: str = "sigmoid"
        dir_encoding_config: dict = field(
            default_factory=lambda: {"otype": "SphericalHarmonics", "degree": 3}
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP",
                "activation": "ReLU",
                "n_neurons": 16,
                "n_hidden_layers": 2,
            }
        )
        random_aug: bool = False
        random_aug_prob: float = 0.5
        eval_color: Optional[Tuple[float, float, float]] = None

        # multi-view diffusion
        share_aug_bg: bool = False

    cfg: Config

    def configure(self) -> None:
        # Direction encoding followed by a small MLP producing the color.
        self.encoding = get_encoding(3, self.cfg.dir_encoding_config)
        self.network = get_mlp(
            self.encoding.n_output_dims,
            self.cfg.n_output_dims,
            self.cfg.mlp_network_config,
        )

    def forward(self, dirs: Float[Tensor, "B H W 3"]) -> Float[Tensor, "B H W Nc"]:
        if not self.training and self.cfg.eval_color is not None:
            # Fixed solid color during evaluation.
            ones = torch.ones(*dirs.shape[:-1], self.cfg.n_output_dims).to(dirs)
            return ones * torch.as_tensor(self.cfg.eval_color).to(dirs)
        # viewdirs must be normalized before passing to this function
        dirs = (dirs + 1.0) / 2.0  # (-1, 1) => (0, 1)
        dirs_embd = self.encoding(dirs.view(-1, 3))
        color = self.network(dirs_embd).view(*dirs.shape[:-1], self.cfg.n_output_dims)
        color = get_activation(self.cfg.color_activation)(color)
        apply_aug = (
            self.training
            and self.cfg.random_aug
            and random.random() < self.cfg.random_aug_prob
        )
        if apply_aug:
            # use random background color with probability random_aug_prob
            n_color = 1 if self.cfg.share_aug_bg else dirs.shape[0]
            rand_bg = (
                torch.rand(n_color, 1, 1, self.cfg.n_output_dims)
                .to(dirs)
                .expand(*dirs.shape[:-1], -1)
            )
            # "color * 0 +" keeps the network in the graph so DDP does not
            # flag its parameters as unused.
            color = color * 0 + rand_bg
        return color
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/background/textured_background.py | threestudio/models/background/textured_background.py | from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import *
@threestudio.register("textured-background")
class TexturedBackground(BaseBackground):
    """Background sampled from a learnable spherical texture."""

    @dataclass
    class Config(BaseBackground.Config):
        n_output_dims: int = 3
        height: int = 64
        width: int = 64
        color_activation: str = "sigmoid"

    cfg: Config

    def configure(self) -> None:
        # Learnable texture sampled by direction via grid_sample.
        self.texture = nn.Parameter(
            torch.randn((1, self.cfg.n_output_dims, self.cfg.height, self.cfg.width))
        )

    def spherical_xyz_to_uv(self, dirs: Float[Tensor, "*B 3"]) -> Float[Tensor, "*B 2"]:
        """Map direction vectors to texture coordinates in [0, 1]^2."""
        x, y, z = dirs.unbind(-1)
        # Polar angle from +z -> u; azimuth -> v.
        u = torch.atan2((x**2 + y**2) ** 0.5, z) / torch.pi
        v = torch.atan2(y, x) / (torch.pi * 2) + 0.5
        return torch.stack([u, v], -1)

    def forward(self, dirs: Float[Tensor, "*B 3"]) -> Float[Tensor, "*B Nc"]:
        batch_shape = dirs.shape[:-1]
        uv = self.spherical_xyz_to_uv(dirs.reshape(-1, dirs.shape[-1]))
        uv = 2 * uv - 1  # rescale to [-1, 1] for grid_sample
        uv = uv.reshape(1, -1, 1, 2)
        sampled = F.grid_sample(
            self.texture,
            uv,
            mode="bilinear",
            padding_mode="reflection",
            align_corners=False,
        )
        color = sampled.reshape(self.cfg.n_output_dims, -1).T.reshape(
            *batch_shape, self.cfg.n_output_dims
        )
        return get_activation(self.cfg.color_activation)(color)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/background/solid_color_background.py | threestudio/models/background/solid_color_background.py | import random
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.utils.typing import *
@threestudio.register("solid-color-background")
class SolidColorBackground(BaseBackground):
    """Constant (optionally learnable) background color."""

    @dataclass
    class Config(BaseBackground.Config):
        n_output_dims: int = 3
        color: Tuple = (1.0, 1.0, 1.0)
        learned: bool = False
        random_aug: bool = False
        random_aug_prob: float = 0.5

    cfg: Config

    def configure(self) -> None:
        self.env_color: Float[Tensor, "Nc"]
        color = torch.as_tensor(self.cfg.color, dtype=torch.float32)
        if self.cfg.learned:
            # Trainable background color.
            self.env_color = nn.Parameter(color)
        else:
            # Fixed color, registered so it moves with the module's device.
            self.register_buffer("env_color", color)

    def forward(self, dirs: Float[Tensor, "B H W 3"]) -> Float[Tensor, "B H W Nc"]:
        color = (
            torch.ones(*dirs.shape[:-1], self.cfg.n_output_dims).to(dirs)
            * self.env_color
        )
        if (
            self.training
            and self.cfg.random_aug
            and random.random() < self.cfg.random_aug_prob
        ):
            # use random background color with probability random_aug_prob
            # ("color * 0 +" keeps env_color in the graph so DDP does not
            # flag it as an unused parameter)
            color = color * 0 + (
                torch.rand(dirs.shape[0], 1, 1, self.cfg.n_output_dims)
                .to(dirs)
                .expand(*dirs.shape[:-1], -1)
            )
        return color
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/background/__init__.py | threestudio/models/background/__init__.py | from . import (
base,
neural_environment_map_background,
solid_color_background,
textured_background,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/background/base.py | threestudio/models/background/base.py | import random
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.utils.base import BaseModule
from threestudio.utils.typing import *
class BaseBackground(BaseModule):
    """Abstract base for background models.

    Subclasses implement :meth:`forward`, mapping per-pixel view
    directions to background colors.
    """

    @dataclass
    class Config(BaseModule.Config):
        pass

    cfg: Config

    def configure(self):
        # Nothing to set up by default.
        pass

    def forward(self, dirs: Float[Tensor, "B H W 3"]) -> Float[Tensor, "B H W Nc"]:
        raise NotImplementedError
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/renderers/nvdiff_rasterizer.py | threestudio/models/renderers/nvdiff_rasterizer.py | from dataclasses import dataclass
import nerfacc
import torch
import torch.nn.functional as F
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.renderers.base import Rasterizer, VolumeRenderer
from threestudio.utils.misc import get_device
from threestudio.utils.rasterize import NVDiffRasterizerContext
from threestudio.utils.typing import *
@threestudio.register("nvdiff-rasterizer")
class NVDiffRasterizer(Rasterizer):
    """Mesh rasterizer built on nvdiffrast.

    Extracts the geometry's isosurface mesh, rasterizes it for a batch of
    cameras, and produces antialiased opacity, normals and (optionally)
    material-shaded RGB composited over the background.
    """

    @dataclass
    class Config(VolumeRenderer.Config):
        # nvdiffrast context type (e.g. "gl" or "cuda").
        context_type: str = "gl"

    cfg: Config

    def configure(
        self,
        geometry: BaseImplicitGeometry,
        material: BaseMaterial,
        background: BaseBackground,
    ) -> None:
        """Store sub-modules and create the nvdiffrast context on this device."""
        super().configure(geometry, material, background)
        self.ctx = NVDiffRasterizerContext(self.cfg.context_type, get_device())

    def forward(
        self,
        mvp_mtx: Float[Tensor, "B 4 4"],
        camera_positions: Float[Tensor, "B 3"],
        light_positions: Float[Tensor, "B 3"],
        height: int,
        width: int,
        render_rgb: bool = True,
        **kwargs
    ) -> Dict[str, Any]:
        """Rasterize the current isosurface mesh.

        Returns a dict with "opacity", "mesh", "comp_normal" and, when
        ``render_rgb`` is True, also "comp_rgb" and "comp_rgb_bg".
        """
        batch_size = mvp_mtx.shape[0]
        mesh = self.geometry.isosurface()

        # Transform vertices to clip space and rasterize.
        v_pos_clip: Float[Tensor, "B Nv 4"] = self.ctx.vertex_transform(
            mesh.v_pos, mvp_mtx
        )
        rast, _ = self.ctx.rasterize(v_pos_clip, mesh.t_pos_idx, (height, width))
        # Last rast channel is nonzero where a triangle covers the pixel;
        # used here as the foreground coverage mask.
        mask = rast[..., 3:] > 0
        mask_aa = self.ctx.antialias(mask.float(), rast, v_pos_clip, mesh.t_pos_idx)

        out = {"opacity": mask_aa, "mesh": mesh}

        # Interpolate vertex normals, map from [-1, 1] to [0, 1], zero the
        # background, then antialias.
        gb_normal, _ = self.ctx.interpolate_one(mesh.v_nrm, rast, mesh.t_pos_idx)
        gb_normal = F.normalize(gb_normal, dim=-1)
        gb_normal_aa = torch.lerp(
            torch.zeros_like(gb_normal), (gb_normal + 1.0) / 2.0, mask.float()
        )
        gb_normal_aa = self.ctx.antialias(
            gb_normal_aa, rast, v_pos_clip, mesh.t_pos_idx
        )
        out.update({"comp_normal": gb_normal_aa})  # in [0, 1]

        # TODO: make it clear whether to compute the normal, now we compute it in all cases
        # consider using: require_normal_computation = render_normal or (render_rgb and material.requires_normal)
        # or
        # render_normal = render_normal or (render_rgb and material.requires_normal)

        if render_rgb:
            selector = mask[..., 0]

            # Per-pixel world positions, view directions and light positions.
            gb_pos, _ = self.ctx.interpolate_one(mesh.v_pos, rast, mesh.t_pos_idx)
            gb_viewdirs = F.normalize(
                gb_pos - camera_positions[:, None, None, :], dim=-1
            )
            gb_light_positions = light_positions[:, None, None, :].expand(
                -1, height, width, -1
            )

            # Query geometry and shade only the covered (foreground) pixels.
            positions = gb_pos[selector]
            geo_out = self.geometry(positions, output_normal=False)

            extra_geo_info = {}
            if self.material.requires_normal:
                extra_geo_info["shading_normal"] = gb_normal[selector]
            if self.material.requires_tangent:
                gb_tangent, _ = self.ctx.interpolate_one(
                    mesh.v_tng, rast, mesh.t_pos_idx
                )
                gb_tangent = F.normalize(gb_tangent, dim=-1)
                extra_geo_info["tangent"] = gb_tangent[selector]

            rgb_fg = self.material(
                viewdirs=gb_viewdirs[selector],
                positions=positions,
                light_positions=gb_light_positions[selector],
                **extra_geo_info,
                **geo_out
            )
            # Scatter shaded foreground back into the image, composite over
            # the background along per-pixel view dirs, then antialias.
            gb_rgb_fg = torch.zeros(batch_size, height, width, 3).to(rgb_fg)
            gb_rgb_fg[selector] = rgb_fg

            gb_rgb_bg = self.background(dirs=gb_viewdirs)
            gb_rgb = torch.lerp(gb_rgb_bg, gb_rgb_fg, mask.float())
            gb_rgb_aa = self.ctx.antialias(gb_rgb, rast, v_pos_clip, mesh.t_pos_idx)

            out.update({"comp_rgb": gb_rgb_aa, "comp_rgb_bg": gb_rgb_bg})

        return out
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/renderers/deferred_volume_renderer.py | threestudio/models/renderers/deferred_volume_renderer.py | from dataclasses import dataclass
import torch
import torch.nn.functional as F
import threestudio
from threestudio.models.renderers.base import VolumeRenderer
class DeferredVolumeRenderer(VolumeRenderer):
    """Placeholder for deferred-shading volume rendering; no overrides yet."""
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/renderers/neus_volume_renderer.py | threestudio/models/renderers/neus_volume_renderer.py | from dataclasses import dataclass
from functools import partial
import nerfacc
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.models.estimators import ImportanceEstimator
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.renderers.base import VolumeRenderer
from threestudio.utils.ops import chunk_batch, validate_empty_rays
from threestudio.utils.typing import *
def volsdf_density(sdf, inv_std):
    """VolSDF-style density from a signed distance (Laplace-CDF form).

    ``inv_std`` is clamped to [0, 80] for numerical stability; the clamped
    value serves both as the density scale (alpha) and as 1/beta of the
    Laplace distribution.
    """
    scale = inv_std.clamp(0.0, 80.0)
    beta = 1 / scale
    # alpha * Laplace-CDF(-sdf): high density inside (sdf < 0), ~0 outside.
    return scale * (0.5 + 0.5 * sdf.sign() * torch.expm1(-sdf.abs() / beta))
class LearnedVariance(nn.Module):
    """Single learnable inverse standard deviation (NeuS "s" value).

    The raw parameter is kept on a scaled-log axis so the effective value
    ``exp(10 * raw)`` is always positive.
    """

    def __init__(self, init_val):
        super(LearnedVariance, self).__init__()
        self.register_parameter("_inv_std", nn.Parameter(torch.tensor(init_val)))

    @property
    def inv_std(self):
        return torch.exp(self._inv_std * 10.0)

    def forward(self, x):
        # Broadcast the clamped scalar to x's shape.
        return torch.ones_like(x) * self.inv_std.clamp(1.0e-6, 1.0e6)
@threestudio.register("neus-volume-renderer")
class NeuSVolumeRenderer(VolumeRenderer):
    @dataclass
    class Config(VolumeRenderer.Config):
        # Samples per ray for uniform marching (sets the occgrid step size).
        num_samples_per_ray: int = 512
        # Stratified (jittered) sampling during training.
        randomized: bool = True
        # Chunk size for memory-bounded evaluation outside training.
        eval_chunk_size: int = 160000
        # Initial raw value for LearnedVariance (effective s = exp(10 * x)).
        learned_variance_init: float = 0.3
        # NOTE(review): presumably the step count over which cos_anneal_ratio
        # ramps to 1; its consumer is outside this view — confirm.
        cos_anneal_end_steps: int = 0
        # Use the VolSDF (Laplace) density instead of the NeuS alpha.
        use_volsdf: bool = False

        near_plane: float = 0.0
        far_plane: float = 1e10

        # in ['occgrid', 'importance']
        estimator: str = "occgrid"

        # for occgrid
        grid_prune: bool = True
        prune_alpha_threshold: bool = True

        # for importance
        num_samples_per_ray_importance: int = 64

    cfg: Config
    def configure(
        self,
        geometry: BaseImplicitGeometry,
        material: BaseMaterial,
        background: BaseBackground,
    ) -> None:
        """Register sub-modules, the learned variance, and the ray-sampling
        estimator selected by ``cfg.estimator``."""
        super().configure(geometry, material, background)
        self.variance = LearnedVariance(self.cfg.learned_variance_init)
        if self.cfg.estimator == "occgrid":
            self.estimator = nerfacc.OccGridEstimator(
                roi_aabb=self.bbox.view(-1), resolution=32, levels=1
            )
            if not self.cfg.grid_prune:
                # Mark every cell occupied so the grid never skips samples.
                self.estimator.occs.fill_(True)
                self.estimator.binaries.fill_(True)
            # Step size: scene diagonal (sqrt(3) * 2 * radius) / sample count.
            self.render_step_size = (
                1.732 * 2 * self.cfg.radius / self.cfg.num_samples_per_ray
            )
            self.randomized = self.cfg.randomized
        elif self.cfg.estimator == "importance":
            self.estimator = ImportanceEstimator()
        else:
            raise NotImplementedError(
                "unknown estimator, should be in ['occgrid', 'importance']"
            )
        # Annealing ratio; 1.0 means fully annealed (see get_alpha).
        self.cos_anneal_ratio = 1.0
def get_alpha(self, sdf, normal, dirs, dists):
inv_std = self.variance(sdf)
if self.cfg.use_volsdf:
alpha = torch.abs(dists.detach()) * volsdf_density(sdf, inv_std)
else:
true_cos = (dirs * normal).sum(-1, keepdim=True)
# "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes
# the cos value "not dead" at the beginning training iterations, for better convergence.
iter_cos = -(
F.relu(-true_cos * 0.5 + 0.5) * (1.0 - self.cos_anneal_ratio)
+ F.relu(-true_cos) * self.cos_anneal_ratio
) # always non-positive
# Estimate signed distances at section points
estimated_next_sdf = sdf + iter_cos * dists * 0.5
estimated_prev_sdf = sdf - iter_cos * dists * 0.5
prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_std)
next_cdf = torch.sigmoid(estimated_next_sdf * inv_std)
p = prev_cdf - next_cdf
c = prev_cdf
alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0)
return alpha
def forward(
self,
rays_o: Float[Tensor, "B H W 3"],
rays_d: Float[Tensor, "B H W 3"],
light_positions: Float[Tensor, "B 3"],
bg_color: Optional[Tensor] = None,
**kwargs
) -> Dict[str, Float[Tensor, "..."]]:
batch_size, height, width = rays_o.shape[:3]
rays_o_flatten: Float[Tensor, "Nr 3"] = rays_o.reshape(-1, 3)
rays_d_flatten: Float[Tensor, "Nr 3"] = rays_d.reshape(-1, 3)
light_positions_flatten: Float[Tensor, "Nr 3"] = (
light_positions.reshape(-1, 1, 1, 3)
.expand(-1, height, width, -1)
.reshape(-1, 3)
)
n_rays = rays_o_flatten.shape[0]
if self.cfg.estimator == "occgrid":
def alpha_fn(t_starts, t_ends, ray_indices):
t_starts, t_ends = t_starts[..., None], t_ends[..., None]
t_origins = rays_o_flatten[ray_indices]
t_positions = (t_starts + t_ends) / 2.0
t_dirs = rays_d_flatten[ray_indices]
positions = t_origins + t_dirs * t_positions
if self.training:
sdf = self.geometry.forward_sdf(positions)[..., 0]
else:
sdf = chunk_batch(
self.geometry.forward_sdf,
self.cfg.eval_chunk_size,
positions,
)[..., 0]
inv_std = self.variance(sdf)
if self.cfg.use_volsdf:
alpha = self.render_step_size * volsdf_density(sdf, inv_std)
else:
estimated_next_sdf = sdf - self.render_step_size * 0.5
estimated_prev_sdf = sdf + self.render_step_size * 0.5
prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_std)
next_cdf = torch.sigmoid(estimated_next_sdf * inv_std)
p = prev_cdf - next_cdf
c = prev_cdf
alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0)
return alpha
if not self.cfg.grid_prune:
with torch.no_grad():
ray_indices, t_starts_, t_ends_ = self.estimator.sampling(
rays_o_flatten,
rays_d_flatten,
alpha_fn=None,
near_plane=self.cfg.near_plane,
far_plane=self.cfg.far_plane,
render_step_size=self.render_step_size,
alpha_thre=0.0,
stratified=self.randomized,
cone_angle=0.0,
early_stop_eps=0,
)
else:
with torch.no_grad():
ray_indices, t_starts_, t_ends_ = self.estimator.sampling(
rays_o_flatten,
rays_d_flatten,
alpha_fn=alpha_fn if self.cfg.prune_alpha_threshold else None,
near_plane=self.cfg.near_plane,
far_plane=self.cfg.far_plane,
render_step_size=self.render_step_size,
alpha_thre=0.01 if self.cfg.prune_alpha_threshold else 0.0,
stratified=self.randomized,
cone_angle=0.0,
)
elif self.cfg.estimator == "importance":
def prop_sigma_fn(
t_starts: Float[Tensor, "Nr Ns"],
t_ends: Float[Tensor, "Nr Ns"],
proposal_network,
):
if self.cfg.use_volsdf:
t_origins: Float[Tensor, "Nr 1 3"] = rays_o_flatten.unsqueeze(-2)
t_dirs: Float[Tensor, "Nr 1 3"] = rays_d_flatten.unsqueeze(-2)
positions: Float[Tensor, "Nr Ns 3"] = (
t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0
)
with torch.no_grad():
geo_out = chunk_batch(
proposal_network,
self.cfg.eval_chunk_size,
positions.reshape(-1, 3),
output_normal=False,
)
inv_std = self.variance(geo_out["sdf"])
density = volsdf_density(geo_out["sdf"], inv_std)
return density.reshape(positions.shape[:2])
else:
raise ValueError(
"Currently only VolSDF supports importance sampling."
)
t_starts_, t_ends_ = self.estimator.sampling(
prop_sigma_fns=[partial(prop_sigma_fn, proposal_network=self.geometry)],
prop_samples=[self.cfg.num_samples_per_ray_importance],
num_samples=self.cfg.num_samples_per_ray,
n_rays=n_rays,
near_plane=self.cfg.near_plane,
far_plane=self.cfg.far_plane,
sampling_type="uniform",
stratified=self.randomized,
)
ray_indices = (
torch.arange(n_rays, device=rays_o_flatten.device)
.unsqueeze(-1)
.expand(-1, t_starts_.shape[1])
)
ray_indices = ray_indices.flatten()
t_starts_ = t_starts_.flatten()
t_ends_ = t_ends_.flatten()
else:
raise NotImplementedError
ray_indices, t_starts_, t_ends_ = validate_empty_rays(
ray_indices, t_starts_, t_ends_
)
ray_indices = ray_indices.long()
t_starts, t_ends = t_starts_[..., None], t_ends_[..., None]
t_origins = rays_o_flatten[ray_indices]
t_dirs = rays_d_flatten[ray_indices]
t_light_positions = light_positions_flatten[ray_indices]
t_positions = (t_starts + t_ends) / 2.0
positions = t_origins + t_dirs * t_positions
t_intervals = t_ends - t_starts
if self.training:
geo_out = self.geometry(positions, output_normal=True)
rgb_fg_all = self.material(
viewdirs=t_dirs,
positions=positions,
light_positions=t_light_positions,
**geo_out,
**kwargs
)
comp_rgb_bg = self.background(dirs=rays_d)
else:
geo_out = chunk_batch(
self.geometry,
self.cfg.eval_chunk_size,
positions,
output_normal=True,
)
rgb_fg_all = chunk_batch(
self.material,
self.cfg.eval_chunk_size,
viewdirs=t_dirs,
positions=positions,
light_positions=t_light_positions,
**geo_out
)
comp_rgb_bg = chunk_batch(
self.background, self.cfg.eval_chunk_size, dirs=rays_d
)
# grad or normal?
alpha: Float[Tensor, "Nr 1"] = self.get_alpha(
geo_out["sdf"], geo_out["normal"], t_dirs, t_intervals
)
weights: Float[Tensor, "Nr 1"]
weights_, _ = nerfacc.render_weight_from_alpha(
alpha[..., 0],
ray_indices=ray_indices,
n_rays=n_rays,
)
weights = weights_[..., None]
opacity: Float[Tensor, "Nr 1"] = nerfacc.accumulate_along_rays(
weights[..., 0], values=None, ray_indices=ray_indices, n_rays=n_rays
)
depth: Float[Tensor, "Nr 1"] = nerfacc.accumulate_along_rays(
weights[..., 0], values=t_positions, ray_indices=ray_indices, n_rays=n_rays
)
comp_rgb_fg: Float[Tensor, "Nr Nc"] = nerfacc.accumulate_along_rays(
weights[..., 0], values=rgb_fg_all, ray_indices=ray_indices, n_rays=n_rays
)
if bg_color is None:
bg_color = comp_rgb_bg
if bg_color.shape[:-1] == (batch_size, height, width):
bg_color = bg_color.reshape(batch_size * height * width, -1)
comp_rgb = comp_rgb_fg + bg_color * (1.0 - opacity)
out = {
"comp_rgb": comp_rgb.view(batch_size, height, width, -1),
"comp_rgb_fg": comp_rgb_fg.view(batch_size, height, width, -1),
"comp_rgb_bg": comp_rgb_bg.view(batch_size, height, width, -1),
"opacity": opacity.view(batch_size, height, width, 1),
"depth": depth.view(batch_size, height, width, 1),
}
if self.training:
out.update(
{
"weights": weights,
"t_points": t_positions,
"t_intervals": t_intervals,
"t_dirs": t_dirs,
"ray_indices": ray_indices,
"points": positions,
**geo_out,
}
)
else:
if "normal" in geo_out:
comp_normal: Float[Tensor, "Nr 3"] = nerfacc.accumulate_along_rays(
weights[..., 0],
values=geo_out["normal"],
ray_indices=ray_indices,
n_rays=n_rays,
)
comp_normal = F.normalize(comp_normal, dim=-1)
comp_normal = (comp_normal + 1.0) / 2.0 * opacity # for visualization
out.update(
{
"comp_normal": comp_normal.view(batch_size, height, width, 3),
}
)
out.update({"inv_std": self.variance.inv_std})
return out
def update_step(
    self, epoch: int, global_step: int, on_load_weights: bool = False
) -> None:
    """Per-training-step hook for the NeuS renderer.

    Anneals the cosine factor used by the SDF-to-alpha conversion and, when
    using a pruned occupancy grid, refreshes the grid from the current SDF.
    """
    # Linear ramp 0 -> 1 over cos_anneal_end_steps; 0 disables annealing.
    self.cos_anneal_ratio = (
        1.0
        if self.cfg.cos_anneal_end_steps == 0
        else min(1.0, global_step / self.cfg.cos_anneal_end_steps)
    )
    if self.cfg.estimator == "occgrid":
        if self.cfg.grid_prune:

            def occ_eval_fn(x):
                # Opacity of one render step at positions x, derived from the
                # SDF; mirrors the alpha computation used during rendering.
                sdf = self.geometry.forward_sdf(x)
                inv_std = self.variance(sdf)
                if self.cfg.use_volsdf:
                    alpha = self.render_step_size * volsdf_density(sdf, inv_std)
                else:
                    # NeuS-style alpha from sigmoid CDFs evaluated half a step
                    # before/after the sample.
                    estimated_next_sdf = sdf - self.render_step_size * 0.5
                    estimated_prev_sdf = sdf + self.render_step_size * 0.5
                    prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_std)
                    next_cdf = torch.sigmoid(estimated_next_sdf * inv_std)
                    p = prev_cdf - next_cdf
                    c = prev_cdf
                    alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0)
                return alpha

            # Skip the update when restoring weights so the grid state is not
            # perturbed by a checkpoint load.
            if self.training and not on_load_weights:
                self.estimator.update_every_n_steps(
                    step=global_step, occ_eval_fn=occ_eval_fn
                )
def train(self, mode=True):
    # Stratified (randomized) ray sampling only while training, and only if
    # enabled in the config.
    self.randomized = mode and self.cfg.randomized
    return super().train(mode=mode)
def eval(self):
    # Deterministic (non-stratified) sampling during evaluation.
    self.randomized = False
    return super().eval()
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/renderers/nerf_volume_renderer.py | threestudio/models/renderers/nerf_volume_renderer.py | from dataclasses import dataclass, field
from functools import partial
import nerfacc
import torch
import torch.nn.functional as F
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.models.estimators import ImportanceEstimator
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import create_network_with_input_encoding
from threestudio.models.renderers.base import VolumeRenderer
from threestudio.systems.utils import parse_optimizer, parse_scheduler_to_instance
from threestudio.utils.ops import chunk_batch, get_activation, validate_empty_rays
from threestudio.utils.typing import *
@threestudio.register("nerf-volume-renderer")
class NeRFVolumeRenderer(VolumeRenderer):
    """Volume renderer over a density-based (NeRF-style) implicit geometry.

    Supports three ray samplers selected by ``cfg.estimator``:

    * ``"occgrid"``    -- nerfacc occupancy grid, optionally pruned by density;
    * ``"proposal"``   -- a learned proposal network (nerfacc PropNetEstimator);
    * ``"importance"`` -- importance sampling driven by the geometry itself.
    """

    @dataclass
    class Config(VolumeRenderer.Config):
        num_samples_per_ray: int = 512
        eval_chunk_size: int = 160000
        randomized: bool = True
        near_plane: float = 0.0
        far_plane: float = 1e10
        return_comp_normal: bool = False
        return_normal_perturb: bool = False
        # in ["occgrid", "proposal", "importance"]
        estimator: str = "occgrid"
        # for occgrid
        grid_prune: bool = True
        prune_alpha_threshold: bool = True
        # for proposal
        proposal_network_config: Optional[dict] = None
        prop_optimizer_config: Optional[dict] = None
        prop_scheduler_config: Optional[dict] = None
        num_samples_per_ray_proposal: int = 64
        # for importance
        num_samples_per_ray_importance: int = 64

    cfg: Config

    def configure(
        self,
        geometry: BaseImplicitGeometry,
        material: BaseMaterial,
        background: BaseBackground,
    ) -> None:
        """Build the ray-sampling estimator selected by ``cfg.estimator``."""
        super().configure(geometry, material, background)
        if self.cfg.estimator == "occgrid":
            self.estimator = nerfacc.OccGridEstimator(
                roi_aabb=self.bbox.view(-1), resolution=32, levels=1
            )
            if not self.cfg.grid_prune:
                # No pruning: mark every grid cell occupied so sampling is dense.
                self.estimator.occs.fill_(True)
                self.estimator.binaries.fill_(True)
            # Step size so ~num_samples_per_ray steps span the scene diagonal
            # (1.732 ~= sqrt(3)).
            self.render_step_size = (
                1.732 * 2 * self.cfg.radius / self.cfg.num_samples_per_ray
            )
            self.randomized = self.cfg.randomized
        elif self.cfg.estimator == "importance":
            self.estimator = ImportanceEstimator()
        elif self.cfg.estimator == "proposal":
            self.prop_net = create_network_with_input_encoding(
                **self.cfg.proposal_network_config
            )
            self.prop_optim = parse_optimizer(
                self.cfg.prop_optimizer_config, self.prop_net
            )
            self.prop_scheduler = (
                parse_scheduler_to_instance(
                    self.cfg.prop_scheduler_config, self.prop_optim
                )
                if self.cfg.prop_scheduler_config is not None
                else None
            )
            self.estimator = nerfacc.PropNetEstimator(
                self.prop_optim, self.prop_scheduler
            )

            def get_proposal_requires_grad_fn(
                target: float = 5.0, num_steps: int = 1000
            ):
                # Returns a stateful closure deciding, per training step,
                # whether the proposal network should receive gradients.
                schedule = lambda s: min(s / num_steps, 1.0) * target
                steps_since_last_grad = 0

                def proposal_requires_grad_fn(step: int) -> bool:
                    nonlocal steps_since_last_grad
                    target_steps_since_last_grad = schedule(step)
                    requires_grad = steps_since_last_grad > target_steps_since_last_grad
                    if requires_grad:
                        steps_since_last_grad = 0
                    steps_since_last_grad += 1
                    return requires_grad

                return proposal_requires_grad_fn

            self.proposal_requires_grad_fn = get_proposal_requires_grad_fn()
            self.randomized = self.cfg.randomized
        else:
            raise NotImplementedError(
                "Unknown estimator, should be one of ['occgrid', 'proposal', 'importance']."
            )

        # for proposal: values handed between forward() and the
        # update_step/update_step_end hooks
        self.vars_in_forward = {}

    def forward(
        self,
        rays_o: Float[Tensor, "B H W 3"],
        rays_d: Float[Tensor, "B H W 3"],
        light_positions: Float[Tensor, "B 3"],
        bg_color: Optional[Tensor] = None,
        **kwargs
    ) -> Dict[str, Float[Tensor, "..."]]:
        """Render a batch of rays.

        Returns a dict with composited ``comp_rgb``/``comp_rgb_fg``/
        ``comp_rgb_bg``, ``opacity``, ``depth`` and ``z_variance`` image maps,
        plus per-sample tensors during training.
        """
        batch_size, height, width = rays_o.shape[:3]
        rays_o_flatten: Float[Tensor, "Nr 3"] = rays_o.reshape(-1, 3)
        rays_d_flatten: Float[Tensor, "Nr 3"] = rays_d.reshape(-1, 3)
        # Broadcast the per-batch light position to every pixel's ray.
        light_positions_flatten: Float[Tensor, "Nr 3"] = (
            light_positions.reshape(-1, 1, 1, 3)
            .expand(-1, height, width, -1)
            .reshape(-1, 3)
        )
        n_rays = rays_o_flatten.shape[0]

        if self.cfg.estimator == "occgrid":
            if not self.cfg.grid_prune:
                with torch.no_grad():
                    ray_indices, t_starts_, t_ends_ = self.estimator.sampling(
                        rays_o_flatten,
                        rays_d_flatten,
                        sigma_fn=None,
                        near_plane=self.cfg.near_plane,
                        far_plane=self.cfg.far_plane,
                        render_step_size=self.render_step_size,
                        alpha_thre=0.0,
                        stratified=self.randomized,
                        cone_angle=0.0,
                        early_stop_eps=0,
                    )
            else:

                def sigma_fn(t_starts, t_ends, ray_indices):
                    # Density at segment midpoints; the grid uses it to skip
                    # low-density samples.
                    t_starts, t_ends = t_starts[..., None], t_ends[..., None]
                    t_origins = rays_o_flatten[ray_indices]
                    t_positions = (t_starts + t_ends) / 2.0
                    t_dirs = rays_d_flatten[ray_indices]
                    positions = t_origins + t_dirs * t_positions
                    if self.training:
                        sigma = self.geometry.forward_density(positions)[..., 0]
                    else:
                        sigma = chunk_batch(
                            self.geometry.forward_density,
                            self.cfg.eval_chunk_size,
                            positions,
                        )[..., 0]
                    return sigma

                with torch.no_grad():
                    ray_indices, t_starts_, t_ends_ = self.estimator.sampling(
                        rays_o_flatten,
                        rays_d_flatten,
                        sigma_fn=sigma_fn if self.cfg.prune_alpha_threshold else None,
                        near_plane=self.cfg.near_plane,
                        far_plane=self.cfg.far_plane,
                        render_step_size=self.render_step_size,
                        alpha_thre=0.01 if self.cfg.prune_alpha_threshold else 0.0,
                        stratified=self.randomized,
                        cone_angle=0.0,
                    )
        elif self.cfg.estimator == "proposal":

            def prop_sigma_fn(
                t_starts: Float[Tensor, "Nr Ns"],
                t_ends: Float[Tensor, "Nr Ns"],
                proposal_network,
            ):
                # Density from the proposal network, evaluated at segment
                # midpoints normalized into the unit cube.
                t_origins: Float[Tensor, "Nr 1 3"] = rays_o_flatten.unsqueeze(-2)
                t_dirs: Float[Tensor, "Nr 1 3"] = rays_d_flatten.unsqueeze(-2)
                positions: Float[Tensor, "Nr Ns 3"] = (
                    t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0
                )
                aabb_min, aabb_max = self.bbox[0], self.bbox[1]
                positions = (positions - aabb_min) / (aabb_max - aabb_min)
                # `selector` zeroes out densities for points outside the AABB.
                selector = ((positions > 0.0) & (positions < 1.0)).all(dim=-1)
                density_before_activation = (
                    proposal_network(positions.view(-1, 3))
                    .view(*positions.shape[:-1], 1)
                    .to(positions)
                )
                density: Float[Tensor, "Nr Ns 1"] = (
                    get_activation("shifted_trunc_exp")(density_before_activation)
                    * selector[..., None]
                )
                return density.squeeze(-1)

            t_starts_, t_ends_ = self.estimator.sampling(
                prop_sigma_fns=[partial(prop_sigma_fn, proposal_network=self.prop_net)],
                prop_samples=[self.cfg.num_samples_per_ray_proposal],
                num_samples=self.cfg.num_samples_per_ray,
                n_rays=n_rays,
                near_plane=self.cfg.near_plane,
                far_plane=self.cfg.far_plane,
                sampling_type="uniform",
                stratified=self.randomized,
                requires_grad=self.vars_in_forward["requires_grad"],
            )
            # PropNet returns dense [Nr, Ns] samples; flatten them into the
            # (ray_indices, t_starts_, t_ends_) layout used below.
            ray_indices = (
                torch.arange(n_rays, device=rays_o_flatten.device)
                .unsqueeze(-1)
                .expand(-1, t_starts_.shape[1])
            )
            ray_indices = ray_indices.flatten()
            t_starts_ = t_starts_.flatten()
            t_ends_ = t_ends_.flatten()
        elif self.cfg.estimator == "importance":

            def prop_sigma_fn(
                t_starts: Float[Tensor, "Nr Ns"],
                t_ends: Float[Tensor, "Nr Ns"],
                proposal_network,
            ):
                # Density from the geometry itself (no gradients) to drive
                # importance resampling.
                t_origins: Float[Tensor, "Nr 1 3"] = rays_o_flatten.unsqueeze(-2)
                t_dirs: Float[Tensor, "Nr 1 3"] = rays_d_flatten.unsqueeze(-2)
                positions: Float[Tensor, "Nr Ns 3"] = (
                    t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0
                )
                with torch.no_grad():
                    geo_out = chunk_batch(
                        proposal_network,
                        self.cfg.eval_chunk_size,
                        positions.reshape(-1, 3),
                        output_normal=False,
                    )
                    density = geo_out["density"]
                return density.reshape(positions.shape[:2])

            t_starts_, t_ends_ = self.estimator.sampling(
                prop_sigma_fns=[partial(prop_sigma_fn, proposal_network=self.geometry)],
                prop_samples=[self.cfg.num_samples_per_ray_importance],
                num_samples=self.cfg.num_samples_per_ray,
                n_rays=n_rays,
                near_plane=self.cfg.near_plane,
                far_plane=self.cfg.far_plane,
                sampling_type="uniform",
                stratified=self.randomized,
            )
            ray_indices = (
                torch.arange(n_rays, device=rays_o_flatten.device)
                .unsqueeze(-1)
                .expand(-1, t_starts_.shape[1])
            )
            ray_indices = ray_indices.flatten()
            t_starts_ = t_starts_.flatten()
            t_ends_ = t_ends_.flatten()
        else:
            raise NotImplementedError

        # Guarantee at least one sample so downstream ops do not crash on
        # fully-empty rays.
        ray_indices, t_starts_, t_ends_ = validate_empty_rays(
            ray_indices, t_starts_, t_ends_
        )
        ray_indices = ray_indices.long()
        t_starts, t_ends = t_starts_[..., None], t_ends_[..., None]
        t_origins = rays_o_flatten[ray_indices]
        t_dirs = rays_d_flatten[ray_indices]
        t_light_positions = light_positions_flatten[ray_indices]
        t_positions = (t_starts + t_ends) / 2.0
        positions = t_origins + t_dirs * t_positions
        t_intervals = t_ends - t_starts

        if self.training:
            geo_out = self.geometry(
                positions, output_normal=self.material.requires_normal
            )
            rgb_fg_all = self.material(
                viewdirs=t_dirs,
                positions=positions,
                light_positions=t_light_positions,
                **geo_out,
                **kwargs
            )
            comp_rgb_bg = self.background(dirs=rays_d)
        else:
            # Evaluate in chunks outside training to bound peak memory.
            geo_out = chunk_batch(
                self.geometry,
                self.cfg.eval_chunk_size,
                positions,
                output_normal=self.material.requires_normal,
            )
            rgb_fg_all = chunk_batch(
                self.material,
                self.cfg.eval_chunk_size,
                viewdirs=t_dirs,
                positions=positions,
                light_positions=t_light_positions,
                **geo_out
            )
            comp_rgb_bg = chunk_batch(
                self.background, self.cfg.eval_chunk_size, dirs=rays_d
            )

        weights: Float[Tensor, "Nr 1"]
        weights_, trans_, _ = nerfacc.render_weight_from_density(
            t_starts[..., 0],
            t_ends[..., 0],
            geo_out["density"][..., 0],
            ray_indices=ray_indices,
            n_rays=n_rays,
        )
        if self.training and self.cfg.estimator == "proposal":
            # Stash transmittance for the proposal-network update in
            # update_step_end().
            self.vars_in_forward["trans"] = trans_.reshape(n_rays, -1)

        weights = weights_[..., None]
        opacity: Float[Tensor, "Nr 1"] = nerfacc.accumulate_along_rays(
            weights[..., 0], values=None, ray_indices=ray_indices, n_rays=n_rays
        )
        depth: Float[Tensor, "Nr 1"] = nerfacc.accumulate_along_rays(
            weights[..., 0], values=t_positions, ray_indices=ray_indices, n_rays=n_rays
        )
        comp_rgb_fg: Float[Tensor, "Nr Nc"] = nerfacc.accumulate_along_rays(
            weights[..., 0], values=rgb_fg_all, ray_indices=ray_indices, n_rays=n_rays
        )

        # populate depth and opacity to each point
        t_depth = depth[ray_indices]
        z_variance = nerfacc.accumulate_along_rays(
            weights[..., 0],
            values=(t_positions - t_depth) ** 2,
            ray_indices=ray_indices,
            n_rays=n_rays,
        )

        if bg_color is None:
            bg_color = comp_rgb_bg
        else:
            if bg_color.shape[:-1] == (batch_size,):
                # e.g. constant random color used for Zero123
                # [bs,3] -> [bs, 1, 1, 3]):
                bg_color = bg_color.unsqueeze(1).unsqueeze(1)
                # -> [bs, height, width, 3]):
                bg_color = bg_color.expand(-1, height, width, -1)

        if bg_color.shape[:-1] == (batch_size, height, width):
            bg_color = bg_color.reshape(batch_size * height * width, -1)

        # Alpha-composite foreground over background.
        comp_rgb = comp_rgb_fg + bg_color * (1.0 - opacity)

        out = {
            "comp_rgb": comp_rgb.view(batch_size, height, width, -1),
            "comp_rgb_fg": comp_rgb_fg.view(batch_size, height, width, -1),
            "comp_rgb_bg": comp_rgb_bg.view(batch_size, height, width, -1),
            "opacity": opacity.view(batch_size, height, width, 1),
            "depth": depth.view(batch_size, height, width, 1),
            "z_variance": z_variance.view(batch_size, height, width, 1),
        }

        if self.training:
            out.update(
                {
                    "weights": weights,
                    "t_points": t_positions,
                    "t_intervals": t_intervals,
                    "t_dirs": t_dirs,
                    "ray_indices": ray_indices,
                    "points": positions,
                    **geo_out,
                }
            )
            if "normal" in geo_out:
                if self.cfg.return_comp_normal:
                    comp_normal: Float[Tensor, "Nr 3"] = nerfacc.accumulate_along_rays(
                        weights[..., 0],
                        values=geo_out["normal"],
                        ray_indices=ray_indices,
                        n_rays=n_rays,
                    )
                    comp_normal = F.normalize(comp_normal, dim=-1)
                    comp_normal = (
                        (comp_normal + 1.0) / 2.0 * opacity
                    )  # for visualization
                    out.update(
                        {
                            "comp_normal": comp_normal.view(
                                batch_size, height, width, 3
                            ),
                        }
                    )
                if self.cfg.return_normal_perturb:
                    normal_perturb = self.geometry(
                        positions + torch.randn_like(positions) * 1e-2,
                        output_normal=self.material.requires_normal,
                    )["normal"]
                    out.update({"normal_perturb": normal_perturb})
        else:
            if "normal" in geo_out:
                comp_normal = nerfacc.accumulate_along_rays(
                    weights[..., 0],
                    values=geo_out["normal"],
                    ray_indices=ray_indices,
                    n_rays=n_rays,
                )
                comp_normal = F.normalize(comp_normal, dim=-1)
                comp_normal = (comp_normal + 1.0) / 2.0 * opacity  # for visualization
                out.update(
                    {
                        "comp_normal": comp_normal.view(batch_size, height, width, 3),
                    }
                )

        return out

    def update_step(
        self, epoch: int, global_step: int, on_load_weights: bool = False
    ) -> None:
        """Per-step hook: refresh the occupancy grid or the proposal-network
        gradient schedule."""
        if self.cfg.estimator == "occgrid":
            if self.cfg.grid_prune:

                def occ_eval_fn(x):
                    density = self.geometry.forward_density(x)
                    # approximate for 1 - torch.exp(-density * self.render_step_size) based on taylor series
                    return density * self.render_step_size

                # Skip when restoring weights so a checkpoint load does not
                # perturb the grid state.
                if self.training and not on_load_weights:
                    self.estimator.update_every_n_steps(
                        step=global_step, occ_eval_fn=occ_eval_fn
                    )
        elif self.cfg.estimator == "proposal":
            if self.training:
                requires_grad = self.proposal_requires_grad_fn(global_step)
                self.vars_in_forward["requires_grad"] = requires_grad
            else:
                self.vars_in_forward["requires_grad"] = False

    def update_step_end(self, epoch: int, global_step: int) -> None:
        # Train the proposal network against the transmittance recorded in
        # the preceding forward() call.
        if self.cfg.estimator == "proposal" and self.training:
            self.estimator.update_every_n_steps(
                self.vars_in_forward["trans"],
                self.vars_in_forward["requires_grad"],
                loss_scaler=1.0,
            )

    def train(self, mode=True):
        # Stratified sampling only while training (and if enabled in cfg).
        self.randomized = mode and self.cfg.randomized
        if self.cfg.estimator == "proposal":
            self.prop_net.train()
        return super().train(mode=mode)

    def eval(self):
        # Deterministic sampling during evaluation.
        self.randomized = False
        if self.cfg.estimator == "proposal":
            self.prop_net.eval()
        return super().eval()
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/renderers/patch_renderer.py | threestudio/models/renderers/patch_renderer.py | from dataclasses import dataclass
import torch
import torch.nn.functional as F
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.renderers.base import VolumeRenderer
from threestudio.utils.typing import *
@threestudio.register("patch-renderer")
class PatchRenderer(VolumeRenderer):
    """Wraps a base volume renderer.

    During training it renders a downsampled full view plus one random
    full-resolution patch, then pastes the patch into the upsampled global
    render. At eval time it defers entirely to the base renderer.
    """

    @dataclass
    class Config(VolumeRenderer.Config):
        patch_size: int = 128
        base_renderer_type: str = ""
        base_renderer: Optional[VolumeRenderer.Config] = None
        global_detach: bool = False
        global_downsample: int = 4

    cfg: Config

    def configure(
        self,
        geometry: BaseImplicitGeometry,
        material: BaseMaterial,
        background: BaseBackground,
    ) -> None:
        """Instantiate the wrapped renderer from the registry."""
        self.base_renderer = threestudio.find(self.cfg.base_renderer_type)(
            self.cfg.base_renderer,
            geometry=geometry,
            material=material,
            background=background,
        )

    def forward(
        self,
        rays_o: Float[Tensor, "B H W 3"],
        rays_d: Float[Tensor, "B H W 3"],
        light_positions: Float[Tensor, "B 3"],
        bg_color: Optional[Tensor] = None,
        **kwargs
    ) -> Dict[str, Float[Tensor, "..."]]:
        B, H, W, _ = rays_o.shape

        if self.base_renderer.training:
            # Low-resolution global render.
            downsample = self.cfg.global_downsample
            global_rays_o = torch.nn.functional.interpolate(
                rays_o.permute(0, 3, 1, 2),
                (H // downsample, W // downsample),
                mode="bilinear",
            ).permute(0, 2, 3, 1)
            global_rays_d = torch.nn.functional.interpolate(
                rays_d.permute(0, 3, 1, 2),
                (H // downsample, W // downsample),
                mode="bilinear",
            ).permute(0, 2, 3, 1)
            out_global = self.base_renderer(
                global_rays_o, global_rays_d, light_positions, bg_color, **kwargs
            )

            # Random full-resolution patch.
            # NOTE(review): randint(0, W - PS) requires W > patch_size (and
            # H > patch_size) -- confirm callers never render at patch size.
            PS = self.cfg.patch_size
            patch_x = torch.randint(0, W - PS, (1,)).item()
            patch_y = torch.randint(0, H - PS, (1,)).item()
            patch_rays_o = rays_o[:, patch_y : patch_y + PS, patch_x : patch_x + PS]
            patch_rays_d = rays_d[:, patch_y : patch_y + PS, patch_x : patch_x + PS]
            out = self.base_renderer(
                patch_rays_o, patch_rays_d, light_positions, bg_color, **kwargs
            )

            # Only image-shaped tensors (same rank and spatial shape as
            # comp_rgb) can receive the pasted patch.
            valid_patch_key = []
            for key in out:
                if torch.is_tensor(out[key]):
                    if len(out[key].shape) == len(out["comp_rgb"].shape):
                        if out[key][..., 0].shape == out["comp_rgb"][..., 0].shape:
                            valid_patch_key.append(key)

            for key in valid_patch_key:
                # Upsample the global map to full resolution, optionally cut
                # its gradients, then paste the patch in place.
                out_global[key] = F.interpolate(
                    out_global[key].permute(0, 3, 1, 2), (H, W), mode="bilinear"
                ).permute(0, 2, 3, 1)
                if self.cfg.global_detach:
                    out_global[key] = out_global[key].detach()
                out_global[key][
                    :, patch_y : patch_y + PS, patch_x : patch_x + PS
                ] = out[key]
            out = out_global
        else:
            out = self.base_renderer(
                rays_o, rays_d, light_positions, bg_color, **kwargs
            )

        return out

    def update_step(
        self, epoch: int, global_step: int, on_load_weights: bool = False
    ) -> None:
        # Delegate scheduling to the wrapped renderer.
        self.base_renderer.update_step(epoch, global_step, on_load_weights)

    def train(self, mode=True):
        return self.base_renderer.train(mode)

    def eval(self):
        return self.base_renderer.eval()
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/renderers/__init__.py | threestudio/models/renderers/__init__.py | from . import (
base,
deferred_volume_renderer,
gan_volume_renderer,
nerf_volume_renderer,
neus_volume_renderer,
nvdiff_rasterizer,
patch_renderer,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/renderers/base.py | threestudio/models/renderers/base.py | from dataclasses import dataclass
import nerfacc
import torch
import torch.nn.functional as F
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.utils.base import BaseModule
from threestudio.utils.typing import *
class Renderer(BaseModule):
    """Base class for all renderers.

    Holds the geometry/material/background submodules (without registering
    them as child modules) and a cubic scene bounding box with half-extent
    ``cfg.radius``.
    """

    @dataclass
    class Config(BaseModule.Config):
        radius: float = 1.0

    cfg: Config

    def configure(
        self,
        geometry: BaseImplicitGeometry,
        material: BaseMaterial,
        background: BaseBackground,
    ) -> None:
        # keep references to submodules using namedtuple, avoid being registered as modules
        @dataclass
        class SubModules:
            geometry: BaseImplicitGeometry
            material: BaseMaterial
            background: BaseBackground

        self.sub_modules = SubModules(geometry, material, background)

        # set up bounding box: [[-r, -r, -r], [r, r, r]]
        self.bbox: Float[Tensor, "2 3"]
        self.register_buffer(
            "bbox",
            torch.as_tensor(
                [
                    [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
                    [self.cfg.radius, self.cfg.radius, self.cfg.radius],
                ],
                dtype=torch.float32,
            ),
        )

    def forward(self, *args, **kwargs) -> Dict[str, Any]:
        raise NotImplementedError

    @property
    def geometry(self) -> BaseImplicitGeometry:
        return self.sub_modules.geometry

    @property
    def material(self) -> BaseMaterial:
        return self.sub_modules.material

    @property
    def background(self) -> BaseBackground:
        return self.sub_modules.background

    def set_geometry(self, geometry: BaseImplicitGeometry) -> None:
        self.sub_modules.geometry = geometry

    def set_material(self, material: BaseMaterial) -> None:
        self.sub_modules.material = material

    def set_background(self, background: BaseBackground) -> None:
        self.sub_modules.background = background
class VolumeRenderer(Renderer):
    """Marker base class for volume (ray-marching) renderers."""

    pass
class Rasterizer(Renderer):
    """Marker base class for rasterization-based renderers."""

    pass
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/renderers/gan_volume_renderer.py | threestudio/models/renderers/gan_volume_renderer.py | from dataclasses import dataclass
import torch
import torch.nn.functional as F
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.renderers.base import VolumeRenderer
from threestudio.utils.GAN.discriminator import NLayerDiscriminator, weights_init
from threestudio.utils.GAN.distribution import DiagonalGaussianDistribution
from threestudio.utils.GAN.mobilenet import MobileNetV3 as GlobalEncoder
from threestudio.utils.GAN.vae import Decoder as Generator
from threestudio.utils.GAN.vae import Encoder as LocalEncoder
from threestudio.utils.typing import *
@threestudio.register("gan-volume-renderer")
class GANVolumeRenderer(VolumeRenderer):
    """Volume renderer followed by a GAN refinement stage.

    The base renderer produces a low-resolution image whose channels split
    into RGB (first 3) and a latent (the rest). A VAE-style generator,
    conditioned on a global image code, upsamples the RGB+latent to full
    resolution; a patch discriminator is built for adversarial training.
    """

    @dataclass
    class Config(VolumeRenderer.Config):
        base_renderer_type: str = ""
        base_renderer: Optional[VolumeRenderer.Config] = None

    cfg: Config

    def configure(
        self,
        geometry: BaseImplicitGeometry,
        material: BaseMaterial,
        background: BaseBackground,
    ) -> None:
        """Build the wrapped renderer and the GAN components."""
        self.base_renderer = threestudio.find(self.cfg.base_renderer_type)(
            self.cfg.base_renderer,
            geometry=geometry,
            material=material,
            background=background,
        )
        self.ch_mult = [1, 2, 4]
        # Generator takes 7 channels (3 rgb + 4 latent) plus a global code.
        self.generator = Generator(
            ch=64,
            out_ch=3,
            ch_mult=self.ch_mult,
            num_res_blocks=1,
            attn_resolutions=[],
            dropout=0.0,
            resamp_with_conv=True,
            in_channels=7,
            resolution=512,
            z_channels=4,
        )
        # Encodes a ground-truth image into a latent distribution (level 2).
        self.local_encoder = LocalEncoder(
            ch=32,
            out_ch=3,
            ch_mult=self.ch_mult,
            num_res_blocks=1,
            attn_resolutions=[],
            dropout=0.0,
            resamp_with_conv=True,
            in_channels=3,
            resolution=512,
            z_channels=4,
        )
        self.global_encoder = GlobalEncoder(n_class=64)
        self.discriminator = NLayerDiscriminator(
            input_nc=3, n_layers=3, use_actnorm=False, ndf=64
        ).apply(weights_init)

    def forward(
        self,
        rays_o: Float[Tensor, "B H W 3"],
        rays_d: Float[Tensor, "B H W 3"],
        light_positions: Float[Tensor, "B 3"],
        bg_color: Optional[Tensor] = None,
        gt_rgb: Float[Tensor, "B H W 3"] = None,
        multi_level_guidance: Bool = False,
        **kwargs
    ) -> Dict[str, Float[Tensor, "..."]]:
        B, H, W, _ = rays_o.shape
        if gt_rgb is not None and multi_level_guidance:
            # Pick a random guidance level and render a strided subset of
            # rays for direct supervision against gt_rgb.
            generator_level = torch.randint(0, 3, (1,)).item()
            interval_x = torch.randint(0, 8, (1,)).item()
            interval_y = torch.randint(0, 8, (1,)).item()
            int_rays_o = rays_o[:, interval_y::8, interval_x::8]
            int_rays_d = rays_d[:, interval_y::8, interval_x::8]
            out = self.base_renderer(
                int_rays_o, int_rays_d, light_positions, bg_color, **kwargs
            )
            comp_int_rgb = out["comp_rgb"][..., :3]
            comp_gt_rgb = gt_rgb[:, interval_y::8, interval_x::8]
        else:
            generator_level = 0

        # Render at reduced resolution; the generator restores full size
        # (scale factor matches the generator's upsampling depth).
        scale_ratio = 2 ** (len(self.ch_mult) - 1)
        rays_o = torch.nn.functional.interpolate(
            rays_o.permute(0, 3, 1, 2),
            (H // scale_ratio, W // scale_ratio),
            mode="bilinear",
        ).permute(0, 2, 3, 1)
        rays_d = torch.nn.functional.interpolate(
            rays_d.permute(0, 3, 1, 2),
            (H // scale_ratio, W // scale_ratio),
            mode="bilinear",
        ).permute(0, 2, 3, 1)

        out = self.base_renderer(rays_o, rays_d, light_positions, bg_color, **kwargs)
        # First 3 channels are rgb; remaining channels parameterize a
        # diagonal Gaussian latent.
        comp_rgb = out["comp_rgb"][..., :3]
        latent = out["comp_rgb"][..., 3:]
        out["comp_lr_rgb"] = comp_rgb.clone()

        posterior = DiagonalGaussianDistribution(latent.permute(0, 3, 1, 2))
        if multi_level_guidance:
            z_map = posterior.sample()
        else:
            z_map = posterior.mode()
        lr_rgb = comp_rgb.permute(0, 3, 1, 2)

        if generator_level == 0:
            # Condition the generator on the rendered image itself.
            g_code_rgb = self.global_encoder(F.interpolate(lr_rgb, (224, 224)))
            comp_gan_rgb = self.generator(torch.cat([lr_rgb, z_map], dim=1), g_code_rgb)
        elif generator_level == 1:
            # Condition on the ground-truth image.
            g_code_rgb = self.global_encoder(
                F.interpolate(gt_rgb.permute(0, 3, 1, 2), (224, 224))
            )
            comp_gan_rgb = self.generator(torch.cat([lr_rgb, z_map], dim=1), g_code_rgb)
        elif generator_level == 2:
            # Condition on gt and sample the latent from the gt encoder;
            # note `posterior` is rebound here and returned below.
            g_code_rgb = self.global_encoder(
                F.interpolate(gt_rgb.permute(0, 3, 1, 2), (224, 224))
            )
            l_code_rgb = self.local_encoder(gt_rgb.permute(0, 3, 1, 2))
            posterior = DiagonalGaussianDistribution(l_code_rgb)
            z_map = posterior.sample()
            comp_gan_rgb = self.generator(torch.cat([lr_rgb, z_map], dim=1), g_code_rgb)

        comp_rgb = F.interpolate(comp_rgb.permute(0, 3, 1, 2), (H, W), mode="bilinear")
        comp_gan_rgb = F.interpolate(comp_gan_rgb, (H, W), mode="bilinear")
        out.update(
            {
                "posterior": posterior,
                "comp_gan_rgb": comp_gan_rgb.permute(0, 2, 3, 1),
                "comp_rgb": comp_rgb.permute(0, 2, 3, 1),
                "generator_level": generator_level,
            }
        )

        if gt_rgb is not None and multi_level_guidance:
            out.update({"comp_int_rgb": comp_int_rgb, "comp_gt_rgb": comp_gt_rgb})
        return out

    def update_step(
        self, epoch: int, global_step: int, on_load_weights: bool = False
    ) -> None:
        # Delegate scheduling to the wrapped renderer.
        self.base_renderer.update_step(epoch, global_step, on_load_weights)

    def train(self, mode=True):
        return self.base_renderer.train(mode)

    def eval(self):
        return self.base_renderer.eval()
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/geometry/implicit_volume.py | threestudio/models/geometry/implicit_volume.py | from dataclasses import dataclass, field
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.geometry.base import (
BaseGeometry,
BaseImplicitGeometry,
contract_to_unisphere,
)
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import *
@threestudio.register("implicit-volume")
class ImplicitVolume(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
    """Configuration for ImplicitVolume: a hash-grid-encoded density field
    with optional feature and normal heads."""

    n_input_dims: int = 3
    n_feature_dims: int = 3
    density_activation: Optional[str] = "softplus"
    density_bias: Union[float, str] = "blob_magic3d"
    density_blob_scale: float = 10.0
    density_blob_std: float = 0.5
    pos_encoding_config: dict = field(
        default_factory=lambda: {
            "otype": "HashGrid",
            "n_levels": 16,
            "n_features_per_level": 2,
            "log2_hashmap_size": 19,
            "base_resolution": 16,
            "per_level_scale": 1.447269237440378,
        }
    )
    mlp_network_config: dict = field(
        default_factory=lambda: {
            "otype": "VanillaMLP",
            "activation": "ReLU",
            "output_activation": "none",
            "n_neurons": 64,
            "n_hidden_layers": 1,
        }
    )
    normal_type: Optional[
        str
    ] = "finite_difference"  # in ['pred', 'finite_difference', 'finite_difference_laplacian']
    finite_difference_normal_eps: float = 0.01

    # automatically determine the threshold
    isosurface_threshold: Union[float, str] = 25.0
cfg: Config
def configure(self) -> None:
    """Build the positional encoding plus the density, feature and
    (optionally) normal MLP heads."""
    super().configure()
    self.encoding = get_encoding(
        self.cfg.n_input_dims, self.cfg.pos_encoding_config
    )
    self.density_network = get_mlp(
        self.encoding.n_output_dims, 1, self.cfg.mlp_network_config
    )
    if self.cfg.n_feature_dims > 0:
        self.feature_network = get_mlp(
            self.encoding.n_output_dims,
            self.cfg.n_feature_dims,
            self.cfg.mlp_network_config,
        )
    if self.cfg.normal_type == "pred":
        # Extra head that predicts normals directly instead of finite
        # differences.
        self.normal_network = get_mlp(
            self.encoding.n_output_dims, 3, self.cfg.mlp_network_config
        )
def get_activated_density(
self, points: Float[Tensor, "*N Di"], density: Float[Tensor, "*N 1"]
) -> Tuple[Float[Tensor, "*N 1"], Float[Tensor, "*N 1"]]:
density_bias: Union[float, Float[Tensor, "*N 1"]]
if self.cfg.density_bias == "blob_dreamfusion":
# pre-activation density bias
density_bias = (
self.cfg.density_blob_scale
* torch.exp(
-0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2
)[..., None]
)
elif self.cfg.density_bias == "blob_magic3d":
# pre-activation density bias
density_bias = (
self.cfg.density_blob_scale
* (
1
- torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std
)[..., None]
)
elif isinstance(self.cfg.density_bias, float):
density_bias = self.cfg.density_bias
else:
raise ValueError(f"Unknown density bias {self.cfg.density_bias}")
raw_density: Float[Tensor, "*N 1"] = density + density_bias
density = get_activation(self.cfg.density_activation)(raw_density)
return raw_density, density
def forward(
self, points: Float[Tensor, "*N Di"], output_normal: bool = False
) -> Dict[str, Float[Tensor, "..."]]:
grad_enabled = torch.is_grad_enabled()
if output_normal and self.cfg.normal_type == "analytic":
torch.set_grad_enabled(True)
points.requires_grad_(True)
points_unscaled = points # points in the original scale
points = contract_to_unisphere(
points, self.bbox, self.unbounded
) # points normalized to (0, 1)
enc = self.encoding(points.view(-1, self.cfg.n_input_dims))
density = self.density_network(enc).view(*points.shape[:-1], 1)
raw_density, density = self.get_activated_density(points_unscaled, density)
output = {
"density": density,
}
if self.cfg.n_feature_dims > 0:
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
output.update({"features": features})
if output_normal:
if (
self.cfg.normal_type == "finite_difference"
or self.cfg.normal_type == "finite_difference_laplacian"
):
# TODO: use raw density
eps = self.cfg.finite_difference_normal_eps
if self.cfg.normal_type == "finite_difference_laplacian":
offsets: Float[Tensor, "6 3"] = torch.as_tensor(
[
[eps, 0.0, 0.0],
[-eps, 0.0, 0.0],
[0.0, eps, 0.0],
[0.0, -eps, 0.0],
[0.0, 0.0, eps],
[0.0, 0.0, -eps],
]
).to(points_unscaled)
points_offset: Float[Tensor, "... 6 3"] = (
points_unscaled[..., None, :] + offsets
).clamp(-self.cfg.radius, self.cfg.radius)
density_offset: Float[Tensor, "... 6 1"] = self.forward_density(
points_offset
)
normal = (
-0.5
* (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])
/ eps
)
else:
offsets: Float[Tensor, "3 3"] = torch.as_tensor(
[[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]
).to(points_unscaled)
points_offset: Float[Tensor, "... 3 3"] = (
points_unscaled[..., None, :] + offsets
).clamp(-self.cfg.radius, self.cfg.radius)
density_offset: Float[Tensor, "... 3 1"] = self.forward_density(
points_offset
)
normal = -(density_offset[..., 0::1, 0] - density) / eps
normal = F.normalize(normal, dim=-1)
elif self.cfg.normal_type == "pred":
normal = self.normal_network(enc).view(*points.shape[:-1], 3)
normal = F.normalize(normal, dim=-1)
elif self.cfg.normal_type == "analytic":
normal = -torch.autograd.grad(
density,
points_unscaled,
grad_outputs=torch.ones_like(density),
create_graph=True,
)[0]
normal = F.normalize(normal, dim=-1)
if not grad_enabled:
normal = normal.detach()
else:
raise AttributeError(f"Unknown normal type {self.cfg.normal_type}")
output.update({"normal": normal, "shading_normal": normal})
torch.set_grad_enabled(grad_enabled)
return output
def forward_density(self, points: Float[Tensor, "*N Di"]) -> Float[Tensor, "*N 1"]:
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)
density = self.density_network(
self.encoding(points.reshape(-1, self.cfg.n_input_dims))
).reshape(*points.shape[:-1], 1)
_, density = self.get_activated_density(points_unscaled, density)
return density
def forward_field(
self, points: Float[Tensor, "*N Di"]
) -> Tuple[Float[Tensor, "*N 1"], Optional[Float[Tensor, "*N 3"]]]:
if self.cfg.isosurface_deformable_grid:
threestudio.warn(
f"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring."
)
density = self.forward_density(points)
return density, None
def forward_level(
self, field: Float[Tensor, "*N 1"], threshold: float
) -> Float[Tensor, "*N 1"]:
return -(field - threshold)
def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str, Any]:
out: Dict[str, Any] = {}
if self.cfg.n_feature_dims == 0:
return out
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)
enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
out.update(
{
"features": features,
}
)
return out
@staticmethod
@torch.no_grad()
def create_from(
other: BaseGeometry,
cfg: Optional[Union[dict, DictConfig]] = None,
copy_net: bool = True,
**kwargs,
) -> "ImplicitVolume":
if isinstance(other, ImplicitVolume):
instance = ImplicitVolume(cfg, **kwargs)
instance.encoding.load_state_dict(other.encoding.state_dict())
instance.density_network.load_state_dict(other.density_network.state_dict())
if copy_net:
if (
instance.cfg.n_feature_dims > 0
and other.cfg.n_feature_dims == instance.cfg.n_feature_dims
):
instance.feature_network.load_state_dict(
other.feature_network.state_dict()
)
if (
instance.cfg.normal_type == "pred"
and other.cfg.normal_type == "pred"
):
instance.normal_network.load_state_dict(
other.normal_network.state_dict()
)
return instance
else:
raise TypeError(
f"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}"
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/geometry/tetrahedra_sdf_grid.py | threestudio/models/geometry/tetrahedra_sdf_grid.py | import os
from dataclasses import dataclass, field
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.geometry.base import (
BaseExplicitGeometry,
BaseGeometry,
contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
@threestudio.register("tetrahedra-sdf-grid")
class TetrahedraSDFGrid(BaseExplicitGeometry):
@dataclass
class Config(BaseExplicitGeometry.Config):
isosurface_resolution: int = 128
isosurface_deformable_grid: bool = True
isosurface_remove_outliers: bool = False
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
n_input_dims: int = 3
n_feature_dims: int = 3
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
shape_init: Optional[str] = None
shape_init_params: Optional[Any] = None
shape_init_mesh_up: str = "+z"
shape_init_mesh_front: str = "+x"
force_shape_init: bool = False
geometry_only: bool = False
fix_geometry: bool = False
cfg: Config
def configure(self) -> None:
super().configure()
# this should be saved to state_dict, register as buffer
self.isosurface_bbox: Float[Tensor, "2 3"]
self.register_buffer("isosurface_bbox", self.bbox.clone())
self.isosurface_helper = MarchingTetrahedraHelper(
self.cfg.isosurface_resolution,
f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
)
self.sdf: Float[Tensor, "Nv 1"]
self.deformation: Optional[Float[Tensor, "Nv 3"]]
if not self.cfg.fix_geometry:
self.register_parameter(
"sdf",
nn.Parameter(
torch.zeros(
(self.isosurface_helper.grid_vertices.shape[0], 1),
dtype=torch.float32,
)
),
)
if self.cfg.isosurface_deformable_grid:
self.register_parameter(
"deformation",
nn.Parameter(
torch.zeros_like(self.isosurface_helper.grid_vertices)
),
)
else:
self.deformation = None
else:
self.register_buffer(
"sdf",
torch.zeros(
(self.isosurface_helper.grid_vertices.shape[0], 1),
dtype=torch.float32,
),
)
if self.cfg.isosurface_deformable_grid:
self.register_buffer(
"deformation",
torch.zeros_like(self.isosurface_helper.grid_vertices),
)
else:
self.deformation = None
if not self.cfg.geometry_only:
self.encoding = get_encoding(
self.cfg.n_input_dims, self.cfg.pos_encoding_config
)
self.feature_network = get_mlp(
self.encoding.n_output_dims,
self.cfg.n_feature_dims,
self.cfg.mlp_network_config,
)
self.mesh: Optional[Mesh] = None
def initialize_shape(self) -> None:
if self.cfg.shape_init is None and not self.cfg.force_shape_init:
return
# do not initialize shape if weights are provided
if self.cfg.weights is not None and not self.cfg.force_shape_init:
return
get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]]
assert isinstance(self.cfg.shape_init, str)
if self.cfg.shape_init == "ellipsoid":
assert (
isinstance(self.cfg.shape_init_params, Sized)
and len(self.cfg.shape_init_params) == 3
)
size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
return ((points_rand / size) ** 2).sum(
dim=-1, keepdim=True
).sqrt() - 1.0 # pseudo signed distance of an ellipsoid
get_gt_sdf = func
elif self.cfg.shape_init == "sphere":
assert isinstance(self.cfg.shape_init_params, float)
radius = self.cfg.shape_init_params
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius
get_gt_sdf = func
elif self.cfg.shape_init.startswith("mesh:"):
assert isinstance(self.cfg.shape_init_params, float)
mesh_path = self.cfg.shape_init[5:]
if not os.path.exists(mesh_path):
raise ValueError(f"Mesh file {mesh_path} does not exist.")
import trimesh
mesh = trimesh.load(mesh_path)
# move to center
centroid = mesh.vertices.mean(0)
mesh.vertices = mesh.vertices - centroid
# align to up-z and front-x
dirs = ["+x", "+y", "+z", "-x", "-y", "-z"]
dir2vec = {
"+x": np.array([1, 0, 0]),
"+y": np.array([0, 1, 0]),
"+z": np.array([0, 0, 1]),
"-x": np.array([-1, 0, 0]),
"-y": np.array([0, -1, 0]),
"-z": np.array([0, 0, -1]),
}
if (
self.cfg.shape_init_mesh_up not in dirs
or self.cfg.shape_init_mesh_front not in dirs
):
raise ValueError(
f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}."
)
if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:
raise ValueError(
"shape_init_mesh_up and shape_init_mesh_front must be orthogonal."
)
z_, x_ = (
dir2vec[self.cfg.shape_init_mesh_up],
dir2vec[self.cfg.shape_init_mesh_front],
)
y_ = np.cross(z_, x_)
std2mesh = np.stack([x_, y_, z_], axis=0).T
mesh2std = np.linalg.inv(std2mesh)
# scaling
scale = np.abs(mesh.vertices).max()
mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T
from pysdf import SDF
sdf = SDF(mesh.vertices, mesh.faces)
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
# add a negative signed here
# as in pysdf the inside of the shape has positive signed distance
return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(
points_rand
)[..., None]
get_gt_sdf = func
else:
raise ValueError(
f"Unknown shape initialization type: {self.cfg.shape_init}"
)
sdf_gt = get_gt_sdf(
scale_tensor(
self.isosurface_helper.grid_vertices,
self.isosurface_helper.points_range,
self.isosurface_bbox,
)
)
self.sdf.data = sdf_gt
# explicit broadcast to ensure param consistency across ranks
for param in self.parameters():
broadcast(param, src=0)
def isosurface(self) -> Mesh:
# return cached mesh if fix_geometry is True to save computation
if self.cfg.fix_geometry and self.mesh is not None:
return self.mesh
mesh = self.isosurface_helper(self.sdf, self.deformation)
mesh.v_pos = scale_tensor(
mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox
)
if self.cfg.isosurface_remove_outliers:
mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)
self.mesh = mesh
return mesh
def forward(
self, points: Float[Tensor, "*N Di"], output_normal: bool = False
) -> Dict[str, Float[Tensor, "..."]]:
if self.cfg.geometry_only:
return {}
assert (
output_normal == False
), f"Normal output is not supported for {self.__class__.__name__}"
points_unscaled = points # points in the original scale
points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)
enc = self.encoding(points.view(-1, self.cfg.n_input_dims))
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
return {"features": features}
@staticmethod
@torch.no_grad()
def create_from(
other: BaseGeometry,
cfg: Optional[Union[dict, DictConfig]] = None,
copy_net: bool = True,
**kwargs,
) -> "TetrahedraSDFGrid":
if isinstance(other, TetrahedraSDFGrid):
instance = TetrahedraSDFGrid(cfg, **kwargs)
assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution
instance.isosurface_bbox = other.isosurface_bbox.clone()
instance.sdf.data = other.sdf.data.clone()
if (
instance.cfg.isosurface_deformable_grid
and other.cfg.isosurface_deformable_grid
):
assert (
instance.deformation is not None and other.deformation is not None
)
instance.deformation.data = other.deformation.data.clone()
if (
not instance.cfg.geometry_only
and not other.cfg.geometry_only
and copy_net
):
instance.encoding.load_state_dict(other.encoding.state_dict())
instance.feature_network.load_state_dict(
other.feature_network.state_dict()
)
return instance
elif isinstance(other, ImplicitVolume):
instance = TetrahedraSDFGrid(cfg, **kwargs)
if other.cfg.isosurface_method != "mt":
other.cfg.isosurface_method = "mt"
threestudio.warn(
f"Override isosurface_method of the source geometry to 'mt'"
)
if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution:
other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution
threestudio.warn(
f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}"
)
mesh = other.isosurface()
instance.isosurface_bbox = mesh.extras["bbox"]
instance.sdf.data = (
mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1)
)
if not instance.cfg.geometry_only and copy_net:
instance.encoding.load_state_dict(other.encoding.state_dict())
instance.feature_network.load_state_dict(
other.feature_network.state_dict()
)
return instance
elif isinstance(other, ImplicitSDF):
instance = TetrahedraSDFGrid(cfg, **kwargs)
if other.cfg.isosurface_method != "mt":
other.cfg.isosurface_method = "mt"
threestudio.warn(
f"Override isosurface_method of the source geometry to 'mt'"
)
if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution:
other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution
threestudio.warn(
f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}"
)
mesh = other.isosurface()
instance.isosurface_bbox = mesh.extras["bbox"]
instance.sdf.data = mesh.extras["grid_level"].to(instance.sdf.data)
if (
instance.cfg.isosurface_deformable_grid
and other.cfg.isosurface_deformable_grid
):
assert instance.deformation is not None
instance.deformation.data = mesh.extras["grid_deformation"].to(
instance.deformation.data
)
if not instance.cfg.geometry_only and copy_net:
instance.encoding.load_state_dict(other.encoding.state_dict())
instance.feature_network.load_state_dict(
other.feature_network.state_dict()
)
return instance
else:
raise TypeError(
f"Cannot create {TetrahedraSDFGrid.__name__} from {other.__class__.__name__}"
)
def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str, Any]:
out: Dict[str, Any] = {}
if self.cfg.geometry_only or self.cfg.n_feature_dims == 0:
return out
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox)
enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
out.update(
{
"features": features,
}
)
return out
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/geometry/custom_mesh.py | threestudio/models/geometry/custom_mesh.py | import os
from dataclasses import dataclass, field
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.geometry.base import (
BaseExplicitGeometry,
BaseGeometry,
contract_to_unisphere,
)
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
@threestudio.register("custom-mesh")
class CustomMesh(BaseExplicitGeometry):
@dataclass
class Config(BaseExplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
shape_init: str = ""
shape_init_params: Optional[Any] = None
shape_init_mesh_up: str = "+z"
shape_init_mesh_front: str = "+x"
cfg: Config
def configure(self) -> None:
super().configure()
self.encoding = get_encoding(
self.cfg.n_input_dims, self.cfg.pos_encoding_config
)
self.feature_network = get_mlp(
self.encoding.n_output_dims,
self.cfg.n_feature_dims,
self.cfg.mlp_network_config,
)
# Initialize custom mesh
if self.cfg.shape_init.startswith("mesh:"):
assert isinstance(self.cfg.shape_init_params, float)
mesh_path = self.cfg.shape_init[5:]
if not os.path.exists(mesh_path):
raise ValueError(f"Mesh file {mesh_path} does not exist.")
import trimesh
scene = trimesh.load(mesh_path)
if isinstance(scene, trimesh.Trimesh):
mesh = scene
elif isinstance(scene, trimesh.scene.Scene):
mesh = trimesh.Trimesh()
for obj in scene.geometry.values():
mesh = trimesh.util.concatenate([mesh, obj])
else:
raise ValueError(f"Unknown mesh type at {mesh_path}.")
# move to center
centroid = mesh.vertices.mean(0)
mesh.vertices = mesh.vertices - centroid
# align to up-z and front-x
dirs = ["+x", "+y", "+z", "-x", "-y", "-z"]
dir2vec = {
"+x": np.array([1, 0, 0]),
"+y": np.array([0, 1, 0]),
"+z": np.array([0, 0, 1]),
"-x": np.array([-1, 0, 0]),
"-y": np.array([0, -1, 0]),
"-z": np.array([0, 0, -1]),
}
if (
self.cfg.shape_init_mesh_up not in dirs
or self.cfg.shape_init_mesh_front not in dirs
):
raise ValueError(
f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}."
)
if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:
raise ValueError(
"shape_init_mesh_up and shape_init_mesh_front must be orthogonal."
)
z_, x_ = (
dir2vec[self.cfg.shape_init_mesh_up],
dir2vec[self.cfg.shape_init_mesh_front],
)
y_ = np.cross(z_, x_)
std2mesh = np.stack([x_, y_, z_], axis=0).T
mesh2std = np.linalg.inv(std2mesh)
# scaling
scale = np.abs(mesh.vertices).max()
mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T
v_pos = torch.tensor(mesh.vertices, dtype=torch.float32).to(self.device)
t_pos_idx = torch.tensor(mesh.faces, dtype=torch.int64).to(self.device)
self.mesh = Mesh(v_pos=v_pos, t_pos_idx=t_pos_idx)
self.register_buffer(
"v_buffer",
v_pos,
)
self.register_buffer(
"t_buffer",
t_pos_idx,
)
else:
raise ValueError(
f"Unknown shape initialization type: {self.cfg.shape_init}"
)
print(self.mesh.v_pos.device)
def isosurface(self) -> Mesh:
if hasattr(self, "mesh"):
return self.mesh
elif hasattr(self, "v_buffer"):
self.mesh = Mesh(v_pos=self.v_buffer, t_pos_idx=self.t_buffer)
return self.mesh
else:
raise ValueError(f"custom mesh is not initialized")
def forward(
self, points: Float[Tensor, "*N Di"], output_normal: bool = False
) -> Dict[str, Float[Tensor, "..."]]:
assert (
output_normal == False
), f"Normal output is not supported for {self.__class__.__name__}"
points_unscaled = points # points in the original scale
points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)
enc = self.encoding(points.view(-1, self.cfg.n_input_dims))
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
return {"features": features}
def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str, Any]:
out: Dict[str, Any] = {}
if self.cfg.n_feature_dims == 0:
return out
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox)
enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
out.update(
{
"features": features,
}
)
return out
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/geometry/volume_grid.py | threestudio/models/geometry/volume_grid.py | from dataclasses import dataclass, field
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.geometry.base import BaseImplicitGeometry, contract_to_unisphere
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import *
@threestudio.register("volume-grid")
class VolumeGrid(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
grid_size: Tuple[int, int, int] = field(default_factory=lambda: (100, 100, 100))
n_feature_dims: int = 3
density_activation: Optional[str] = "softplus"
density_bias: Union[float, str] = "blob"
density_blob_scale: float = 5.0
density_blob_std: float = 0.5
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
# automatically determine the threshold
isosurface_threshold: Union[float, str] = "auto"
cfg: Config
def configure(self) -> None:
super().configure()
self.grid_size = self.cfg.grid_size
self.grid = nn.Parameter(
torch.zeros(1, self.cfg.n_feature_dims + 1, *self.grid_size)
)
if self.cfg.density_bias == "blob":
self.register_buffer("density_scale", torch.tensor(0.0))
else:
self.density_scale = nn.Parameter(torch.tensor(0.0))
if self.cfg.normal_type == "pred":
self.normal_grid = nn.Parameter(torch.zeros(1, 3, *self.grid_size))
def get_density_bias(self, points: Float[Tensor, "*N Di"]):
if self.cfg.density_bias == "blob":
# density_bias: Float[Tensor, "*N 1"] = self.cfg.density_blob_scale * torch.exp(-0.5 * (points ** 2).sum(dim=-1) / self.cfg.density_blob_std ** 2)[...,None]
density_bias: Float[Tensor, "*N 1"] = (
self.cfg.density_blob_scale
* (
1
- torch.sqrt((points.detach() ** 2).sum(dim=-1))
/ self.cfg.density_blob_std
)[..., None]
)
return density_bias
elif isinstance(self.cfg.density_bias, float):
return self.cfg.density_bias
else:
raise AttributeError(f"Unknown density bias {self.cfg.density_bias}")
def get_trilinear_feature(
self, points: Float[Tensor, "*N Di"], grid: Float[Tensor, "1 Df G1 G2 G3"]
) -> Float[Tensor, "*N Df"]:
points_shape = points.shape[:-1]
df = grid.shape[1]
di = points.shape[-1]
out = F.grid_sample(
grid, points.view(1, 1, 1, -1, di), align_corners=False, mode="bilinear"
)
out = out.reshape(df, -1).T.reshape(*points_shape, df)
return out
def forward(
self, points: Float[Tensor, "*N Di"], output_normal: bool = False
) -> Dict[str, Float[Tensor, "..."]]:
points_unscaled = points # points in the original scale
points = contract_to_unisphere(
points, self.bbox, self.unbounded
) # points normalized to (0, 1)
points = points * 2 - 1 # convert to [-1, 1] for grid sample
out = self.get_trilinear_feature(points, self.grid)
density, features = out[..., 0:1], out[..., 1:]
density = density * torch.exp(self.density_scale) # exp scaling in DreamFusion
# breakpoint()
density = get_activation(self.cfg.density_activation)(
density + self.get_density_bias(points_unscaled)
)
output = {
"density": density,
"features": features,
}
if output_normal:
if (
self.cfg.normal_type == "finite_difference"
or self.cfg.normal_type == "finite_difference_laplacian"
):
eps = 1.0e-3
if self.cfg.normal_type == "finite_difference_laplacian":
offsets: Float[Tensor, "6 3"] = torch.as_tensor(
[
[eps, 0.0, 0.0],
[-eps, 0.0, 0.0],
[0.0, eps, 0.0],
[0.0, -eps, 0.0],
[0.0, 0.0, eps],
[0.0, 0.0, -eps],
]
).to(points_unscaled)
points_offset: Float[Tensor, "... 6 3"] = (
points_unscaled[..., None, :] + offsets
).clamp(-self.cfg.radius, self.cfg.radius)
density_offset: Float[Tensor, "... 6 1"] = self.forward_density(
points_offset
)
normal = (
-0.5
* (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])
/ eps
)
else:
offsets: Float[Tensor, "3 3"] = torch.as_tensor(
[[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]
).to(points_unscaled)
points_offset: Float[Tensor, "... 3 3"] = (
points_unscaled[..., None, :] + offsets
).clamp(-self.cfg.radius, self.cfg.radius)
density_offset: Float[Tensor, "... 3 1"] = self.forward_density(
points_offset
)
normal = -(density_offset[..., 0::1, 0] - density) / eps
normal = F.normalize(normal, dim=-1)
elif self.cfg.normal_type == "pred":
normal = self.get_trilinear_feature(points, self.normal_grid)
normal = F.normalize(normal, dim=-1)
else:
raise AttributeError(f"Unknown normal type {self.cfg.normal_type}")
output.update({"normal": normal, "shading_normal": normal})
return output
def forward_density(self, points: Float[Tensor, "*N Di"]) -> Float[Tensor, "*N 1"]:
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)
points = points * 2 - 1 # convert to [-1, 1] for grid sample
out = self.get_trilinear_feature(points, self.grid)
density = out[..., 0:1]
density = density * torch.exp(self.density_scale)
density = get_activation(self.cfg.density_activation)(
density + self.get_density_bias(points_unscaled)
)
return density
def forward_field(
self, points: Float[Tensor, "*N Di"]
) -> Tuple[Float[Tensor, "*N 1"], Optional[Float[Tensor, "*N 3"]]]:
if self.cfg.isosurface_deformable_grid:
threestudio.warn(
f"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring."
)
density = self.forward_density(points)
return density, None
def forward_level(
self, field: Float[Tensor, "*N 1"], threshold: float
) -> Float[Tensor, "*N 1"]:
return -(field - threshold)
def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str, Any]:
out: Dict[str, Any] = {}
if self.cfg.n_feature_dims == 0:
return out
points_unscaled = points
points = contract_to_unisphere(points, self.bbox, self.unbounded)
points = points * 2 - 1 # convert to [-1, 1] for grid sample
features = self.get_trilinear_feature(points, self.grid)[..., 1:]
out.update(
{
"features": features,
}
)
return out
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/geometry/implicit_sdf.py | threestudio/models/geometry/implicit_sdf.py | import os
from dataclasses import dataclass, field
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.geometry.base import BaseImplicitGeometry, contract_to_unisphere
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast, get_rank
from threestudio.utils.typing import *
@threestudio.register("implicit-sdf")
class ImplicitSDF(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
finite_difference_normal_eps: Union[
float, str
] = 0.01 # in [float, "progressive"]
shape_init: Optional[str] = None
shape_init_params: Optional[Any] = None
shape_init_mesh_up: str = "+z"
shape_init_mesh_front: str = "+x"
force_shape_init: bool = False
sdf_bias: Union[float, str] = 0.0
sdf_bias_params: Optional[Any] = None
# no need to removal outlier for SDF
isosurface_remove_outliers: bool = False
cfg: Config
def configure(self) -> None:
super().configure()
self.encoding = get_encoding(
self.cfg.n_input_dims, self.cfg.pos_encoding_config
)
self.sdf_network = get_mlp(
self.encoding.n_output_dims, 1, self.cfg.mlp_network_config
)
if self.cfg.n_feature_dims > 0:
self.feature_network = get_mlp(
self.encoding.n_output_dims,
self.cfg.n_feature_dims,
self.cfg.mlp_network_config,
)
if self.cfg.normal_type == "pred":
self.normal_network = get_mlp(
self.encoding.n_output_dims, 3, self.cfg.mlp_network_config
)
if self.cfg.isosurface_deformable_grid:
assert (
self.cfg.isosurface_method == "mt"
), "isosurface_deformable_grid only works with mt"
self.deformation_network = get_mlp(
self.encoding.n_output_dims, 3, self.cfg.mlp_network_config
)
self.finite_difference_normal_eps: Optional[float] = None
    def initialize_shape(self) -> None:
        """Fit the SDF network to a target shape before training.

        Runs a short Adam optimization matching the network's predicted SDF to
        a ground-truth signed distance derived from ``cfg.shape_init``
        ("ellipsoid", "sphere", or "mesh:<path>"). Skipped when no shape init
        is requested, or when pretrained weights are provided, unless
        ``cfg.force_shape_init`` is set.
        """
        if self.cfg.shape_init is None and not self.cfg.force_shape_init:
            return
        # do not initialize shape if weights are provided
        if self.cfg.weights is not None and not self.cfg.force_shape_init:
            return
        if self.cfg.sdf_bias != 0.0:
            threestudio.warn(
                "shape_init and sdf_bias are both specified, which may lead to unexpected results."
            )
        # get_gt_sdf maps (N, 3) sample points to their ground-truth signed distance
        get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]]
        assert isinstance(self.cfg.shape_init, str)
        if self.cfg.shape_init == "ellipsoid":
            assert (
                isinstance(self.cfg.shape_init_params, Sized)
                and len(self.cfg.shape_init_params) == 3
            )
            size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return ((points_rand / size) ** 2).sum(
                    dim=-1, keepdim=True
                ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid

            get_gt_sdf = func
        elif self.cfg.shape_init == "sphere":
            assert isinstance(self.cfg.shape_init_params, float)
            radius = self.cfg.shape_init_params

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius

            get_gt_sdf = func
        elif self.cfg.shape_init.startswith("mesh:"):
            assert isinstance(self.cfg.shape_init_params, float)
            mesh_path = self.cfg.shape_init[5:]
            if not os.path.exists(mesh_path):
                raise ValueError(f"Mesh file {mesh_path} does not exist.")
            import trimesh

            scene = trimesh.load(mesh_path)
            if isinstance(scene, trimesh.Trimesh):
                mesh = scene
            elif isinstance(scene, trimesh.scene.Scene):
                # flatten a multi-geometry scene into a single mesh
                mesh = trimesh.Trimesh()
                for obj in scene.geometry.values():
                    mesh = trimesh.util.concatenate([mesh, obj])
            else:
                raise ValueError(f"Unknown mesh type at {mesh_path}.")
            # move to center
            centroid = mesh.vertices.mean(0)
            mesh.vertices = mesh.vertices - centroid
            # align to up-z and front-x
            dirs = ["+x", "+y", "+z", "-x", "-y", "-z"]
            dir2vec = {
                "+x": np.array([1, 0, 0]),
                "+y": np.array([0, 1, 0]),
                "+z": np.array([0, 0, 1]),
                "-x": np.array([-1, 0, 0]),
                "-y": np.array([0, -1, 0]),
                "-z": np.array([0, 0, -1]),
            }
            if (
                self.cfg.shape_init_mesh_up not in dirs
                or self.cfg.shape_init_mesh_front not in dirs
            ):
                raise ValueError(
                    f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}."
                )
            # same axis letter (index 1 of "+x"/"-x"...) means parallel axes
            if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:
                raise ValueError(
                    "shape_init_mesh_up and shape_init_mesh_front must be orthogonal."
                )
            z_, x_ = (
                dir2vec[self.cfg.shape_init_mesh_up],
                dir2vec[self.cfg.shape_init_mesh_front],
            )
            y_ = np.cross(z_, x_)
            # rotation taking standard (x-front, y-left, z-up) frame to mesh frame
            std2mesh = np.stack([x_, y_, z_], axis=0).T
            mesh2std = np.linalg.inv(std2mesh)
            # scaling
            scale = np.abs(mesh.vertices).max()
            mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
            mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T

            from pysdf import SDF

            sdf = SDF(mesh.vertices, mesh.faces)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                # add a negative signed here
                # as in pysdf the inside of the shape has positive signed distance
                return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(
                    points_rand
                )[..., None]

            get_gt_sdf = func
        else:
            raise ValueError(
                f"Unknown shape initialization type: {self.cfg.shape_init}"
            )
        # Initialize SDF to a given shape when no weights are provided or force_shape_init is True
        optim = torch.optim.Adam(self.parameters(), lr=1e-3)
        from tqdm import tqdm

        for _ in tqdm(
            range(1000),
            desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:",
            disable=get_rank() != 0,
        ):
            # sample uniformly in the [-1, 1]^3 cube
            points_rand = (
                torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0
            )
            sdf_gt = get_gt_sdf(points_rand)
            sdf_pred = self.forward_sdf(points_rand)
            loss = F.mse_loss(sdf_pred, sdf_gt)
            optim.zero_grad()
            loss.backward()
            optim.step()
        # explicit broadcast to ensure param consistency across ranks
        for param in self.parameters():
            broadcast(param, src=0)
def get_shifted_sdf(
self, points: Float[Tensor, "*N Di"], sdf: Float[Tensor, "*N 1"]
) -> Float[Tensor, "*N 1"]:
sdf_bias: Union[float, Float[Tensor, "*N 1"]]
if self.cfg.sdf_bias == "ellipsoid":
assert (
isinstance(self.cfg.sdf_bias_params, Sized)
and len(self.cfg.sdf_bias_params) == 3
)
size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)
sdf_bias = ((points / size) ** 2).sum(
dim=-1, keepdim=True
).sqrt() - 1.0 # pseudo signed distance of an ellipsoid
elif self.cfg.sdf_bias == "sphere":
assert isinstance(self.cfg.sdf_bias_params, float)
radius = self.cfg.sdf_bias_params
sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius
elif isinstance(self.cfg.sdf_bias, float):
sdf_bias = self.cfg.sdf_bias
else:
raise ValueError(f"Unknown sdf bias {self.cfg.sdf_bias}")
return sdf + sdf_bias
    def forward(
        self, points: Float[Tensor, "*N Di"], output_normal: bool = False
    ) -> Dict[str, Float[Tensor, "..."]]:
        """Evaluate the SDF (and optional features/normals) at world points.

        Returns a dict with "sdf", optionally "features", and — when
        ``output_normal`` is True — "normal", "shading_normal" and "sdf_grad"
        computed per ``cfg.normal_type`` (finite differences, a dedicated
        prediction head, or autograd through the SDF).
        """
        grad_enabled = torch.is_grad_enabled()
        # analytic normals need a grad graph even under torch.no_grad()
        if output_normal and self.cfg.normal_type == "analytic":
            torch.set_grad_enabled(True)
            points.requires_grad_(True)
        points_unscaled = points # points in the original scale
        points = contract_to_unisphere(
            points, self.bbox, self.unbounded
        ) # points normalized to (0, 1)
        enc = self.encoding(points.view(-1, self.cfg.n_input_dims))
        sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)
        sdf = self.get_shifted_sdf(points_unscaled, sdf)
        output = {"sdf": sdf}
        if self.cfg.n_feature_dims > 0:
            features = self.feature_network(enc).view(
                *points.shape[:-1], self.cfg.n_feature_dims
            )
            output.update({"features": features})
        if output_normal:
            if (
                self.cfg.normal_type == "finite_difference"
                or self.cfg.normal_type == "finite_difference_laplacian"
            ):
                # eps is maintained by update_step (fixed or progressive)
                assert self.finite_difference_normal_eps is not None
                eps: float = self.finite_difference_normal_eps
                if self.cfg.normal_type == "finite_difference_laplacian":
                    # central differences: +/- eps along each axis (6 taps)
                    offsets: Float[Tensor, "6 3"] = torch.as_tensor(
                        [
                            [eps, 0.0, 0.0],
                            [-eps, 0.0, 0.0],
                            [0.0, eps, 0.0],
                            [0.0, -eps, 0.0],
                            [0.0, 0.0, eps],
                            [0.0, 0.0, -eps],
                        ]
                    ).to(points_unscaled)
                    points_offset: Float[Tensor, "... 6 3"] = (
                        points_unscaled[..., None, :] + offsets
                    ).clamp(-self.cfg.radius, self.cfg.radius)
                    sdf_offset: Float[Tensor, "... 6 1"] = self.forward_sdf(
                        points_offset
                    )
                    sdf_grad = (
                        0.5
                        * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])
                        / eps
                    )
                else:
                    # one-sided differences: +eps along each axis (3 taps)
                    offsets: Float[Tensor, "3 3"] = torch.as_tensor(
                        [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]
                    ).to(points_unscaled)
                    points_offset: Float[Tensor, "... 3 3"] = (
                        points_unscaled[..., None, :] + offsets
                    ).clamp(-self.cfg.radius, self.cfg.radius)
                    sdf_offset: Float[Tensor, "... 3 1"] = self.forward_sdf(
                        points_offset
                    )
                    sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps
                normal = F.normalize(sdf_grad, dim=-1)
            elif self.cfg.normal_type == "pred":
                # dedicated head predicts the normal directly; reuse it as "grad"
                normal = self.normal_network(enc).view(*points.shape[:-1], 3)
                normal = F.normalize(normal, dim=-1)
                sdf_grad = normal
            elif self.cfg.normal_type == "analytic":
                # NOTE(review): the gradient is negated here; presumably the SDF
                # sign convention makes -grad point along the outward normal —
                # confirm against the renderer's expectation.
                sdf_grad = -torch.autograd.grad(
                    sdf,
                    points_unscaled,
                    grad_outputs=torch.ones_like(sdf),
                    create_graph=True,
                )[0]
                normal = F.normalize(sdf_grad, dim=-1)
                if not grad_enabled:
                    sdf_grad = sdf_grad.detach()
                    normal = normal.detach()
            else:
                raise AttributeError(f"Unknown normal type {self.cfg.normal_type}")
            output.update(
                {"normal": normal, "shading_normal": normal, "sdf_grad": sdf_grad}
            )
        return output
def forward_sdf(self, points: Float[Tensor, "*N Di"]) -> Float[Tensor, "*N 1"]:
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)
sdf = self.sdf_network(
self.encoding(points.reshape(-1, self.cfg.n_input_dims))
).reshape(*points.shape[:-1], 1)
sdf = self.get_shifted_sdf(points_unscaled, sdf)
return sdf
def forward_field(
self, points: Float[Tensor, "*N Di"]
) -> Tuple[Float[Tensor, "*N 1"], Optional[Float[Tensor, "*N 3"]]]:
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)
enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))
sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)
sdf = self.get_shifted_sdf(points_unscaled, sdf)
deformation: Optional[Float[Tensor, "*N 3"]] = None
if self.cfg.isosurface_deformable_grid:
deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)
return sdf, deformation
def forward_level(
self, field: Float[Tensor, "*N 1"], threshold: float
) -> Float[Tensor, "*N 1"]:
return field - threshold
def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str, Any]:
out: Dict[str, Any] = {}
if self.cfg.n_feature_dims == 0:
return out
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)
enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
out.update(
{
"features": features,
}
)
return out
    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        """Per-step hook: keep the finite-difference epsilon in sync.

        For finite-difference normals the step size is either the fixed
        configured float, or ("progressive") the current grid cell size of a
        ProgressiveBandHashGrid encoding, following Neuralangelo.
        """
        if (
            self.cfg.normal_type == "finite_difference"
            or self.cfg.normal_type == "finite_difference_laplacian"
        ):
            if isinstance(self.cfg.finite_difference_normal_eps, float):
                self.finite_difference_normal_eps = (
                    self.cfg.finite_difference_normal_eps
                )
            elif self.cfg.finite_difference_normal_eps == "progressive":
                # progressive finite difference eps from Neuralangelo
                # https://arxiv.org/abs/2306.03092
                hg_conf: Any = self.cfg.pos_encoding_config
                assert (
                    hg_conf.otype == "ProgressiveBandHashGrid"
                ), "finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid"
                # number of hash-grid levels currently unlocked at this step
                current_level = min(
                    hg_conf.start_level
                    + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,
                    hg_conf.n_levels,
                )
                # resolution of the finest active level -> cell size in world units
                grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (
                    current_level - 1
                )
                grid_size = 2 * self.cfg.radius / grid_res
                if grid_size != self.finite_difference_normal_eps:
                    threestudio.info(
                        f"Update finite_difference_normal_eps to {grid_size}"
                    )
                self.finite_difference_normal_eps = grid_size
            else:
                raise ValueError(
                    f"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}"
                )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/geometry/__init__.py | threestudio/models/geometry/__init__.py | from . import (
base,
custom_mesh,
implicit_sdf,
implicit_volume,
tetrahedra_sdf_grid,
volume_grid,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/geometry/base.py | threestudio/models/geometry/base.py | from dataclasses import dataclass, field
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.isosurface import (
IsosurfaceHelper,
MarchingCubeCPUHelper,
MarchingTetrahedraHelper,
)
from threestudio.models.mesh import Mesh
from threestudio.utils.base import BaseModule
from threestudio.utils.ops import chunk_batch, scale_tensor
from threestudio.utils.typing import *
def contract_to_unisphere(
    x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
    """Map world-space points into the unit cube [0, 1]^3.

    When ``unbounded`` is False, points are linearly rescaled from ``bbox`` to
    (0, 1). When True, a mip-NeRF-360-style contraction is applied: points
    inside the unit sphere stay linear, points outside are contracted into a
    radius-2 ball, and the result is remapped to [0, 1].
    """
    if unbounded:
        x = scale_tensor(x, bbox, (0, 1))
        x = x * 2 - 1  # aabb is at [-1, 1]
        mag = x.norm(dim=-1, keepdim=True)
        mask = mag.squeeze(-1) > 1
        # contract points outside the unit sphere; |x| in (1, inf) -> (1, 2)
        # (the masked assignment writes into the fresh tensor from x * 2 - 1,
        # not into the caller's input)
        x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
        x = x / 4 + 0.5  # [-inf, inf] is at [0, 1]
    else:
        x = scale_tensor(x, bbox, (0, 1))
    return x
class BaseGeometry(BaseModule):
    """Common interface for all geometry representations."""

    @dataclass
    class Config(BaseModule.Config):
        pass

    cfg: Config

    @staticmethod
    def create_from(
        other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
    ) -> "BaseGeometry":
        """Build this geometry from another one; the base class cannot convert."""
        raise TypeError(
            f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
        )

    def export(self, *args, **kwargs) -> Dict[str, Any]:
        """Return exportable per-point attributes; nothing by default."""
        return dict()
class BaseImplicitGeometry(BaseGeometry):
    """Geometry defined by an implicit field, with isosurface extraction.

    Subclasses implement ``forward``/``forward_field``/``forward_level``;
    this base provides marching-cubes / marching-tetrahedra mesh extraction
    with optional coarse-to-fine bounding-box refinement.
    """

    @dataclass
    class Config(BaseGeometry.Config):
        radius: float = 1.0  # half-extent of the axis-aligned bounding cube
        isosurface: bool = True  # enable mesh extraction
        isosurface_method: str = "mt"  # "mt" (marching tets) or "mc-cpu"
        isosurface_resolution: int = 128
        isosurface_threshold: Union[float, str] = 0.0  # float, or "auto"
        isosurface_chunk: int = 0  # chunk size for field queries (0 = no chunking)
        isosurface_coarse_to_fine: bool = True
        isosurface_deformable_grid: bool = False
        isosurface_remove_outliers: bool = True
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

    cfg: Config

    def configure(self) -> None:
        self.bbox: Float[Tensor, "2 3"]
        self.register_buffer(
            "bbox",
            torch.as_tensor(
                [
                    [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
                    [self.cfg.radius, self.cfg.radius, self.cfg.radius],
                ],
                dtype=torch.float32,
            ),
        )
        # created lazily in _initilize_isosurface_helper
        self.isosurface_helper: Optional[IsosurfaceHelper] = None
        self.unbounded: bool = False

    def _initilize_isosurface_helper(self):
        # (name typo "initilize" kept for compatibility with existing callers)
        if self.cfg.isosurface and self.isosurface_helper is None:
            if self.cfg.isosurface_method == "mc-cpu":
                self.isosurface_helper = MarchingCubeCPUHelper(
                    self.cfg.isosurface_resolution
                ).to(self.device)
            elif self.cfg.isosurface_method == "mt":
                self.isosurface_helper = MarchingTetrahedraHelper(
                    self.cfg.isosurface_resolution,
                    f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
                ).to(self.device)
            else:
                # fix: the original message lacked the f-prefix, so the
                # placeholder was emitted literally instead of the method name
                raise AttributeError(
                    f"Unknown isosurface method {self.cfg.isosurface_method}"
                )

    def forward(
        self, points: Float[Tensor, "*N Di"], output_normal: bool = False
    ) -> Dict[str, Float[Tensor, "..."]]:
        raise NotImplementedError

    def forward_field(
        self, points: Float[Tensor, "*N Di"]
    ) -> Tuple[Float[Tensor, "*N 1"], Optional[Float[Tensor, "*N 3"]]]:
        # return the value of the implicit field, could be density / signed distance
        # also return a deformation field if the grid vertices can be optimized
        raise NotImplementedError

    def forward_level(
        self, field: Float[Tensor, "*N 1"], threshold: float
    ) -> Float[Tensor, "*N 1"]:
        # return the value of the implicit field, where the zero level set represents the surface
        raise NotImplementedError

    def _isosurface(self, bbox: Float[Tensor, "2 3"], fine_stage: bool = False) -> Mesh:
        """Extract a mesh of the zero level set within ``bbox``."""

        def batch_func(x):
            # scale to bbox as the input vertices are in [0, 1]
            field, deformation = self.forward_field(
                scale_tensor(
                    x.to(bbox.device), self.isosurface_helper.points_range, bbox
                ),
            )
            field = field.to(
                x.device
            )  # move to the same device as the input (could be CPU)
            if deformation is not None:
                deformation = deformation.to(x.device)
            return field, deformation

        assert self.isosurface_helper is not None
        field, deformation = chunk_batch(
            batch_func,
            self.cfg.isosurface_chunk,
            self.isosurface_helper.grid_vertices,
        )
        threshold: float
        if isinstance(self.cfg.isosurface_threshold, float):
            threshold = self.cfg.isosurface_threshold
        elif self.cfg.isosurface_threshold == "auto":
            # "auto": use the mean of positive field values as the level
            eps = 1.0e-5
            threshold = field[field > eps].mean().item()
            threestudio.info(
                f"Automatically determined isosurface threshold: {threshold}"
            )
        else:
            raise TypeError(
                f"Unknown isosurface_threshold {self.cfg.isosurface_threshold}"
            )
        level = self.forward_level(field, threshold)
        mesh: Mesh = self.isosurface_helper(level, deformation=deformation)
        mesh.v_pos = scale_tensor(
            mesh.v_pos, self.isosurface_helper.points_range, bbox
        )  # scale to bbox as the grid vertices are in [0, 1]
        mesh.add_extra("bbox", bbox)
        if self.cfg.isosurface_remove_outliers:
            # remove outliers components with small number of faces
            # only enabled when the mesh is not differentiable
            mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)
        return mesh

    def isosurface(self) -> Mesh:
        """Extract the surface mesh, optionally in two coarse-to-fine passes."""
        if not self.cfg.isosurface:
            raise NotImplementedError(
                "Isosurface is not enabled in the current configuration"
            )
        self._initilize_isosurface_helper()
        if self.cfg.isosurface_coarse_to_fine:
            threestudio.debug("First run isosurface to get a tight bounding box ...")
            with torch.no_grad():
                mesh_coarse = self._isosurface(self.bbox)
            # tighten the bbox around the coarse mesh with a 10% margin,
            # clamped to the configured bounds
            vmin, vmax = mesh_coarse.v_pos.amin(dim=0), mesh_coarse.v_pos.amax(dim=0)
            vmin_ = (vmin - (vmax - vmin) * 0.1).max(self.bbox[0])
            vmax_ = (vmax + (vmax - vmin) * 0.1).min(self.bbox[1])
            threestudio.debug("Run isosurface again with the tight bounding box ...")
            mesh = self._isosurface(torch.stack([vmin_, vmax_], dim=0), fine_stage=True)
        else:
            mesh = self._isosurface(self.bbox)
        return mesh
class BaseExplicitGeometry(BaseGeometry):
    """Geometry stored explicitly, bounded by a cube of half-extent cfg.radius."""

    @dataclass
    class Config(BaseGeometry.Config):
        radius: float = 1.0

    cfg: Config

    def configure(self) -> None:
        self.bbox: Float[Tensor, "2 3"]
        # axis-aligned bounding box [[-r, -r, -r], [r, r, r]]
        r = self.cfg.radius
        self.register_buffer(
            "bbox",
            torch.as_tensor([[-r, -r, -r], [r, r, r]], dtype=torch.float32),
        )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/exporters/mesh_exporter.py | threestudio/models/exporters/mesh_exporter.py | from dataclasses import dataclass, field
import cv2
import numpy as np
import torch
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.mesh import Mesh
from threestudio.utils.rasterize import NVDiffRasterizerContext
from threestudio.utils.typing import *
@threestudio.register("mesh-exporter")
class MeshExporter(Exporter):
    """Exports the scene geometry as an OBJ mesh, optionally with baked
    texture maps (OBJ+MTL) or per-vertex colors (plain OBJ)."""

    @dataclass
    class Config(Exporter.Config):
        fmt: str = "obj-mtl" # in ['obj-mtl', 'obj'], TODO: fbx
        save_name: str = "model"  # base file name (without extension)
        save_normal: bool = False
        save_uv: bool = True
        save_texture: bool = True  # requires save_uv for obj-mtl export
        texture_size: int = 1024  # resolution of baked texture maps
        texture_format: str = "jpg"
        xatlas_chart_options: dict = field(default_factory=dict)
        xatlas_pack_options: dict = field(default_factory=dict)
        context_type: str = "gl"  # nvdiffrast context type

    cfg: Config

    def configure(
        self,
        geometry: BaseImplicitGeometry,
        material: BaseMaterial,
        background: BaseBackground,
    ) -> None:
        super().configure(geometry, material, background)
        # rasterizer used to bake material outputs into UV space
        self.ctx = NVDiffRasterizerContext(self.cfg.context_type, self.device)

    def __call__(self) -> List[ExporterOutput]:
        """Extract the isosurface mesh and export it in the configured format."""
        mesh: Mesh = self.geometry.isosurface()
        if self.cfg.fmt == "obj-mtl":
            return self.export_obj_with_mtl(mesh)
        elif self.cfg.fmt == "obj":
            return self.export_obj(mesh)
        else:
            raise ValueError(f"Unsupported mesh export format: {self.cfg.fmt}")

    def export_obj_with_mtl(self, mesh: Mesh) -> List[ExporterOutput]:
        """Export OBJ + MTL, baking material textures into UV maps."""
        params = {
            "mesh": mesh,
            "save_mat": True,
            "save_normal": self.cfg.save_normal,
            "save_uv": self.cfg.save_uv,
            "save_vertex_color": False,
            "map_Kd": None, # Base Color
            "map_Ks": None, # Specular
            "map_Bump": None, # Normal
            # ref: https://en.wikipedia.org/wiki/Wavefront_.obj_file#Physically-based_Rendering
            "map_Pm": None, # Metallic
            "map_Pr": None, # Roughness
            "map_format": self.cfg.texture_format,
        }
        if self.cfg.save_uv:
            mesh.unwrap_uv(self.cfg.xatlas_chart_options, self.cfg.xatlas_pack_options)
        if self.cfg.save_texture:
            threestudio.info("Exporting textures ...")
            assert self.cfg.save_uv, "save_uv must be True when save_texture is True"
            # clip space transform
            uv_clip = mesh.v_tex * 2.0 - 1.0
            # pad to four component coordinate
            uv_clip4 = torch.cat(
                (
                    uv_clip,
                    torch.zeros_like(uv_clip[..., 0:1]),
                    torch.ones_like(uv_clip[..., 0:1]),
                ),
                dim=-1,
            )
            # rasterize
            rast, _ = self.ctx.rasterize_one(
                uv_clip4, mesh.t_tex_idx, (self.cfg.texture_size, self.cfg.texture_size)
            )
            # texels not covered by any UV triangle
            hole_mask = ~(rast[:, :, 3] > 0)

            def uv_padding(image):
                # inpaint uncovered texels so bilinear sampling near chart
                # borders does not bleed background color into the texture
                uv_padding_size = self.cfg.xatlas_pack_options.get("padding", 2)
                inpaint_image = (
                    cv2.inpaint(
                        (image.detach().cpu().numpy() * 255).astype(np.uint8),
                        (hole_mask.detach().cpu().numpy() * 255).astype(np.uint8),
                        uv_padding_size,
                        cv2.INPAINT_TELEA,
                    )
                    / 255.0
                )
                return torch.from_numpy(inpaint_image).to(image)

            # Interpolate world space position
            gb_pos, _ = self.ctx.interpolate_one(
                mesh.v_pos, rast[None, ...], mesh.t_pos_idx
            )
            gb_pos = gb_pos[0]
            # Sample out textures from MLP
            geo_out = self.geometry.export(points=gb_pos)
            mat_out = self.material.export(points=gb_pos, **geo_out)
            threestudio.info(
                "Perform UV padding on texture maps to avoid seams, may take a while ..."
            )
            if "albedo" in mat_out:
                params["map_Kd"] = uv_padding(mat_out["albedo"])
            else:
                threestudio.warn(
                    "save_texture is True but no albedo texture found, using default white texture"
                )
            if "metallic" in mat_out:
                params["map_Pm"] = uv_padding(mat_out["metallic"])
            if "roughness" in mat_out:
                params["map_Pr"] = uv_padding(mat_out["roughness"])
            if "bump" in mat_out:
                params["map_Bump"] = uv_padding(mat_out["bump"])
            # TODO: map_Ks
        return [
            ExporterOutput(
                save_name=f"{self.cfg.save_name}.obj", save_type="obj", params=params
            )
        ]

    def export_obj(self, mesh: Mesh) -> List[ExporterOutput]:
        """Export a plain OBJ; material is baked into per-vertex colors."""
        params = {
            "mesh": mesh,
            "save_mat": False,
            "save_normal": self.cfg.save_normal,
            "save_uv": self.cfg.save_uv,
            "save_vertex_color": False,
            "map_Kd": None, # Base Color
            "map_Ks": None, # Specular
            "map_Bump": None, # Normal
            # ref: https://en.wikipedia.org/wiki/Wavefront_.obj_file#Physically-based_Rendering
            "map_Pm": None, # Metallic
            "map_Pr": None, # Roughness
            "map_format": self.cfg.texture_format,
        }
        if self.cfg.save_uv:
            mesh.unwrap_uv(self.cfg.xatlas_chart_options, self.cfg.xatlas_pack_options)
        if self.cfg.save_texture:
            threestudio.info("Exporting textures ...")
            # evaluate geometry/material at the mesh vertices only
            geo_out = self.geometry.export(points=mesh.v_pos)
            mat_out = self.material.export(points=mesh.v_pos, **geo_out)
            if "albedo" in mat_out:
                mesh.set_vertex_color(mat_out["albedo"])
                params["save_vertex_color"] = True
            else:
                threestudio.warn(
                    "save_texture is True but no albedo texture found, not saving vertex color"
                )
        return [
            ExporterOutput(
                save_name=f"{self.cfg.save_name}.obj", save_type="obj", params=params
            )
        ]
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/exporters/__init__.py | threestudio/models/exporters/__init__.py | from . import base, mesh_exporter
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/exporters/base.py | threestudio/models/exporters/base.py | from dataclasses import dataclass
import threestudio
from threestudio.models.background.base import BaseBackground
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.utils.base import BaseObject
from threestudio.utils.typing import *
@dataclass
class ExporterOutput:
    """One artifact produced by an Exporter."""

    # file name to save under (e.g. "model.obj")
    save_name: str
    # dispatch key selecting how the artifact is written (e.g. "obj")
    save_type: str
    # saver-specific keyword arguments
    params: Dict[str, Any]
class Exporter(BaseObject):
    """Base class for exporters that turn a trained scene into saveable assets."""

    @dataclass
    class Config(BaseObject.Config):
        save_video: bool = False

    cfg: Config

    def configure(
        self,
        geometry: BaseImplicitGeometry,
        material: BaseMaterial,
        background: BaseBackground,
    ) -> None:
        """Store references to the scene sub-modules the exporter reads from."""

        @dataclass
        class SubModules:
            geometry: BaseImplicitGeometry
            material: BaseMaterial
            background: BaseBackground

        # NOTE(review): wrapping the modules in a dataclass (rather than
        # assigning them directly on self) presumably avoids registering them
        # as child modules — confirm against BaseObject's semantics.
        self.sub_modules = SubModules(geometry, material, background)

    @property
    def geometry(self) -> BaseImplicitGeometry:
        return self.sub_modules.geometry

    @property
    def material(self) -> BaseMaterial:
        return self.sub_modules.material

    @property
    def background(self) -> BaseBackground:
        return self.sub_modules.background

    def __call__(self, *args, **kwargs) -> List[ExporterOutput]:
        raise NotImplementedError
@threestudio.register("dummy-exporter")
class DummyExporter(Exporter):
    """No-op exporter used when no export output is desired."""

    def __call__(self, *args, **kwargs) -> List[ExporterOutput]:
        # DummyExporter does not export anything
        outputs: List[ExporterOutput] = []
        return outputs
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/materials/pbr_material.py | threestudio/models/materials/pbr_material.py | import random
from dataclasses import dataclass, field
import envlight
import numpy as np
import nvdiffrast.torch as dr
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.materials.base import BaseMaterial
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import *
@threestudio.register("pbr-material")
class PBRMaterial(BaseMaterial):
    """Physically-based material lit by a prefiltered environment map.

    Network features are decoded into albedo, metallic, roughness and an
    optional tangent-space normal perturbation; shading uses the split-sum
    approximation with a precomputed BRDF lookup table (FG_LUT).
    """

    @dataclass
    class Config(BaseMaterial.Config):
        material_activation: str = "sigmoid"  # activation applied to raw features
        environment_texture: str = "load/lights/mud_road_puresky_1k.hdr"
        environment_scale: float = 2.0
        min_metallic: float = 0.0
        max_metallic: float = 0.9
        min_roughness: float = 0.08
        max_roughness: float = 0.9
        use_bump: bool = True  # decode features[..., 5:8] as a normal perturbation

    cfg: Config

    def configure(self) -> None:
        self.requires_normal = True
        self.requires_tangent = self.cfg.use_bump
        self.light = envlight.EnvLight(
            self.cfg.environment_texture, scale=self.cfg.environment_scale
        )
        # precomputed split-sum BRDF table indexed by (n·v, roughness)
        FG_LUT = torch.from_numpy(
            np.fromfile("load/lights/bsdf_256_256.bin", dtype=np.float32).reshape(
                1, 256, 256, 2
            )
        )
        self.register_buffer("FG_LUT", FG_LUT)

    def forward(
        self,
        features: Float[Tensor, "*B Nf"],
        viewdirs: Float[Tensor, "*B 3"],
        shading_normal: Float[Tensor, "B ... 3"],
        tangent: Optional[Float[Tensor, "B ... 3"]] = None,
        **kwargs,
    ) -> Float[Tensor, "*B 3"]:
        """Shade points: decode material params and evaluate split-sum PBR."""
        prefix_shape = features.shape[:-1]
        material: Float[Tensor, "*B Nf"] = get_activation(self.cfg.material_activation)(
            features
        )
        albedo = material[..., :3]
        # rescale activated [0, 1] outputs into the configured ranges
        metallic = (
            material[..., 3:4] * (self.cfg.max_metallic - self.cfg.min_metallic)
            + self.cfg.min_metallic
        )
        roughness = (
            material[..., 4:5] * (self.cfg.max_roughness - self.cfg.min_roughness)
            + self.cfg.min_roughness
        )
        if self.cfg.use_bump:
            assert tangent is not None
            # perturb_normal is a delta to the initialization [0, 0, 1]
            perturb_normal = (material[..., 5:8] * 2 - 1) + torch.tensor(
                [0, 0, 1], dtype=material.dtype, device=material.device
            )
            perturb_normal = F.normalize(perturb_normal.clamp(-1, 1), dim=-1)
            # apply normal perturbation in tangent space
            # NOTE(review): torch.cross is called without dim=; it uses the
            # first size-3 dimension, which matches the last axis only when no
            # batch dimension has size 3 — confirm intended.
            bitangent = F.normalize(torch.cross(tangent, shading_normal), dim=-1)
            shading_normal = (
                tangent * perturb_normal[..., 0:1]
                - bitangent * perturb_normal[..., 1:2]
                + shading_normal * perturb_normal[..., 2:3]
            )
            shading_normal = F.normalize(shading_normal, dim=-1)
        v = -viewdirs
        n_dot_v = (shading_normal * v).sum(-1, keepdim=True)
        # mirror reflection direction of the view vector about the normal
        reflective = n_dot_v * shading_normal * 2 - v
        diffuse_albedo = (1 - metallic) * albedo
        # look up the split-sum BRDF terms at (n·v, roughness)
        fg_uv = torch.cat([n_dot_v, roughness], -1).clamp(0, 1)
        fg = dr.texture(
            self.FG_LUT,
            fg_uv.reshape(1, -1, 1, 2).contiguous(),
            filter_mode="linear",
            boundary_mode="clamp",
        ).reshape(*prefix_shape, 2)
        # Fresnel reflectance at normal incidence (0.04 for dielectrics)
        F0 = (1 - metallic) * 0.04 + metallic * albedo
        # NOTE(review): fg has shape (*prefix_shape, 2) but is indexed with
        # fg[:, 0:1] / fg[:, 1:2]; this is correct only when prefix_shape is
        # one-dimensional — confirm callers pass flattened batches.
        specular_albedo = F0 * fg[:, 0:1] + fg[:, 1:2]
        diffuse_light = self.light(shading_normal)
        specular_light = self.light(reflective, roughness)
        color = diffuse_albedo * diffuse_light + specular_albedo * specular_light
        color = color.clamp(0.0, 1.0)
        return color

    def export(self, features: Float[Tensor, "*N Nf"], **kwargs) -> Dict[str, Any]:
        """Decode features into exportable texture channels."""
        material: Float[Tensor, "*N Nf"] = get_activation(self.cfg.material_activation)(
            features
        )
        albedo = material[..., :3]
        metallic = (
            material[..., 3:4] * (self.cfg.max_metallic - self.cfg.min_metallic)
            + self.cfg.min_metallic
        )
        roughness = (
            material[..., 4:5] * (self.cfg.max_roughness - self.cfg.min_roughness)
            + self.cfg.min_roughness
        )
        out = {
            "albedo": albedo,
            "metallic": metallic,
            "roughness": roughness,
        }
        if self.cfg.use_bump:
            perturb_normal = (material[..., 5:8] * 2 - 1) + torch.tensor(
                [0, 0, 1], dtype=material.dtype, device=material.device
            )
            perturb_normal = F.normalize(perturb_normal.clamp(-1, 1), dim=-1)
            # remap from [-1, 1] to [0, 1] for storage as a normal map image
            perturb_normal = (perturb_normal + 1) / 2
            out.update(
                {
                    "bump": perturb_normal,
                }
            )
        return out
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/materials/sd_latent_adapter_material.py | threestudio/models/materials/sd_latent_adapter_material.py | import random
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.materials.base import BaseMaterial
from threestudio.utils.typing import *
@threestudio.register("sd-latent-adapter-material")
class StableDiffusionLatentAdapterMaterial(BaseMaterial):
    """Maps 4-channel Stable Diffusion latents to RGB through a learned
    4x3 linear adapter."""

    @dataclass
    class Config(BaseMaterial.Config):
        pass

    cfg: Config

    def configure(self) -> None:
        # Initialization follows the commonly used SD latent -> RGB
        # linear approximation.
        init = [
            # R       G       B
            [0.298, 0.207, 0.208],  # L1
            [0.187, 0.286, 0.173],  # L2
            [-0.158, 0.189, 0.264],  # L3
            [-0.184, -0.271, -0.473],  # L4
        ]
        self.register_parameter("adapter", nn.Parameter(torch.as_tensor(init)))

    def forward(
        self, features: Float[Tensor, "B ... 4"], **kwargs
    ) -> Float[Tensor, "B ... 3"]:
        """Project latents to RGB, remap from [-1, 1] to [0, 1], and clamp."""
        assert features.shape[-1] == 4
        rgb = features @ self.adapter
        rgb = ((rgb + 1) / 2).clamp(0.0, 1.0)
        return rgb
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/materials/diffuse_with_point_light_material.py | threestudio/models/materials/diffuse_with_point_light_material.py | import random
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.materials.base import BaseMaterial
from threestudio.utils.ops import dot, get_activation
from threestudio.utils.typing import *
@threestudio.register("diffuse-with-point-light-material")
class DiffuseWithPointLightMaterial(BaseMaterial):
    """Lambertian material lit by an ambient term plus one point light.

    During training, the shading mode ("albedo" / "textureless" / "diffuse")
    is randomly selected per batch as an augmentation; evaluation defaults to
    "diffuse" shading.
    """

    @dataclass
    class Config(BaseMaterial.Config):
        ambient_light_color: Tuple[float, float, float] = (0.1, 0.1, 0.1)
        diffuse_light_color: Tuple[float, float, float] = (0.9, 0.9, 0.9)
        ambient_only_steps: int = 1000  # steps using pure albedo shading
        diffuse_prob: float = 0.75  # probability of shaded (non-albedo) output
        textureless_prob: float = 0.5  # within shaded: probability of textureless
        albedo_activation: str = "sigmoid"
        soft_shading: bool = False  # randomize ambient ratio during training

    cfg: Config

    def configure(self) -> None:
        self.requires_normal = True

        self.ambient_light_color: Float[Tensor, "3"]
        self.register_buffer(
            "ambient_light_color",
            torch.as_tensor(self.cfg.ambient_light_color, dtype=torch.float32),
        )
        self.diffuse_light_color: Float[Tensor, "3"]
        self.register_buffer(
            "diffuse_light_color",
            torch.as_tensor(self.cfg.diffuse_light_color, dtype=torch.float32),
        )
        # toggled by update_step for the first ambient_only_steps steps
        self.ambient_only = False

    def forward(
        self,
        features: Float[Tensor, "B ... Nf"],
        positions: Float[Tensor, "B ... 3"],
        shading_normal: Float[Tensor, "B ... 3"],
        light_positions: Float[Tensor, "B ... 3"],
        ambient_ratio: Optional[float] = None,
        shading: Optional[str] = None,
        **kwargs,
    ) -> Float[Tensor, "B ... 3"]:
        """Shade points with Lambertian diffuse + ambient lighting."""
        albedo = get_activation(self.cfg.albedo_activation)(features[..., :3])

        if ambient_ratio is not None:
            # if ambient ratio is specified, use it
            diffuse_light_color = (1 - ambient_ratio) * torch.ones_like(
                self.diffuse_light_color
            )
            ambient_light_color = ambient_ratio * torch.ones_like(
                self.ambient_light_color
            )
        elif self.training and self.cfg.soft_shading:
            # otherwise if in training and soft shading is enabled, random a ambient ratio
            diffuse_light_color = torch.full_like(
                self.diffuse_light_color, random.random()
            )
            ambient_light_color = 1.0 - diffuse_light_color
        else:
            # otherwise use the default fixed values
            diffuse_light_color = self.diffuse_light_color
            ambient_light_color = self.ambient_light_color

        light_directions: Float[Tensor, "B ... 3"] = F.normalize(
            light_positions - positions, dim=-1
        )
        # Lambertian n·l term, clamped to front-facing
        diffuse_light: Float[Tensor, "B ... 3"] = (
            dot(shading_normal, light_directions).clamp(min=0.0) * diffuse_light_color
        )
        textureless_color = diffuse_light + ambient_light_color
        # clamp albedo to [0, 1] to compute shading
        color = albedo.clamp(0.0, 1.0) * textureless_color

        if shading is None:
            if self.training:
                # adopt the same type of augmentation for the whole batch
                if self.ambient_only or random.random() > self.cfg.diffuse_prob:
                    shading = "albedo"
                elif random.random() < self.cfg.textureless_prob:
                    shading = "textureless"
                else:
                    shading = "diffuse"
            else:
                if self.ambient_only:
                    shading = "albedo"
                else:
                    # return shaded color by default in evaluation
                    shading = "diffuse"

        # multiply by 0 to prevent checking for unused parameters in DDP
        if shading == "albedo":
            return albedo + textureless_color * 0
        elif shading == "textureless":
            return albedo * 0 + textureless_color
        elif shading == "diffuse":
            return color
        else:
            raise ValueError(f"Unknown shading type {shading}")

    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        """Use ambient-only (albedo) shading for the warmup phase."""
        if global_step < self.cfg.ambient_only_steps:
            self.ambient_only = True
        else:
            self.ambient_only = False

    def export(self, features: Float[Tensor, "*N Nf"], **kwargs) -> Dict[str, Any]:
        """Decode and clamp the albedo channel for export."""
        albedo = get_activation(self.cfg.albedo_activation)(features[..., :3]).clamp(
            0.0, 1.0
        )
        return {"albedo": albedo}
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/materials/hybrid_rgb_latent_material.py | threestudio/models/materials/hybrid_rgb_latent_material.py | import random
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import dot, get_activation
from threestudio.utils.typing import *
@threestudio.register("hybrid-rgb-latent-material")
class HybridRGBLatentMaterial(BaseMaterial):
    """Material whose first 3 feature channels are activated to RGB while the
    remaining channels (e.g. latent features) pass through unchanged."""

    @dataclass
    class Config(BaseMaterial.Config):
        n_output_dims: int = 3  # total number of channels expected in `features`
        color_activation: str = "sigmoid"  # activation applied to the RGB channels
        requires_normal: bool = True

    cfg: Config

    def configure(self) -> None:
        self.requires_normal = self.cfg.requires_normal

    def forward(
        self, features: Float[Tensor, "B ... Nf"], **kwargs
    ) -> Float[Tensor, "B ... Nc"]:
        """Activate the first 3 channels to color; pass the rest through.

        Returns a new tensor; the caller's ``features`` is left unmodified.
        """
        assert (
            features.shape[-1] == self.cfg.n_output_dims
        ), f"Expected {self.cfg.n_output_dims} output dims, only got {features.shape[-1]} dims input."
        # Clone before the slice assignment: the previous implementation wrote
        # into `features` in place, mutating the caller's tensor (and failing
        # under autograd when `features` is a leaf requiring grad).
        color = features.clone()
        color[..., :3] = get_activation(self.cfg.color_activation)(features[..., :3])
        return color
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/materials/__init__.py | threestudio/models/materials/__init__.py | from . import (
base,
diffuse_with_point_light_material,
hybrid_rgb_latent_material,
neural_radiance_material,
no_material,
pbr_material,
sd_latent_adapter_material,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/materials/neural_radiance_material.py | threestudio/models/materials/neural_radiance_material.py | import random
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import dot, get_activation
from threestudio.utils.typing import *
@threestudio.register("neural-radiance-material")
class NeuralRadianceMaterial(BaseMaterial):
    """View-dependent RGB material: a small MLP over geometry features
    concatenated with a spherical-harmonics encoding of the view direction."""

    @dataclass
    class Config(BaseMaterial.Config):
        input_feature_dims: int = 8
        color_activation: str = "sigmoid"
        dir_encoding_config: dict = field(
            default_factory=lambda: {"otype": "SphericalHarmonics", "degree": 3}
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "FullyFusedMLP",
                "activation": "ReLU",
                "n_neurons": 16,
                "n_hidden_layers": 2,
            }
        )

    cfg: Config

    def configure(self) -> None:
        """Build the direction encoder and the feature+direction -> RGB MLP."""
        self.encoding = get_encoding(3, self.cfg.dir_encoding_config)
        self.n_input_dims = self.cfg.input_feature_dims + self.encoding.n_output_dims  # type: ignore
        self.network = get_mlp(self.n_input_dims, 3, self.cfg.mlp_network_config)

    def forward(
        self,
        features: Float[Tensor, "*B Nf"],
        viewdirs: Float[Tensor, "*B 3"],
        **kwargs,
    ) -> Float[Tensor, "*B 3"]:
        """Predict RGB for each sample given its features and view direction."""
        # viewdirs and normals must be normalized before passing to this function
        dirs01 = (viewdirs + 1.0) / 2.0  # map (-1, 1) -> (0, 1) for the encoder
        dir_embed = self.encoding(dirs01.view(-1, 3))
        flat_features = features.view(-1, features.shape[-1])
        mlp_input = torch.cat([flat_features, dir_embed], dim=-1)
        raw_color = self.network(mlp_input).view(*features.shape[:-1], 3)
        return get_activation(self.cfg.color_activation)(raw_color)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/materials/no_material.py | threestudio/models/materials/no_material.py | import random
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import dot, get_activation
from threestudio.utils.typing import *
@threestudio.register("no-material")
class NoMaterial(BaseMaterial):
    """Pass-through material: features become colors directly, optionally via
    a small decoder MLP when both ``input_feature_dims`` and
    ``mlp_network_config`` are supplied. No light transport is modeled."""

    @dataclass
    class Config(BaseMaterial.Config):
        n_output_dims: int = 3
        color_activation: str = "sigmoid"
        input_feature_dims: Optional[int] = None
        mlp_network_config: Optional[dict] = None
        requires_normal: bool = False

    cfg: Config

    def configure(self) -> None:
        """Optionally build a feature->color decoder network."""
        has_decoder = (
            self.cfg.input_feature_dims is not None
            and self.cfg.mlp_network_config is not None
        )
        if has_decoder:
            self.network = get_mlp(
                self.cfg.input_feature_dims,
                self.cfg.n_output_dims,
                self.cfg.mlp_network_config,
            )
        self.use_network = has_decoder
        self.requires_normal = self.cfg.requires_normal

    def forward(
        self, features: Float[Tensor, "B ... Nf"], **kwargs
    ) -> Float[Tensor, "B ... Nc"]:
        """Map features straight to activated colors."""
        activation = get_activation(self.cfg.color_activation)
        if self.use_network:
            flat = features.view(-1, features.shape[-1])
            raw = self.network(flat).view(
                *features.shape[:-1], self.cfg.n_output_dims
            )
            return activation(raw)
        # Without a decoder the features must already be color channels.
        assert (
            features.shape[-1] == self.cfg.n_output_dims
        ), f"Expected {self.cfg.n_output_dims} output dims, only got {features.shape[-1]} dims input."
        return activation(features)

    def export(self, features: Float[Tensor, "*N Nf"], **kwargs) -> Dict[str, Any]:
        """Bake the material output as an albedo texture for mesh export."""
        rgb = self(features, **kwargs).clamp(0, 1)
        assert rgb.shape[-1] >= 3, "Output color must have at least 3 channels"
        if rgb.shape[-1] > 3:
            threestudio.warn(
                "Output color has >3 channels, treating the first 3 as RGB"
            )
        return {"albedo": rgb[..., :3]}
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/materials/base.py | threestudio/models/materials/base.py | import random
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from threestudio.utils.base import BaseModule
from threestudio.utils.typing import *
class BaseMaterial(BaseModule):
    """Abstract base class for all materials.

    Subclasses implement ``forward`` (features -> colors) and may override
    ``export`` to bake textures for mesh export.
    """

    @dataclass
    class Config(BaseModule.Config):
        pass

    cfg: Config
    # Renderer hints: subclasses set these True when their forward() needs
    # shading normals / tangents passed in.
    requires_normal: bool = False
    requires_tangent: bool = False

    def configure(self):
        # No-op by default; subclasses build networks/encodings here.
        pass

    def forward(self, *args, **kwargs) -> Float[Tensor, "*B 3"]:
        # Must be implemented by every concrete material.
        raise NotImplementedError

    def export(self, *args, **kwargs) -> Dict[str, Any]:
        # Default: nothing to export.
        return {}
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/DreamReward_guidance2.py | threestudio/models/guidance/DreamReward_guidance2.py | import sys
from dataclasses import dataclass, field
from typing import List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mvdream.camera_utils import convert_opengl_to_blender, normalize_camera
from mvdream.model_zoo import build_model
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseModule
from threestudio.utils.misc import C, cleanup, parse_version
from threestudio.utils.typing import *
import Reward3D as r3d
from PIL import Image
from torchvision.transforms import InterpolationMode
from torchvision.transforms import Compose, Resize, CenterCrop, Normalize
try:
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    # Fallback for older torchvision versions that predate
    # transforms.InterpolationMode: use the PIL resampling constant instead.
    BICUBIC = Image.BICUBIC
def _transform():
    """Build the CLIP-style preprocessing pipeline (resize to 224, center
    crop, normalize with CLIP mean/std). Operates on tensors, so no ToTensor
    step is included."""
    clip_mean = (0.48145466, 0.4578275, 0.40821073)
    clip_std = (0.26862954, 0.26130258, 0.27577711)
    steps = [
        Resize(224, interpolation=BICUBIC),
        CenterCrop(224),
        Normalize(clip_mean, clip_std),
    ]
    return Compose(steps)
def calculate_weight(a, b, n=0):
    """Return a power-of-ten factor rescaling *b* to *a*'s order of magnitude.

    The magnitude of a value is taken as the number of characters in the
    integer part of ``str(value)``; the result is ``10 ** (diff - n)``.
    Intended for positive finite numbers (a minus sign or scientific
    notation would skew the character count).
    """

    def _int_digits(value):
        # Length of everything before the decimal point, if any.
        return len(str(value).partition(".")[0])

    return 10 ** (_int_digits(a) - _int_digits(b) - n)
@threestudio.register("DreamReward-guidance2")
class MultiviewDiffusionGuidance(BaseModule):
    """MVDream multiview SDS guidance augmented with a Reward3D score term.

    Besides the standard x0-reconstruction SDS loss, once more than 1000
    optimization steps have elapsed a Reward3D model scores the rendered
    (or diffusion-reconstructed) views against per-view prompts, and the
    resulting reward loss is blended into the returned loss.
    """

    @dataclass
    class Config(BaseModule.Config):
        model_name: str = (
            "sd-v2.1-base-4view"  # check mvdream.model_zoo.PRETRAINED_MODELS
        )
        # Path to the pretrained Reward3D state dict (required).
        reward_model_path: str = (
            ""
        )
        # Step offset applied when resuming, so reward scheduling stays aligned.
        resume_num: int = 0
        alg_type: str = "Reward3D_Scorer"  # in [Reward3D_Scorer, Reward3D_CrossViewFusion]
        ckpt_path: Optional[
            str
        ] = None  # path to local checkpoint (None for loading from url)
        guidance_scale: float = 50.0
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = True
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        camera_condition_type: str = "rotation"
        view_dependent_prompting: bool = False
        n_view: int = 4
        image_size: int = 256
        recon_loss: bool = True
        recon_std_rescale: float = 0.5

    cfg: Config

    def configure(self) -> None:
        """Load the frozen MVDream diffusion model and the Reward3D scorer."""
        threestudio.info("Loading Multiview Diffusion ...")
        self.model = build_model(self.cfg.model_name, ckpt_path=self.cfg.ckpt_path)
        for p in self.model.parameters():
            p.requires_grad_(False)
        self.num_train_timesteps = 1000
        min_step_percent = C(self.cfg.min_step_percent, 0, 0)
        max_step_percent = C(self.cfg.max_step_percent, 0, 0)
        self.min_step = int(self.num_train_timesteps * min_step_percent)
        self.max_step = int(self.num_train_timesteps * max_step_percent)
        self.grad_clip_val: Optional[float] = None
        self.to(self.device)
        self.alg_type = self.cfg.alg_type
        threestudio.info("Loaded Multiview Diffusion!")
        med_config_path = "scripts/med_config.json"
        state_dict_path = self.cfg.reward_model_path
        state_dict = torch.load(state_dict_path)
        if self.alg_type == "Reward3D_Scorer":
            Reward3D = r3d.Reward3D_(device=self.device, med_config=med_config_path)
        elif self.alg_type == "Reward3D_CrossViewFusion":
            Reward3D = r3d.Reward3D(device=self.device, med_config=med_config_path)
        else:
            # Fail fast with a clear message; previously an unknown alg_type
            # fell through and raised an opaque NameError below.
            raise ValueError(f"Unknown alg_type={self.alg_type}")
        msg = Reward3D.load_state_dict(state_dict, strict=False)
        print(msg)
        print(self.cfg.reward_model_path)
        self.Reward3D = Reward3D.to(self.device)
        threestudio.info("Loaded Reward3D!")

    def get_camera_cond(
        self,
        camera: Float[Tensor, "B 4 4"],
        fovy=None,
    ):
        """Flatten c2w matrices into the conditioning vector MVDream expects."""
        # Note: the input of threestudio is already blender coordinate system
        # camera = convert_opengl_to_blender(camera)
        if self.cfg.camera_condition_type == "rotation":  # normalized camera
            camera = normalize_camera(camera)
            camera = camera.flatten(start_dim=1)
        else:
            raise NotImplementedError(
                f"Unknown camera_condition_type={self.cfg.camera_condition_type}"
            )
        return camera

    def encode_images(
        self, imgs: Float[Tensor, "B 3 256 256"]
    ) -> Float[Tensor, "B 4 32 32"]:
        """Encode [0, 1] images into VAE latents (input is rescaled to [-1, 1])."""
        imgs = imgs * 2.0 - 1.0
        latents = self.model.get_first_stage_encoding(
            self.model.encode_first_stage(imgs)
        )
        return latents  # [B, 4, 32, 32] Latent space image

    def decode_latents(
        self,
        latents: Float[Tensor, "B 4 H W"],
    ) -> Float[Tensor, "B 3 256 256"]:
        """Decode VAE latents back to images clamped to [0, 1]."""
        input_dtype = latents.dtype
        image = self.model.decode_first_stage(latents)
        image = (image * 0.5 + 0.5).clamp(0, 1)
        return image.to(input_dtype)

    def forward(
        self,
        rgb: Float[Tensor, "B H W C"],
        prompt_utils: PromptProcessorOutput,
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        c2w: Float[Tensor, "B 4 4"],
        rgb_as_latents: bool = False,
        fovy=None,
        timestep=None,
        text_embeddings=None,
        input_is_latent=False,
        **kwargs,
    ):
        """Compute the SDS loss, blended with the Reward3D loss after warm-up.

        Returns a dict with ``loss_sds`` (the combined loss) and ``grad_norm``.
        """
        camera = c2w
        rgb_BCHW = rgb.permute(0, 3, 1, 2)
        if text_embeddings is None:
            text_embeddings = prompt_utils.get_text_embeddings(
                elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
            )
        if input_is_latent:
            latents = rgb
        else:
            latents: Float[Tensor, "B 4 64 64"]
            if rgb_as_latents:
                latents = (
                    F.interpolate(
                        rgb_BCHW, (64, 64), mode="bilinear", align_corners=False
                    )
                    * 2
                    - 1
                )
            else:
                # interp to cfg.image_size to be fed into vae.
                pred_rgb = F.interpolate(
                    rgb_BCHW,
                    (self.cfg.image_size, self.cfg.image_size),
                    mode="bilinear",
                    align_corners=False,
                )
                # encode image into latents with vae, requires grad!
                latents = self.encode_images(pred_rgb)
        # sample timestep
        if timestep is None:
            t = torch.randint(
                self.min_step,
                self.max_step + 1,
                [1],
                dtype=torch.long,
                device=latents.device,
            )
        else:
            assert timestep >= 0 and timestep < self.num_train_timesteps
            t = torch.full([1], timestep, dtype=torch.long, device=latents.device)
        # Fix: compute the scalar timestep for BOTH branches. It was
        # previously set only when `timestep is None`, so passing an explicit
        # timestep raised a NameError in the reward branch below.
        t_ = t.item()
        t_expand = t.repeat(text_embeddings.shape[0])
        # predict the noise residual with unet, NO grad!
        with torch.no_grad():
            # add noise
            noise = torch.randn_like(latents)
            latents_noisy = self.model.q_sample(latents, t, noise)
            # pred noise: conditional and unconditional batches concatenated
            latent_model_input = torch.cat([latents_noisy] * 2)
            # save input tensors for UNet
            if camera is not None:
                camera = self.get_camera_cond(camera, fovy)
                camera = camera.repeat(2, 1).to(text_embeddings)
                context = {
                    "context": text_embeddings,
                    "camera": camera,
                    "num_frames": self.cfg.n_view,
                }
            else:
                context = {"context": text_embeddings}
            noise_pred = self.model.apply_model(latent_model_input, t_expand, context)
        # perform classifier-free guidance
        noise_pred_text, noise_pred_uncond = noise_pred.chunk(
            2
        )  # Note: flipped compared to stable-dreamfusion
        noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
            noise_pred_text - noise_pred_uncond
        )
        if self.cfg.recon_loss:
            # reconstruct x0
            latents_recon = self.model.predict_start_from_noise(
                latents_noisy, t, noise_pred
            )
            # rescale x0 towards the no-CFG reconstruction's statistics
            if self.cfg.recon_std_rescale > 0:
                latents_recon_nocfg = self.model.predict_start_from_noise(
                    latents_noisy, t, noise_pred_text
                )
                latents_recon_nocfg_reshape = latents_recon_nocfg.view(
                    -1, self.cfg.n_view, *latents_recon_nocfg.shape[1:]
                )
                latents_recon_reshape = latents_recon.view(
                    -1, self.cfg.n_view, *latents_recon.shape[1:]
                )
                factor = (
                    latents_recon_nocfg_reshape.std([1, 2, 3, 4], keepdim=True) + 1e-8
                ) / (latents_recon_reshape.std([1, 2, 3, 4], keepdim=True) + 1e-8)
                latents_recon_adjust = latents_recon.clone() * factor.squeeze(
                    1
                ).repeat_interleave(self.cfg.n_view, dim=0)
                latents_recon = (
                    self.cfg.recon_std_rescale * latents_recon_adjust
                    + (1 - self.cfg.recon_std_rescale) * latents_recon
                )
            # x0-reconstruction loss from Sec 3.2 and Appendix
            loss = (
                0.5
                * F.mse_loss(latents, latents_recon.detach(), reduction="sum")
                / latents.shape[0]
            )
            grad = torch.autograd.grad(loss, latents, retain_graph=True)[0]
        else:
            # Original SDS
            # w(t), sigma_t^2
            # NOTE(review): `self.alphas_cumprod` is never assigned in
            # `configure`; this branch would raise AttributeError. Presumably
            # `self.model.alphas_cumprod` was intended — confirm before
            # running with recon_loss=False.
            w = 1 - self.alphas_cumprod[t]
            grad = w * (noise_pred - noise)
            # clip grad for stable training?
            if self.grad_clip_val is not None:
                grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
            grad = torch.nan_to_num(grad)
            target = (latents - grad).detach()
            # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
            loss = 0.5 * F.mse_loss(latents, target, reduction="sum") / latents.shape[0]
        # --- Reward3D branch ------------------------------------------------
        # Tokenize the four view-dependent prompts once, on the first call.
        if not hasattr(self, "rm_input_ids"):
            self.rm_input_ids = []
            self.rm_attention_mask = []
            prompts_vds = prompt_utils.prompts_vd
            for idx in range(4):
                prompts_vd = prompts_vds[idx]
                g = self.Reward3D.blip.tokenizer(
                    prompts_vd,
                    padding="max_length",
                    truncation=True,
                    max_length=100,
                    return_tensors="pt",
                )
                self.rm_input_ids.append(g.input_ids)
                self.rm_attention_mask.append(g.attention_mask)
            self.global_step = 0 + self.cfg.resume_num
        else:
            self.global_step += 1
        # Reward shaping only kicks in after a 1000-step warm-up.
        adding_reward = self.global_step > 1000
        if adding_reward:
            # Map each view to its view-dependent prompt index.
            direction_idx = torch.zeros_like(elevation, dtype=torch.long)
            for d in prompt_utils.directions:
                direction_idx[
                    d.condition(elevation, azimuth, camera_distances)
                ] = prompt_utils.direction2idx[d.name]
            rm_input_ids = torch.cat(
                [self.rm_input_ids[idx] for idx in direction_idx]
            ).to(self.device)
            rm_attention_mask = torch.cat(
                [self.rm_attention_mask[idx] for idx in direction_idx]
            ).to(self.device)
            # NOTE(review): this branch reads `pred_rgb` and `latents_recon`,
            # which only exist when rgb_as_latents=False / recon_loss=True —
            # confirm those configs are always used with reward shaping.
            if t_ <= 300 and self.global_step <= 9800:
                # Score the diffusion reconstruction, but route gradients
                # through the render via a straight-through substitution.
                with torch.no_grad():
                    image = self.decode_latents(latents_recon.detach())
                image = pred_rgb - (pred_rgb - image).detach()
                image = _transform()(image)
                rewards = self.Reward3D(image, rm_input_ids, rm_attention_mask)
            else:
                # Score the rendered views directly.
                image_render = _transform()(pred_rgb)
                rewards = self.Reward3D(image_render, rm_input_ids, rm_attention_mask)
            # Hinge on the reward: only penalize scores below 4.
            loss_reward = F.relu(-rewards + 4).mean()
            # Rescale the reward loss to the SDS loss's order of magnitude.
            weight = calculate_weight(loss.item(), loss_reward.item())
            loss += loss_reward * weight * 0.3
            # Final refinement phase: optimize the reward alone.
            if self.global_step > 9800:
                loss = loss_reward * 1000000
        return {
            "loss_sds": loss,
            "grad_norm": grad.norm(),
        }

    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        """Re-anneal the sampled-timestep range per the configured schedules."""
        min_step_percent = C(self.cfg.min_step_percent, epoch, global_step)
        max_step_percent = C(self.cfg.max_step_percent, epoch, global_step)
        self.min_step = int(self.num_train_timesteps * min_step_percent)
        self.max_step = int(self.num_train_timesteps * max_step_percent)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/multiview_diffusion_guidance.py | threestudio/models/guidance/multiview_diffusion_guidance.py | import sys
from dataclasses import dataclass, field
from typing import List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mvdream.camera_utils import convert_opengl_to_blender, normalize_camera
from mvdream.model_zoo import build_model
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseModule
from threestudio.utils.misc import C, cleanup, parse_version
from threestudio.utils.typing import *
@threestudio.register("multiview-diffusion-guidance")
class MultiviewDiffusionGuidance(BaseModule):
    """Score Distillation Sampling (SDS) guidance backed by the MVDream
    multiview diffusion model.

    Renders are encoded to VAE latents, noised at a sampled timestep,
    denoised with classifier-free guidance, and the resulting signal is
    expressed as an MSE loss so autograd carries it back to the renderer.
    """

    @dataclass
    class Config(BaseModule.Config):
        model_name: str = (
            "sd-v2.1-base-4view"  # check mvdream.model_zoo.PRETRAINED_MODELS
        )
        ckpt_path: Optional[
            str
        ] = None  # path to local checkpoint (None for loading from url)
        guidance_scale: float = 50.0
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = True
        # Fractional bounds of the timestep range sampled during training.
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        camera_condition_type: str = "rotation"
        view_dependent_prompting: bool = False
        # Number of views rendered/denoised jointly per object.
        n_view: int = 4
        image_size: int = 256
        # Use the x0-reconstruction formulation instead of raw SDS gradients.
        recon_loss: bool = True
        recon_std_rescale: float = 0.5

    cfg: Config

    def configure(self) -> None:
        """Load the frozen MVDream model and initialize the timestep range."""
        threestudio.info(f"Loading Multiview Diffusion ...")
        self.model = build_model(self.cfg.model_name, ckpt_path=self.cfg.ckpt_path)
        # Guidance model is frozen; only the 3D representation is optimized.
        for p in self.model.parameters():
            p.requires_grad_(False)
        self.num_train_timesteps = 1000
        min_step_percent = C(self.cfg.min_step_percent, 0, 0)
        max_step_percent = C(self.cfg.max_step_percent, 0, 0)
        self.min_step = int(self.num_train_timesteps * min_step_percent)
        self.max_step = int(self.num_train_timesteps * max_step_percent)
        self.grad_clip_val: Optional[float] = None
        self.to(self.device)
        threestudio.info(f"Loaded Multiview Diffusion!")

    def get_camera_cond(
        self,
        camera: Float[Tensor, "B 4 4"],
        fovy=None,
    ):
        """Flatten c2w matrices into the conditioning vector MVDream expects."""
        # Note: the input of threestudio is already blender coordinate system
        # camera = convert_opengl_to_blender(camera)
        if self.cfg.camera_condition_type == "rotation":  # normalized camera
            camera = normalize_camera(camera)
            camera = camera.flatten(start_dim=1)
        else:
            raise NotImplementedError(
                f"Unknown camera_condition_type={self.cfg.camera_condition_type}"
            )
        return camera

    def encode_images(
        self, imgs: Float[Tensor, "B 3 256 256"]
    ) -> Float[Tensor, "B 4 32 32"]:
        """Encode [0, 1] images into VAE latents (rescaled to [-1, 1] first)."""
        imgs = imgs * 2.0 - 1.0
        latents = self.model.get_first_stage_encoding(
            self.model.encode_first_stage(imgs)
        )
        return latents  # [B, 4, 32, 32] Latent space image

    def forward(
        self,
        rgb: Float[Tensor, "B H W C"],
        prompt_utils: PromptProcessorOutput,
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        c2w: Float[Tensor, "B 4 4"],
        rgb_as_latents: bool = False,
        fovy=None,
        timestep=None,
        text_embeddings=None,
        input_is_latent=False,
        **kwargs,
    ):
        """Compute the SDS loss for a batch of rendered views.

        Returns a dict with ``loss_sds`` and ``grad_norm``.
        """
        batch_size = rgb.shape[0]
        camera = c2w
        rgb_BCHW = rgb.permute(0, 3, 1, 2)
        if text_embeddings is None:
            text_embeddings = prompt_utils.get_text_embeddings(
                elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
            )
        if input_is_latent:
            # Caller already provides latents; skip the VAE.
            latents = rgb
        else:
            latents: Float[Tensor, "B 4 64 64"]
            if rgb_as_latents:
                # Treat the render as latents directly, rescaled to [-1, 1].
                latents = (
                    F.interpolate(
                        rgb_BCHW, (64, 64), mode="bilinear", align_corners=False
                    )
                    * 2
                    - 1
                )
            else:
                # interp to cfg.image_size to be fed into vae.
                pred_rgb = F.interpolate(
                    rgb_BCHW,
                    (self.cfg.image_size, self.cfg.image_size),
                    mode="bilinear",
                    align_corners=False,
                )
                # encode image into latents with vae, requires grad!
                latents = self.encode_images(pred_rgb)
        # sample timestep
        if timestep is None:
            t = torch.randint(
                self.min_step,
                self.max_step + 1,
                [1],
                dtype=torch.long,
                device=latents.device,
            )
        else:
            assert timestep >= 0 and timestep < self.num_train_timesteps
            t = torch.full([1], timestep, dtype=torch.long, device=latents.device)
        t_expand = t.repeat(text_embeddings.shape[0])
        # predict the noise residual with unet, NO grad!
        with torch.no_grad():
            # add noise
            noise = torch.randn_like(latents)
            latents_noisy = self.model.q_sample(latents, t, noise)
            # pred noise: conditional and unconditional batches concatenated
            latent_model_input = torch.cat([latents_noisy] * 2)
            # save input tensors for UNet
            if camera is not None:
                camera = self.get_camera_cond(camera, fovy)
                camera = camera.repeat(2, 1).to(text_embeddings)
                context = {
                    "context": text_embeddings,
                    "camera": camera,
                    "num_frames": self.cfg.n_view,
                }
            else:
                context = {"context": text_embeddings}
            noise_pred = self.model.apply_model(latent_model_input, t_expand, context)
        # perform guidance
        noise_pred_text, noise_pred_uncond = noise_pred.chunk(
            2
        )  # Note: flipped compared to stable-dreamfusion
        noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
            noise_pred_text - noise_pred_uncond
        )
        if self.cfg.recon_loss:
            # reconstruct x0
            latents_recon = self.model.predict_start_from_noise(
                latents_noisy, t, noise_pred
            )
            # clip or rescale x0 towards the no-CFG reconstruction statistics
            if self.cfg.recon_std_rescale > 0:
                latents_recon_nocfg = self.model.predict_start_from_noise(
                    latents_noisy, t, noise_pred_text
                )
                latents_recon_nocfg_reshape = latents_recon_nocfg.view(
                    -1, self.cfg.n_view, *latents_recon_nocfg.shape[1:]
                )
                latents_recon_reshape = latents_recon.view(
                    -1, self.cfg.n_view, *latents_recon.shape[1:]
                )
                factor = (
                    latents_recon_nocfg_reshape.std([1, 2, 3, 4], keepdim=True) + 1e-8
                ) / (latents_recon_reshape.std([1, 2, 3, 4], keepdim=True) + 1e-8)
                latents_recon_adjust = latents_recon.clone() * factor.squeeze(
                    1
                ).repeat_interleave(self.cfg.n_view, dim=0)
                latents_recon = (
                    self.cfg.recon_std_rescale * latents_recon_adjust
                    + (1 - self.cfg.recon_std_rescale) * latents_recon
                )
            # x0-reconstruction loss from Sec 3.2 and Appendix
            loss = (
                0.5
                * F.mse_loss(latents, latents_recon.detach(), reduction="sum")
                / latents.shape[0]
            )
            grad = torch.autograd.grad(loss, latents, retain_graph=True)[0]
        else:
            # Original SDS
            # w(t), sigma_t^2
            # NOTE(review): `self.alphas_cumprod` is never assigned in
            # `configure`; this branch (recon_loss=False) would raise
            # AttributeError — presumably `self.model.alphas_cumprod` was
            # intended. Confirm before enabling recon_loss=False.
            w = 1 - self.alphas_cumprod[t]
            grad = w * (noise_pred - noise)
            # clip grad for stable training?
            if self.grad_clip_val is not None:
                grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
            grad = torch.nan_to_num(grad)
            target = (latents - grad).detach()
            # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
            loss = 0.5 * F.mse_loss(latents, target, reduction="sum") / latents.shape[0]
        return {
            "loss_sds": loss,
            "grad_norm": grad.norm(),
        }

    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        """Re-anneal the sampled-timestep range per the configured schedules."""
        min_step_percent = C(self.cfg.min_step_percent, epoch, global_step)
        max_step_percent = C(self.cfg.max_step_percent, epoch, global_step)
        self.min_step = int(self.num_train_timesteps * min_step_percent)
        self.max_step = int(self.num_train_timesteps * max_step_percent)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/deep_floyd_guidance.py | threestudio/models/guidance/deep_floyd_guidance.py | from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers import IFPipeline
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, parse_version
from threestudio.utils.ops import perpendicular_component
from threestudio.utils.typing import *
@threestudio.register("deep-floyd-guidance")
class DeepFloydGuidance(BaseObject):
    @dataclass
    class Config(BaseObject.Config):
        pretrained_model_name_or_path: str = "DeepFloyd/IF-I-XL-v1.0"
        # FIXME: xformers error
        enable_memory_efficient_attention: bool = False
        enable_sequential_cpu_offload: bool = False
        enable_attention_slicing: bool = False
        enable_channels_last_format: bool = True
        guidance_scale: float = 20.0
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = True
        # Fractional bounds of the timestep range sampled during training.
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        # One of "sds", "uniform", "fantasia3d" (see __call__).
        weighting_strategy: str = "sds"
        view_dependent_prompting: bool = True
        """Maximum number of batch items to evaluate guidance for (for debugging) and to save on disk. -1 means save all items."""
        max_items_eval: int = 4

    cfg: Config
    def configure(self) -> None:
        """Load the DeepFloyd IF pixel-space diffusion pipeline (frozen) and
        apply the configured memory/attention optimizations."""
        threestudio.info(f"Loading Deep Floyd ...")
        self.weights_dtype = (
            torch.float16 if self.cfg.half_precision_weights else torch.float32
        )
        # Create model — text encoder / safety modules are dropped because
        # prompts are embedded by the prompt processor, not this pipeline.
        self.pipe = IFPipeline.from_pretrained(
            self.cfg.pretrained_model_name_or_path,
            text_encoder=None,
            safety_checker=None,
            watermarker=None,
            feature_extractor=None,
            requires_safety_checker=False,
            variant="fp16" if self.cfg.half_precision_weights else None,
            torch_dtype=self.weights_dtype,
        ).to(self.device)
        if self.cfg.enable_memory_efficient_attention:
            if parse_version(torch.__version__) >= parse_version("2"):
                threestudio.info(
                    "PyTorch2.0 uses memory efficient attention by default."
                )
            elif not is_xformers_available():
                threestudio.warn(
                    "xformers is not available, memory efficient attention is not enabled."
                )
            else:
                threestudio.warn(
                    f"Use DeepFloyd with xformers may raise error, see https://github.com/deep-floyd/IF/issues/52 to track this problem."
                )
                self.pipe.enable_xformers_memory_efficient_attention()
        if self.cfg.enable_sequential_cpu_offload:
            self.pipe.enable_sequential_cpu_offload()
        if self.cfg.enable_attention_slicing:
            self.pipe.enable_attention_slicing(1)
        if self.cfg.enable_channels_last_format:
            self.pipe.unet.to(memory_format=torch.channels_last)
        # The UNet is frozen; only the 3D representation receives gradients.
        self.unet = self.pipe.unet.eval()
        for p in self.unet.parameters():
            p.requires_grad_(False)
        self.scheduler = self.pipe.scheduler
        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        self.set_min_max_steps()  # set to default value
        self.alphas: Float[Tensor, "..."] = self.scheduler.alphas_cumprod.to(
            self.device
        )
        self.grad_clip_val: Optional[float] = None
        threestudio.info(f"Loaded Deep Floyd!")
@torch.cuda.amp.autocast(enabled=False)
def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0.98):
self.min_step = int(self.num_train_timesteps * min_step_percent)
self.max_step = int(self.num_train_timesteps * max_step_percent)
@torch.cuda.amp.autocast(enabled=False)
def forward_unet(
self,
latents: Float[Tensor, "..."],
t: Float[Tensor, "..."],
encoder_hidden_states: Float[Tensor, "..."],
) -> Float[Tensor, "..."]:
input_dtype = latents.dtype
return self.unet(
latents.to(self.weights_dtype),
t.to(self.weights_dtype),
encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype),
).sample.to(input_dtype)
    def __call__(
        self,
        rgb: Float[Tensor, "B H W C"],
        prompt_utils: PromptProcessorOutput,
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        rgb_as_latents=False,
        guidance_eval=False,
        **kwargs,
    ):
        """Compute the SDS loss for rendered views using pixel-space DeepFloyd IF.

        Supports both standard classifier-free guidance and Perp-Neg
        (perpendicular negative-prompt) guidance. Returns a dict with
        ``loss_sds``, ``grad_norm``, the current timestep bounds, and —
        when ``guidance_eval`` — diagnostic denoising outputs.
        """
        batch_size = rgb.shape[0]
        rgb_BCHW = rgb.permute(0, 3, 1, 2)
        assert rgb_as_latents == False, f"No latent space in {self.__class__.__name__}"
        rgb_BCHW = rgb_BCHW * 2.0 - 1.0  # scale to [-1, 1] to match the diffusion range
        # IF stage-I operates directly on 64x64 pixels (no VAE).
        latents = F.interpolate(
            rgb_BCHW, (64, 64), mode="bilinear", align_corners=False
        )
        # timestep ~ U(0.02, 0.98) to avoid very high/low noise level
        t = torch.randint(
            self.min_step,
            self.max_step + 1,
            [batch_size],
            dtype=torch.long,
            device=self.device,
        )
        if prompt_utils.use_perp_neg:
            (
                text_embeddings,
                neg_guidance_weights,
            ) = prompt_utils.get_text_embeddings_perp_neg(
                elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
            )
            with torch.no_grad():
                noise = torch.randn_like(latents)
                latents_noisy = self.scheduler.add_noise(latents, noise, t)
                # Batch layout: [positive | uncond | negatives...].
                latent_model_input = torch.cat([latents_noisy] * 4, dim=0)
                noise_pred = self.forward_unet(
                    latent_model_input,
                    torch.cat([t] * 4),
                    encoder_hidden_states=text_embeddings,
                )  # (4B, 6, 64, 64)
            # IF predicts 3 noise + 3 variance channels; keep the noise part.
            noise_pred_text, _ = noise_pred[:batch_size].split(3, dim=1)
            noise_pred_uncond, _ = noise_pred[batch_size : batch_size * 2].split(
                3, dim=1
            )
            noise_pred_neg, _ = noise_pred[batch_size * 2 :].split(3, dim=1)
            e_pos = noise_pred_text - noise_pred_uncond
            accum_grad = 0
            n_negative_prompts = neg_guidance_weights.shape[-1]
            # Perp-Neg: subtract only the component of each negative direction
            # perpendicular to the positive direction.
            for i in range(n_negative_prompts):
                e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
                accum_grad += neg_guidance_weights[:, i].view(
                    -1, 1, 1, 1
                ) * perpendicular_component(e_i_neg, e_pos)
            noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
                e_pos + accum_grad
            )
            # NOTE(review): `predicted_variance` is not assigned in this
            # branch, yet it is read below when guidance_eval=True — that
            # combination would raise NameError. Confirm intended usage.
        else:
            neg_guidance_weights = None
            text_embeddings = prompt_utils.get_text_embeddings(
                elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
            )
            # predict the noise residual with unet, NO grad!
            with torch.no_grad():
                # add noise
                noise = torch.randn_like(latents)  # TODO: use torch generator
                latents_noisy = self.scheduler.add_noise(latents, noise, t)
                # pred noise
                latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
                noise_pred = self.forward_unet(
                    latent_model_input,
                    torch.cat([t] * 2),
                    encoder_hidden_states=text_embeddings,
                )  # (2B, 6, 64, 64)
            # perform guidance (high scale from paper!)
            noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
            noise_pred_text, predicted_variance = noise_pred_text.split(3, dim=1)
            noise_pred_uncond, _ = noise_pred_uncond.split(3, dim=1)
            noise_pred = noise_pred_text + self.cfg.guidance_scale * (
                noise_pred_text - noise_pred_uncond
            )
        """
        # thresholding, experimental
        if self.cfg.thresholding:
            assert batch_size == 1
            noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
            noise_pred = custom_ddpm_step(self.scheduler,
                noise_pred, int(t.item()), latents_noisy, **self.pipe.prepare_extra_step_kwargs(None, 0.0)
            )
        """
        if self.cfg.weighting_strategy == "sds":
            # w(t), sigma_t^2
            w = (1 - self.alphas[t]).view(-1, 1, 1, 1)
        elif self.cfg.weighting_strategy == "uniform":
            w = 1
        elif self.cfg.weighting_strategy == "fantasia3d":
            w = (self.alphas[t] ** 0.5 * (1 - self.alphas[t])).view(-1, 1, 1, 1)
        else:
            raise ValueError(
                f"Unknown weighting strategy: {self.cfg.weighting_strategy}"
            )
        grad = w * (noise_pred - noise)
        grad = torch.nan_to_num(grad)
        # clip grad for stable training?
        if self.grad_clip_val is not None:
            grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
        # loss = SpecifyGradient.apply(latents, grad)
        # SpecifyGradient is not straghtforward, use a reparameterization trick instead
        target = (latents - grad).detach()
        # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
        loss_sds = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size
        guidance_out = {
            "loss_sds": loss_sds,
            "grad_norm": grad.norm(),
            "min_step": self.min_step,
            "max_step": self.max_step,
        }
        if guidance_eval:
            guidance_eval_utils = {
                "use_perp_neg": prompt_utils.use_perp_neg,
                "neg_guidance_weights": neg_guidance_weights,
                "text_embeddings": text_embeddings,
                "t_orig": t,
                "latents_noisy": latents_noisy,
                "noise_pred": torch.cat([noise_pred, predicted_variance], dim=1),
            }
            guidance_eval_out = self.guidance_eval(**guidance_eval_utils)
            texts = []
            # Per-item labels: noise level, elevation, azimuth, camera distance.
            for n, e, a, c in zip(
                guidance_eval_out["noise_levels"], elevation, azimuth, camera_distances
            ):
                texts.append(
                    f"n{n:.02f}\ne{e.item():.01f}\na{a.item():.01f}\nc{c.item():.02f}"
                )
            guidance_eval_out.update({"texts": texts})
            guidance_out.update({"eval": guidance_eval_out})
        return guidance_out
    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def get_noise_pred(
        self,
        latents_noisy,
        t,
        text_embeddings,
        use_perp_neg=False,
        neg_guidance_weights=None,
    ):
        """Predict guided noise (plus variance channels) at a single timestep.

        Mirrors the guidance logic of ``__call__`` — classifier-free or
        Perp-Neg — for use by the evaluation denoising loop. Returns a
        6-channel tensor: guided noise concatenated with predicted variance.
        """
        batch_size = latents_noisy.shape[0]
        if use_perp_neg:
            # Batch layout: [positive | uncond | negatives...].
            latent_model_input = torch.cat([latents_noisy] * 4, dim=0)
            noise_pred = self.forward_unet(
                latent_model_input,
                torch.cat([t.reshape(1)] * 4).to(self.device),
                encoder_hidden_states=text_embeddings,
            )  # (4B, 6, 64, 64)
            noise_pred_text, _ = noise_pred[:batch_size].split(3, dim=1)
            noise_pred_uncond, _ = noise_pred[batch_size : batch_size * 2].split(
                3, dim=1
            )
            noise_pred_neg, _ = noise_pred[batch_size * 2 :].split(3, dim=1)
            e_pos = noise_pred_text - noise_pred_uncond
            accum_grad = 0
            n_negative_prompts = neg_guidance_weights.shape[-1]
            # Perp-Neg: only the component of each negative direction
            # perpendicular to the positive direction is subtracted.
            for i in range(n_negative_prompts):
                e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
                accum_grad += neg_guidance_weights[:, i].view(
                    -1, 1, 1, 1
                ) * perpendicular_component(e_i_neg, e_pos)
            noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
                e_pos + accum_grad
            )
            # NOTE(review): `predicted_variance` is only assigned in the
            # non-perp-neg branch below, but it is concatenated in the return
            # for both paths — the perp-neg path would raise NameError.
        else:
            latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
            noise_pred = self.forward_unet(
                latent_model_input,
                torch.cat([t.reshape(1)] * 2).to(self.device),
                encoder_hidden_states=text_embeddings,
            )  # (2B, 6, 64, 64)
            # perform guidance (high scale from paper!)
            noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
            noise_pred_text, predicted_variance = noise_pred_text.split(3, dim=1)
            noise_pred_uncond, _ = noise_pred_uncond.split(3, dim=1)
            noise_pred = noise_pred_text + self.cfg.guidance_scale * (
                noise_pred_text - noise_pred_uncond
            )
        return torch.cat([noise_pred, predicted_variance], dim=1)
    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def guidance_eval(
        self,
        t_orig,
        text_embeddings,
        latents_noisy,
        noise_pred,
        use_perp_neg=False,
        neg_guidance_weights=None,
    ):
        """Visualize the current guidance signal for logging.

        For up to cfg.max_items_eval batch items, produce: the noisy input,
        the one-step denoised latent, the one-step predicted x0, and the
        fully denoised image obtained by running the remaining scheduler
        steps. Returns a dict of HWC image tensors plus noise levels.
        """
        # use only 50 timesteps, and find nearest of those to t
        self.scheduler.set_timesteps(50)
        self.scheduler.timesteps_gpu = self.scheduler.timesteps.to(self.device)
        bs = (
            min(self.cfg.max_items_eval, latents_noisy.shape[0])
            if self.cfg.max_items_eval > 0
            else latents_noisy.shape[0]
        )  # batch size
        # For each item, find the first 50-step timestep not larger than t_orig
        # (timesteps are descending; torch.min on bools picks the first False).
        large_enough_idxs = self.scheduler.timesteps_gpu.expand([bs, -1]) > t_orig[
            :bs
        ].unsqueeze(
            -1
        )  # sized [bs,50] > [bs,1]
        idxs = torch.min(large_enough_idxs, dim=1)[1]
        t = self.scheduler.timesteps_gpu[idxs]
        fracs = list((t / self.scheduler.config.num_train_timesteps).cpu().numpy())
        imgs_noisy = (latents_noisy[:bs] / 2 + 0.5).permute(0, 2, 3, 1)

        # get prev latent
        # Stepped one item at a time because scheduler.step takes a scalar t.
        latents_1step = []
        pred_1orig = []
        for b in range(bs):
            step_output = self.scheduler.step(
                noise_pred[b : b + 1], t[b], latents_noisy[b : b + 1]
            )
            latents_1step.append(step_output["prev_sample"])
            pred_1orig.append(step_output["pred_original_sample"])
        latents_1step = torch.cat(latents_1step)
        pred_1orig = torch.cat(pred_1orig)
        imgs_1step = (latents_1step / 2 + 0.5).permute(0, 2, 3, 1)
        imgs_1orig = (pred_1orig / 2 + 0.5).permute(0, 2, 3, 1)

        # Run the remaining denoising steps per item to get the final image.
        # NOTE(review): the inner loop reuses the name `t`, shadowing the
        # per-item timestep tensor above — intentional but easy to misread.
        latents_final = []
        for b, i in enumerate(idxs):
            latents = latents_1step[b : b + 1]
            # Pick this item's rows from the stacked [text|uncond(|neg...)] embeddings.
            text_emb = (
                text_embeddings[
                    [b, b + len(idxs), b + 2 * len(idxs), b + 3 * len(idxs)], ...
                ]
                if use_perp_neg
                else text_embeddings[[b, b + len(idxs)], ...]
            )
            neg_guid = neg_guidance_weights[b : b + 1] if use_perp_neg else None
            for t in tqdm(self.scheduler.timesteps[i + 1 :], leave=False):
                # pred noise
                noise_pred = self.get_noise_pred(
                    latents, t, text_emb, use_perp_neg, neg_guid
                )
                # get prev latent
                latents = self.scheduler.step(noise_pred, t, latents)["prev_sample"]
            latents_final.append(latents)

        latents_final = torch.cat(latents_final)
        imgs_final = (latents_final / 2 + 0.5).permute(0, 2, 3, 1)

        return {
            "bs": bs,
            "noise_levels": fracs,
            "imgs_noisy": imgs_noisy,
            "imgs_1step": imgs_1step,
            "imgs_1orig": imgs_1orig,
            "imgs_final": imgs_final,
        }
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
# clip grad for stable training as demonstrated in
# Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
# http://arxiv.org/abs/2303.15413
if self.cfg.grad_clip is not None:
self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step)
self.set_min_max_steps(
min_step_percent=C(self.cfg.min_step_percent, epoch, global_step),
max_step_percent=C(self.cfg.max_step_percent, epoch, global_step),
)
"""
# used by thresholding, experimental
def custom_ddpm_step(ddpm, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True):
self = ddpm
t = timestep
prev_t = self.previous_timestep(t)
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod_t = self.alphas_cumprod[t].item()
alpha_prod_t_prev = self.alphas_cumprod[prev_t].item() if prev_t >= 0 else 1.0
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
current_alpha_t = alpha_prod_t / alpha_prod_t_prev
current_beta_t = 1 - current_alpha_t
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
elif self.config.prediction_type == "v_prediction":
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
" `v_prediction` for the DDPMScheduler."
)
# 3. Clip or threshold "predicted x_0"
if self.config.thresholding:
pred_original_sample = self._threshold_sample(pred_original_sample)
elif self.config.clip_sample:
pred_original_sample = pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range
)
noise_thresholded = (sample - (alpha_prod_t ** 0.5) * pred_original_sample) / (beta_prod_t ** 0.5)
return noise_thresholded
"""
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/stable_diffusion_vsd_guidance.py | threestudio/models/guidance/stable_diffusion_vsd_guidance.py | import random
from contextlib import contextmanager
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers import (
DDPMScheduler,
DPMSolverMultistepScheduler,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.models.embeddings import TimestepEmbedding
from diffusers.utils.import_utils import is_xformers_available
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseModule
from threestudio.utils.misc import C, cleanup, parse_version
from threestudio.utils.typing import *
class ToWeightsDType(nn.Module):
    """Wrap a module so its output is cast to a fixed dtype."""

    def __init__(self, module: nn.Module, dtype: torch.dtype):
        super().__init__()
        self.module = module
        self.dtype = dtype

    def forward(self, x: Float[Tensor, "..."]) -> Float[Tensor, "..."]:
        out = self.module(x)
        return out.to(self.dtype)
@threestudio.register("stable-diffusion-vsd-guidance")
class StableDiffusionVSDGuidance(BaseModule):
    @dataclass
    class Config(BaseModule.Config):
        # Base model providing the frozen pretrained score.
        pretrained_model_name_or_path: str = "stabilityai/stable-diffusion-2-1-base"
        # Model the LoRA score estimator is built on (may equal the base).
        pretrained_model_name_or_path_lora: str = "stabilityai/stable-diffusion-2-1"
        # Memory/speed toggles forwarded to the diffusers pipelines.
        enable_memory_efficient_attention: bool = False
        enable_sequential_cpu_offload: bool = False
        enable_attention_slicing: bool = False
        enable_channels_last_format: bool = False
        # CFG scales for the pretrained and LoRA branches respectively.
        guidance_scale: float = 7.5
        guidance_scale_lora: float = 1.0
        # Optional gradient-clip value or schedule (resolved via C()).
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = True
        # When True, camera conditioning is zeroed 10% of the time in train_lora.
        lora_cfg_training: bool = True
        # Number of timesteps sampled per LoRA update.
        lora_n_timestamp_samples: int = 1
        # Fraction of the diffusion schedule from which t is sampled.
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        view_dependent_prompting: bool = True
        # "extrinsics" (c2w) or "mvp" camera conditioning for the LoRA UNet.
        camera_condition_type: str = "extrinsics"

    cfg: Config
    def configure(self) -> None:
        """Load the pretrained and LoRA Stable Diffusion pipelines, install
        LoRA attention processors and the camera embedding, and set up
        schedulers. Called once by the threestudio module lifecycle."""
        threestudio.info(f"Loading Stable Diffusion ...")

        self.weights_dtype = (
            torch.float16 if self.cfg.half_precision_weights else torch.float32
        )

        # Tokenizer/text-encoder are dropped: prompt processing is external.
        pipe_kwargs = {
            "tokenizer": None,
            "safety_checker": None,
            "feature_extractor": None,
            "requires_safety_checker": False,
            "torch_dtype": self.weights_dtype,
        }

        pipe_lora_kwargs = {
            "tokenizer": None,
            "safety_checker": None,
            "feature_extractor": None,
            "requires_safety_checker": False,
            "torch_dtype": self.weights_dtype,
        }

        # Plain dataclass holder — presumably to keep the pipelines out of
        # nn.Module child registration; confirm against BaseModule semantics.
        @dataclass
        class SubModules:
            pipe: StableDiffusionPipeline
            pipe_lora: StableDiffusionPipeline

        pipe = StableDiffusionPipeline.from_pretrained(
            self.cfg.pretrained_model_name_or_path,
            **pipe_kwargs,
        ).to(self.device)

        # Reuse one pipeline (and share the VAE) when both paths are identical.
        if (
            self.cfg.pretrained_model_name_or_path
            == self.cfg.pretrained_model_name_or_path_lora
        ):
            self.single_model = True
            pipe_lora = pipe
        else:
            self.single_model = False
            pipe_lora = StableDiffusionPipeline.from_pretrained(
                self.cfg.pretrained_model_name_or_path_lora,
                **pipe_lora_kwargs,
            ).to(self.device)
            # Share the base VAE to save memory.
            del pipe_lora.vae
            cleanup()
            pipe_lora.vae = pipe.vae
        self.submodules = SubModules(pipe=pipe, pipe_lora=pipe_lora)

        if self.cfg.enable_memory_efficient_attention:
            if parse_version(torch.__version__) >= parse_version("2"):
                threestudio.info(
                    "PyTorch2.0 uses memory efficient attention by default."
                )
            elif not is_xformers_available():
                threestudio.warn(
                    "xformers is not available, memory efficient attention is not enabled."
                )
            else:
                self.pipe.enable_xformers_memory_efficient_attention()
                self.pipe_lora.enable_xformers_memory_efficient_attention()

        if self.cfg.enable_sequential_cpu_offload:
            self.pipe.enable_sequential_cpu_offload()
            self.pipe_lora.enable_sequential_cpu_offload()

        if self.cfg.enable_attention_slicing:
            self.pipe.enable_attention_slicing(1)
            self.pipe_lora.enable_attention_slicing(1)

        if self.cfg.enable_channels_last_format:
            self.pipe.unet.to(memory_format=torch.channels_last)
            self.pipe_lora.unet.to(memory_format=torch.channels_last)

        # Text encoders are unused (embeddings come from the prompt processor).
        del self.pipe.text_encoder
        if not self.single_model:
            del self.pipe_lora.text_encoder
        cleanup()

        # Freeze everything; only the LoRA layers below are trained.
        for p in self.vae.parameters():
            p.requires_grad_(False)
        for p in self.unet.parameters():
            p.requires_grad_(False)
        for p in self.unet_lora.parameters():
            p.requires_grad_(False)

        # FIXME: hard-coded dims
        # 16 = flattened 4x4 camera matrix; 1280 = UNet time-embedding width.
        self.camera_embedding = ToWeightsDType(
            TimestepEmbedding(16, 1280), self.weights_dtype
        ).to(self.device)
        self.unet_lora.class_embedding = self.camera_embedding

        # set up LoRA layers
        lora_attn_procs = {}
        for name in self.unet_lora.attn_processors.keys():
            # attn1 is self-attention (no cross-attention dim).
            cross_attention_dim = (
                None
                if name.endswith("attn1.processor")
                else self.unet_lora.config.cross_attention_dim
            )
            # NOTE(review): if a processor name matches none of these prefixes,
            # hidden_size from the previous iteration leaks through — verify
            # all attn_processors keys start with mid/up/down blocks.
            if name.startswith("mid_block"):
                hidden_size = self.unet_lora.config.block_out_channels[-1]
            elif name.startswith("up_blocks"):
                block_id = int(name[len("up_blocks.")])
                hidden_size = list(reversed(self.unet_lora.config.block_out_channels))[
                    block_id
                ]
            elif name.startswith("down_blocks"):
                block_id = int(name[len("down_blocks.")])
                hidden_size = self.unet_lora.config.block_out_channels[block_id]

            lora_attn_procs[name] = LoRAAttnProcessor(
                hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
            )

        self.unet_lora.set_attn_processor(lora_attn_procs)

        self.lora_layers = AttnProcsLayers(self.unet_lora.attn_processors).to(
            self.device
        )
        # Drop state-dict remapping hooks so checkpoints use plain keys.
        self.lora_layers._load_state_dict_pre_hooks.clear()
        self.lora_layers._state_dict_hooks.clear()

        self.scheduler = DDPMScheduler.from_pretrained(
            self.cfg.pretrained_model_name_or_path,
            subfolder="scheduler",
            torch_dtype=self.weights_dtype,
        )

        self.scheduler_lora = DDPMScheduler.from_pretrained(
            self.cfg.pretrained_model_name_or_path_lora,
            subfolder="scheduler",
            torch_dtype=self.weights_dtype,
        )

        # Fast multistep schedulers used only for image sampling/visualization.
        self.scheduler_sample = DPMSolverMultistepScheduler.from_config(
            self.pipe.scheduler.config
        )
        self.scheduler_lora_sample = DPMSolverMultistepScheduler.from_config(
            self.pipe_lora.scheduler.config
        )

        self.pipe.scheduler = self.scheduler
        self.pipe_lora.scheduler = self.scheduler_lora

        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        self.set_min_max_steps()  # set to default value

        self.alphas: Float[Tensor, "..."] = self.scheduler.alphas_cumprod.to(
            self.device
        )

        self.grad_clip_val: Optional[float] = None

        threestudio.info(f"Loaded Stable Diffusion!")
@torch.cuda.amp.autocast(enabled=False)
def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0.98):
self.min_step = int(self.num_train_timesteps * min_step_percent)
self.max_step = int(self.num_train_timesteps * max_step_percent)
    # Convenience accessors for the pipelines stored in self.submodules.
    @property
    def pipe(self):
        # Frozen pretrained Stable Diffusion pipeline.
        return self.submodules.pipe

    @property
    def pipe_lora(self):
        # LoRA-adapted pipeline (may alias `pipe` when single_model is True).
        return self.submodules.pipe_lora

    @property
    def unet(self):
        # UNet of the pretrained pipeline.
        return self.submodules.pipe.unet

    @property
    def unet_lora(self):
        # UNet carrying the trainable LoRA attention processors.
        return self.submodules.pipe_lora.unet

    @property
    def vae(self):
        # VAE of the pretrained pipeline.
        return self.submodules.pipe.vae

    @property
    def vae_lora(self):
        # VAE of the LoRA pipeline (aliased to the pretrained VAE in configure).
        return self.submodules.pipe_lora.vae
    @torch.no_grad()
    @torch.cuda.amp.autocast(enabled=False)
    def _sample(
        self,
        pipe: StableDiffusionPipeline,
        sample_scheduler: DPMSolverMultistepScheduler,
        text_embeddings: Float[Tensor, "BB N Nf"],
        num_inference_steps: int,
        guidance_scale: float,
        num_images_per_prompt: int = 1,
        height: Optional[int] = None,
        width: Optional[int] = None,
        class_labels: Optional[Float[Tensor, "BB 16"]] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    ) -> Float[Tensor, "B H W 3"]:
        """Run a full CFG denoising loop and decode to images in [0, 1].

        text_embeddings stacks [conditional | unconditional] halves; the
        batch size is therefore half its leading dimension. class_labels,
        when given, routes through the UNet's class (camera) embedding.
        """
        vae_scale_factor = 2 ** (len(pipe.vae.config.block_out_channels) - 1)
        height = height or pipe.unet.config.sample_size * vae_scale_factor
        width = width or pipe.unet.config.sample_size * vae_scale_factor
        batch_size = text_embeddings.shape[0] // 2
        device = self.device

        sample_scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = sample_scheduler.timesteps
        num_channels_latents = pipe.unet.config.in_channels

        latents = pipe.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            self.weights_dtype,
            device,
            generator,
        )

        for i, t in enumerate(timesteps):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2)
            latent_model_input = sample_scheduler.scale_model_input(
                latent_model_input, t
            )

            # predict the noise residual
            if class_labels is None:
                # No camera conditioning: temporarily strip the class embedding
                # so the pretrained UNet runs unmodified.
                with self.disable_unet_class_embedding(pipe.unet) as unet:
                    noise_pred = unet(
                        latent_model_input,
                        t,
                        encoder_hidden_states=text_embeddings.to(self.weights_dtype),
                        cross_attention_kwargs=cross_attention_kwargs,
                    ).sample
            else:
                noise_pred = pipe.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=text_embeddings.to(self.weights_dtype),
                    class_labels=class_labels,
                    cross_attention_kwargs=cross_attention_kwargs,
                ).sample

            # Classifier-free guidance combine (cond first, uncond second).
            noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (
                noise_pred_text - noise_pred_uncond
            )

            # compute the previous noisy sample x_t -> x_t-1
            latents = sample_scheduler.step(noise_pred, t, latents).prev_sample

        latents = 1 / pipe.vae.config.scaling_factor * latents
        images = pipe.vae.decode(latents).sample
        images = (images / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        images = images.permute(0, 2, 3, 1).float()
        return images
def sample(
self,
prompt_utils: PromptProcessorOutput,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
seed: int = 0,
**kwargs,
) -> Float[Tensor, "N H W 3"]:
# view-dependent text embeddings
text_embeddings_vd = prompt_utils.get_text_embeddings(
elevation,
azimuth,
camera_distances,
view_dependent_prompting=self.cfg.view_dependent_prompting,
)
cross_attention_kwargs = {"scale": 0.0} if self.single_model else None
generator = torch.Generator(device=self.device).manual_seed(seed)
return self._sample(
pipe=self.pipe,
sample_scheduler=self.scheduler_sample,
text_embeddings=text_embeddings_vd,
num_inference_steps=25,
guidance_scale=self.cfg.guidance_scale,
cross_attention_kwargs=cross_attention_kwargs,
generator=generator,
)
def sample_lora(
self,
prompt_utils: PromptProcessorOutput,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
mvp_mtx: Float[Tensor, "B 4 4"],
c2w: Float[Tensor, "B 4 4"],
seed: int = 0,
**kwargs,
) -> Float[Tensor, "N H W 3"]:
# input text embeddings, view-independent
text_embeddings = prompt_utils.get_text_embeddings(
elevation, azimuth, camera_distances, view_dependent_prompting=False
)
if self.cfg.camera_condition_type == "extrinsics":
camera_condition = c2w
elif self.cfg.camera_condition_type == "mvp":
camera_condition = mvp_mtx
else:
raise ValueError(
f"Unknown camera_condition_type {self.cfg.camera_condition_type}"
)
B = elevation.shape[0]
camera_condition_cfg = torch.cat(
[
camera_condition.view(B, -1),
torch.zeros_like(camera_condition.view(B, -1)),
],
dim=0,
)
generator = torch.Generator(device=self.device).manual_seed(seed)
return self._sample(
sample_scheduler=self.scheduler_lora_sample,
pipe=self.pipe_lora,
text_embeddings=text_embeddings,
num_inference_steps=25,
guidance_scale=self.cfg.guidance_scale_lora,
class_labels=camera_condition_cfg,
cross_attention_kwargs={"scale": 1.0},
generator=generator,
)
@torch.cuda.amp.autocast(enabled=False)
def forward_unet(
self,
unet: UNet2DConditionModel,
latents: Float[Tensor, "..."],
t: Float[Tensor, "..."],
encoder_hidden_states: Float[Tensor, "..."],
class_labels: Optional[Float[Tensor, "B 16"]] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
) -> Float[Tensor, "..."]:
input_dtype = latents.dtype
return unet(
latents.to(self.weights_dtype),
t.to(self.weights_dtype),
encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype),
class_labels=class_labels,
cross_attention_kwargs=cross_attention_kwargs,
).sample.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def encode_images(
self, imgs: Float[Tensor, "B 3 512 512"]
) -> Float[Tensor, "B 4 64 64"]:
input_dtype = imgs.dtype
imgs = imgs * 2.0 - 1.0
posterior = self.vae.encode(imgs.to(self.weights_dtype)).latent_dist
latents = posterior.sample() * self.vae.config.scaling_factor
return latents.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def decode_latents(
self,
latents: Float[Tensor, "B 4 H W"],
latent_height: int = 64,
latent_width: int = 64,
) -> Float[Tensor, "B 3 512 512"]:
input_dtype = latents.dtype
latents = F.interpolate(
latents, (latent_height, latent_width), mode="bilinear", align_corners=False
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents.to(self.weights_dtype)).sample
image = (image * 0.5 + 0.5).clamp(0, 1)
return image.to(input_dtype)
    @contextmanager
    def disable_unet_class_embedding(self, unet: UNet2DConditionModel):
        """Temporarily remove the UNet's class (camera) embedding so it runs
        without camera conditioning; always restored on exit."""
        class_embedding = unet.class_embedding
        try:
            unet.class_embedding = None
            yield unet
        finally:
            unet.class_embedding = class_embedding
    def compute_grad_vsd(
        self,
        latents: Float[Tensor, "B 4 64 64"],
        text_embeddings_vd: Float[Tensor, "BB 77 768"],
        text_embeddings: Float[Tensor, "BB 77 768"],
        camera_condition: Float[Tensor, "B 4 4"],
    ):
        """Compute the VSD gradient: w(t) * (eps_pretrained - eps_lora).

        Both UNets see the same noised latents at a random timestep; the
        pretrained branch uses view-dependent embeddings, the LoRA branch
        view-independent ones plus flattened camera conditioning.
        """
        B = latents.shape[0]

        with torch.no_grad():
            # random timestamp
            t = torch.randint(
                self.min_step,
                self.max_step + 1,
                [B],
                dtype=torch.long,
                device=self.device,
            )
            # add noise
            noise = torch.randn_like(latents)
            latents_noisy = self.scheduler.add_noise(latents, noise, t)
            # pred noise
            latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
            with self.disable_unet_class_embedding(self.unet) as unet:
                # Shared-UNet case: run with LoRA contribution disabled.
                cross_attention_kwargs = {"scale": 0.0} if self.single_model else None
                noise_pred_pretrain = self.forward_unet(
                    unet,
                    latent_model_input,
                    torch.cat([t] * 2),
                    encoder_hidden_states=text_embeddings_vd,
                    cross_attention_kwargs=cross_attention_kwargs,
                )

            # use view-independent text embeddings in LoRA
            text_embeddings_cond, _ = text_embeddings.chunk(2)
            noise_pred_est = self.forward_unet(
                self.unet_lora,
                latent_model_input,
                torch.cat([t] * 2),
                encoder_hidden_states=torch.cat([text_embeddings_cond] * 2),
                class_labels=torch.cat(
                    [
                        camera_condition.view(B, -1),
                        torch.zeros_like(camera_condition.view(B, -1)),
                    ],
                    dim=0,
                ),
                cross_attention_kwargs={"scale": 1.0},
            )

        (
            noise_pred_pretrain_text,
            noise_pred_pretrain_uncond,
        ) = noise_pred_pretrain.chunk(2)

        # NOTE: guidance scale definition here is aligned with diffusers, but different from other guidance
        noise_pred_pretrain = noise_pred_pretrain_uncond + self.cfg.guidance_scale * (
            noise_pred_pretrain_text - noise_pred_pretrain_uncond
        )

        # TODO: more general cases
        assert self.scheduler.config.prediction_type == "epsilon"
        if self.scheduler_lora.config.prediction_type == "v_prediction":
            # Convert the LoRA v-prediction to an epsilon prediction:
            # eps = sigma_t * x_t + alpha_t * v.
            alphas_cumprod = self.scheduler_lora.alphas_cumprod.to(
                device=latents_noisy.device, dtype=latents_noisy.dtype
            )
            alpha_t = alphas_cumprod[t] ** 0.5
            sigma_t = (1 - alphas_cumprod[t]) ** 0.5

            noise_pred_est = latent_model_input * torch.cat([sigma_t] * 2, dim=0).view(
                -1, 1, 1, 1
            ) + noise_pred_est * torch.cat([alpha_t] * 2, dim=0).view(-1, 1, 1, 1)

        (
            noise_pred_est_camera,
            noise_pred_est_uncond,
        ) = noise_pred_est.chunk(2)

        # NOTE: guidance scale definition here is aligned with diffusers, but different from other guidance
        noise_pred_est = noise_pred_est_uncond + self.cfg.guidance_scale_lora * (
            noise_pred_est_camera - noise_pred_est_uncond
        )

        # w(t) = sigma_t^2 weighting, as in SDS.
        w = (1 - self.alphas[t]).view(-1, 1, 1, 1)

        grad = w * (noise_pred_pretrain - noise_pred_est)
        return grad
    def train_lora(
        self,
        latents: Float[Tensor, "B 4 64 64"],
        text_embeddings: Float[Tensor, "BB 77 768"],
        camera_condition: Float[Tensor, "B 4 4"],
    ):
        """Denoising loss for the LoRA branch on the current (detached) latents.

        Returns a scalar MSE between the LoRA UNet's prediction and the
        scheduler's target (noise or velocity, per prediction_type).
        """
        B = latents.shape[0]
        # Detach: this loss trains only the LoRA layers, not the 3D scene.
        latents = latents.detach().repeat(self.cfg.lora_n_timestamp_samples, 1, 1, 1)

        # Sample timesteps over the full schedule (not min/max-step limited).
        t = torch.randint(
            int(self.num_train_timesteps * 0.0),
            int(self.num_train_timesteps * 1.0),
            [B * self.cfg.lora_n_timestamp_samples],
            dtype=torch.long,
            device=self.device,
        )

        noise = torch.randn_like(latents)
        noisy_latents = self.scheduler_lora.add_noise(latents, noise, t)
        if self.scheduler_lora.config.prediction_type == "epsilon":
            target = noise
        elif self.scheduler_lora.config.prediction_type == "v_prediction":
            target = self.scheduler_lora.get_velocity(latents, noise, t)
        else:
            raise ValueError(
                f"Unknown prediction type {self.scheduler_lora.config.prediction_type}"
            )
        # use view-independent text embeddings in LoRA
        text_embeddings_cond, _ = text_embeddings.chunk(2)
        # Classifier-free training: drop camera conditioning 10% of the time.
        if self.cfg.lora_cfg_training and random.random() < 0.1:
            camera_condition = torch.zeros_like(camera_condition)
        noise_pred = self.forward_unet(
            self.unet_lora,
            noisy_latents,
            t,
            encoder_hidden_states=text_embeddings_cond.repeat(
                self.cfg.lora_n_timestamp_samples, 1, 1
            ),
            class_labels=camera_condition.view(B, -1).repeat(
                self.cfg.lora_n_timestamp_samples, 1
            ),
            cross_attention_kwargs={"scale": 1.0},
        )
        return F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
def get_latents(
self, rgb_BCHW: Float[Tensor, "B C H W"], rgb_as_latents=False
) -> Float[Tensor, "B 4 64 64"]:
if rgb_as_latents:
latents = F.interpolate(
rgb_BCHW, (64, 64), mode="bilinear", align_corners=False
)
else:
rgb_BCHW_512 = F.interpolate(
rgb_BCHW, (512, 512), mode="bilinear", align_corners=False
)
# encode image into latents with vae
latents = self.encode_images(rgb_BCHW_512)
return latents
    def forward(
        self,
        rgb: Float[Tensor, "B H W C"],
        prompt_utils: PromptProcessorOutput,
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        mvp_mtx: Float[Tensor, "B 4 4"],
        c2w: Float[Tensor, "B 4 4"],
        rgb_as_latents=False,
        **kwargs,
    ):
        """Compute the VSD loss on rendered images plus the LoRA training loss.

        Returns a dict with loss_vsd, loss_lora, grad_norm and the current
        min/max timestep bounds.
        """
        batch_size = rgb.shape[0]

        rgb_BCHW = rgb.permute(0, 3, 1, 2)
        latents = self.get_latents(rgb_BCHW, rgb_as_latents=rgb_as_latents)

        # view-dependent text embeddings
        text_embeddings_vd = prompt_utils.get_text_embeddings(
            elevation,
            azimuth,
            camera_distances,
            view_dependent_prompting=self.cfg.view_dependent_prompting,
        )

        # input text embeddings, view-independent
        text_embeddings = prompt_utils.get_text_embeddings(
            elevation, azimuth, camera_distances, view_dependent_prompting=False
        )

        if self.cfg.camera_condition_type == "extrinsics":
            camera_condition = c2w
        elif self.cfg.camera_condition_type == "mvp":
            camera_condition = mvp_mtx
        else:
            raise ValueError(
                f"Unknown camera_condition_type {self.cfg.camera_condition_type}"
            )

        grad = self.compute_grad_vsd(
            latents, text_embeddings_vd, text_embeddings, camera_condition
        )

        grad = torch.nan_to_num(grad)
        # clip grad for stable training?
        if self.grad_clip_val is not None:
            grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)

        # reparameterization trick
        # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
        target = (latents - grad).detach()
        loss_vsd = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size

        loss_lora = self.train_lora(latents, text_embeddings, camera_condition)

        return {
            "loss_vsd": loss_vsd,
            "loss_lora": loss_lora,
            "grad_norm": grad.norm(),
            "min_step": self.min_step,
            "max_step": self.max_step,
        }
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
# clip grad for stable training as demonstrated in
# Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
# http://arxiv.org/abs/2303.15413
if self.cfg.grad_clip is not None:
self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step)
self.set_min_max_steps(
min_step_percent=C(self.cfg.min_step_percent, epoch, global_step),
max_step_percent=C(self.cfg.max_step_percent, epoch, global_step),
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/DreamReward_guidance1.py | threestudio/models/guidance/DreamReward_guidance1.py | import sys
from dataclasses import dataclass, field
from typing import List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mvdream.camera_utils import convert_opengl_to_blender, normalize_camera
from mvdream.model_zoo import build_model
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseModule
from threestudio.utils.misc import C, cleanup, parse_version
from threestudio.utils.typing import *
import Reward3D as r3d
from PIL import Image
from torchvision.transforms import InterpolationMode
from torchvision.transforms import Compose, Resize, CenterCrop, Normalize
# Fallback for older torchvision/Pillow combinations without InterpolationMode.
# NOTE(review): the `from torchvision.transforms import InterpolationMode` at
# the top of this file is outside this try block, so the ImportError this
# guards against would already have been raised at import time — verify the
# intended fallback path against the torchvision versions in use.
try:
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    BICUBIC = Image.BICUBIC
def _transform():
    """Build the CLIP-style preprocessing pipeline (resize, crop, normalize)."""
    steps = [
        Resize(224, interpolation=BICUBIC),
        CenterCrop(224),
        # CLIP's published RGB mean/std.
        Normalize(
            (0.48145466, 0.4578275, 0.40821073),
            (0.26862954, 0.26130258, 0.27577711),
        ),
    ]
    return Compose(steps)
def calculate_weight(a, b, n=0):
    """Return a power-of-ten factor that rescales *b* toward *a*'s magnitude.

    Magnitude is the number of digits before the decimal point, so for
    a=1234.5, b=1.5 the result is 10**(4 - 1 - n). A leading minus sign is
    stripped before counting; previously "-123" counted four characters and
    inflated the weight for negative values by a factor of 10.

    Args:
        a: Reference value (e.g. the dominant loss term).
        b: Value to be rescaled toward *a*'s magnitude.
        n: Extra orders of magnitude to subtract from the exponent.

    Returns:
        10 ** (digits(a) - digits(b) - n): an int for non-negative exponents,
        otherwise a float.
    """
    # NOTE(review): very large/small floats repr in scientific notation
    # ("1e+20"), which this digit-count heuristic does not handle — assumed
    # out of range for the loss values used here.
    digits_a = len(str(a).lstrip("-").split(".")[0])
    digits_b = len(str(b).lstrip("-").split(".")[0])
    return 10 ** (digits_a - digits_b - n)
@threestudio.register("DreamReward-guidance1")
class MultiviewDiffusionGuidance(BaseModule):
    @dataclass
    class Config(BaseModule.Config):
        # MVDream checkpoint name; see mvdream.model_zoo.PRETRAINED_MODELS.
        model_name: str = (
            "sd-v2.1-base-4view"  # check mvdream.model_zoo.PRETRAINED_MODELS
        )
        # Path to the Reward3D state dict loaded in configure().
        reward_model_path: str = (
            ""
        )
        resume_num: int = 0
        # Which Reward3D variant to build in configure().
        alg_type: str = "Reward3D_Scorer"  # in [Reward3D_Scorer, Reward3D_CrossViewFusion]
        ckpt_path: Optional[
            str
        ] = None  # path to local checkpoint (None for loading from url)
        guidance_scale: float = 50.0
        # Optional gradient-clip value or schedule (resolved via C()).
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = True
        # Fraction of the diffusion schedule from which t is sampled.
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        # Only "rotation" (normalized camera) is implemented in get_camera_cond.
        camera_condition_type: str = "rotation"
        view_dependent_prompting: bool = False
        # Number of views rendered/denoised jointly per batch.
        n_view: int = 4
        # Resolution images are resized to before VAE encoding.
        image_size: int = 256
        recon_loss: bool = True
        recon_std_rescale: float = 0.5

    cfg: Config
    def configure(self) -> None:
        """Load the frozen MVDream diffusion model and the Reward3D scorer."""
        threestudio.info(f"Loading Multiview Diffusion ...")

        self.model = build_model(self.cfg.model_name, ckpt_path=self.cfg.ckpt_path)
        # The diffusion backbone is frozen; only the 3D scene is optimized.
        for p in self.model.parameters():
            p.requires_grad_(False)

        self.num_train_timesteps = 1000
        min_step_percent = C(self.cfg.min_step_percent, 0, 0)
        max_step_percent = C(self.cfg.max_step_percent, 0, 0)
        self.min_step = int(self.num_train_timesteps * min_step_percent)
        self.max_step = int(self.num_train_timesteps * max_step_percent)
        self.grad_clip_val: Optional[float] = None

        self.to(self.device)
        self.alg_type = self.cfg.alg_type
        threestudio.info(f"Loaded Multiview Diffusion!")
        med_config_path = "scripts/med_config.json"
        state_dict_path = self.cfg.reward_model_path
        state_dict = torch.load(state_dict_path)
        # NOTE(review): an alg_type outside these two values leaves Reward3D
        # unbound and raises NameError below — consider validating earlier.
        if self.alg_type == "Reward3D_Scorer":
            Reward3D = r3d.Reward3D_(device=self.device, med_config=med_config_path)
        elif self.alg_type == "Reward3D_CrossViewFusion":
            Reward3D = r3d.Reward3D(device=self.device, med_config=med_config_path)
        # strict=False: checkpoint may cover only a subset of the model keys.
        msg = Reward3D.load_state_dict(state_dict,strict=False)
        print(msg)
        print(self.cfg.reward_model_path)
        self.Reward3D=Reward3D.to(self.device)
        threestudio.info(f"Loaded Reward3D!")
def get_camera_cond(
self,
camera: Float[Tensor, "B 4 4"],
fovy=None,
):
# Note: the input of threestudio is already blender coordinate system
# camera = convert_opengl_to_blender(camera)
if self.cfg.camera_condition_type == "rotation": # normalized camera
camera = normalize_camera(camera)
camera = camera.flatten(start_dim=1)
else:
raise NotImplementedError(
f"Unknown camera_condition_type={self.cfg.camera_condition_type}"
)
return camera
def encode_images(
self, imgs: Float[Tensor, "B 3 256 256"]
) -> Float[Tensor, "B 4 32 32"]:
imgs = imgs * 2.0 - 1.0
latents = self.model.get_first_stage_encoding(
self.model.encode_first_stage(imgs)
)
return latents # [B, 4, 32, 32] Latent space image
def decode_latents(
self,
latents: Float[Tensor, "B 4 H W"],
) -> Float[Tensor, "B 3 256 256"]:
input_dtype = latents.dtype
image = self.model.decode_first_stage(latents)
image = (image * 0.5 + 0.5).clamp(0, 1)
return image.to(input_dtype)
def forward(
    self,
    rgb: Float[Tensor, "B H W C"],
    prompt_utils: PromptProcessorOutput,
    elevation: Float[Tensor, "B"],
    azimuth: Float[Tensor, "B"],
    camera_distances: Float[Tensor, "B"],
    c2w: Float[Tensor, "B 4 4"],
    rgb_as_latents: bool = False,
    fovy=None,
    timestep=None,
    text_embeddings=None,
    input_is_latent=False,
    **kwargs,
):
    """Compute the Reward3D-augmented SDS loss for a batch of rendered views.

    Training schedule (driven by an internally tracked step counter):
      - before step 6000: plain (MVDream-style) SDS / x0-reconstruction loss;
      - steps >= 6000: add a weighted Reward3D term on the rendered views;
      - steps > 9800: the Reward3D term replaces the SDS loss entirely.

    Returns a dict with "loss_sds" (scalar loss) and "grad_norm".
    """
    batch_size = rgb.shape[0]
    camera = c2w
    rgb_BCHW = rgb.permute(0, 3, 1, 2)
    if text_embeddings is None:
        text_embeddings = prompt_utils.get_text_embeddings(
            elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
        )
    # `pred_rgb` only exists on the VAE-encode path below; the reward branch
    # needs it, so track its availability explicitly.
    pred_rgb = None
    if input_is_latent:
        latents = rgb
    else:
        latents: Float[Tensor, "B 4 64 64"]
        if rgb_as_latents:
            latents = (
                F.interpolate(
                    rgb_BCHW, (64, 64), mode="bilinear", align_corners=False
                )
                * 2
                - 1
            )
        else:
            # interp to the diffusion image size to be fed into the VAE.
            pred_rgb = F.interpolate(
                rgb_BCHW,
                (self.cfg.image_size, self.cfg.image_size),
                mode="bilinear",
                align_corners=False,
            )
            # encode image into latents with vae, requires grad!
            latents = self.encode_images(pred_rgb)
    # sample timestep
    if timestep is None:
        t = torch.randint(
            self.min_step,
            self.max_step + 1,
            [1],
            dtype=torch.long,
            device=latents.device,
        )
    else:
        assert timestep >= 0 and timestep < self.num_train_timesteps
        t = torch.full([1], timestep, dtype=torch.long, device=latents.device)
    # BUGFIX: `t_` was previously only assigned in the random-timestep branch,
    # raising NameError whenever a caller supplied `timestep` explicitly.
    t_ = t.item()
    t_expand = t.repeat(text_embeddings.shape[0])
    # predict the noise residual with unet, NO grad!
    with torch.no_grad():
        # add noise
        noise = torch.randn_like(latents)
        latents_noisy = self.model.q_sample(latents, t, noise)
        # pred noise (cond and uncond batched together)
        latent_model_input = torch.cat([latents_noisy] * 2)
        # save input tensors for UNet
        if camera is not None:
            camera = self.get_camera_cond(camera, fovy)
            camera = camera.repeat(2, 1).to(text_embeddings)
            context = {
                "context": text_embeddings,
                "camera": camera,
                "num_frames": self.cfg.n_view,
            }
        else:
            context = {"context": text_embeddings}
        noise_pred = self.model.apply_model(latent_model_input, t_expand, context)
    # perform classifier-free guidance
    noise_pred_text, noise_pred_uncond = noise_pred.chunk(
        2
    )  # Note: flipped compared to stable-dreamfusion
    noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
        noise_pred_text - noise_pred_uncond
    )
    # BUGFIX: keep track of the x0 reconstruction so the reward branch below
    # can safely test for it (it was previously unbound when recon_loss=False).
    latents_recon = None
    if self.cfg.recon_loss:
        # reconstruct x0
        latents_recon = self.model.predict_start_from_noise(
            latents_noisy, t, noise_pred
        )
        # clip or rescale x0 (match the per-object std of the no-CFG prediction)
        if self.cfg.recon_std_rescale > 0:
            latents_recon_nocfg = self.model.predict_start_from_noise(
                latents_noisy, t, noise_pred_text
            )
            latents_recon_nocfg_reshape = latents_recon_nocfg.view(
                -1, self.cfg.n_view, *latents_recon_nocfg.shape[1:]
            )
            latents_recon_reshape = latents_recon.view(
                -1, self.cfg.n_view, *latents_recon.shape[1:]
            )
            factor = (
                latents_recon_nocfg_reshape.std([1, 2, 3, 4], keepdim=True) + 1e-8
            ) / (latents_recon_reshape.std([1, 2, 3, 4], keepdim=True) + 1e-8)
            latents_recon_adjust = latents_recon.clone() * factor.squeeze(
                1
            ).repeat_interleave(self.cfg.n_view, dim=0)
            latents_recon = (
                self.cfg.recon_std_rescale * latents_recon_adjust
                + (1 - self.cfg.recon_std_rescale) * latents_recon
            )
        # x0-reconstruction loss from Sec 3.2 and Appendix
        loss = (
            0.5
            * F.mse_loss(latents, latents_recon.detach(), reduction="sum")
            / latents.shape[0]
        )
        grad = torch.autograd.grad(loss, latents, retain_graph=True)[0]
    else:
        # Original SDS
        # w(t), sigma_t^2
        w = 1 - self.alphas_cumprod[t]
        grad = w * (noise_pred - noise)
        # clip grad for stable training?
        if self.grad_clip_val is not None:
            grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
        grad = torch.nan_to_num(grad)
        # reparameterization trick: d(loss)/d(latents) = latents - target = grad
        target = (latents - grad).detach()
        loss = 0.5 * F.mse_loss(latents, target, reduction="sum") / latents.shape[0]
    # Tokenize the four view-dependent prompts once and cache them; the cache's
    # first creation also initializes the internal step counter.
    if not hasattr(self, 'rm_input_ids'):
        self.rm_input_ids = []
        self.rm_attention_mask = []
        prompts_vds = prompt_utils.prompts_vd
        for idx in range(4):
            prompts_vd = prompts_vds[idx]
            g = self.Reward3D.blip.tokenizer(
                prompts_vd,
                padding='max_length',
                truncation=True,
                max_length=100,
                return_tensors="pt"
            )
            self.rm_input_ids.append(g.input_ids)
            self.rm_attention_mask.append(g.attention_mask)
        self.global_step = 0 + self.cfg.resume_num
    else:
        self.global_step += 1
    adding_reward = self.global_step >= 6000
    # BUGFIX: the reward term needs the rendered RGB; skip it (instead of
    # crashing with NameError) when latents were passed in directly.
    if adding_reward and pred_rgb is not None:
        # Map each view to its view-dependent prompt tokens.
        direction_idx = torch.zeros_like(elevation, dtype=torch.long)
        for d in prompt_utils.directions:
            direction_idx[
                d.condition(elevation, azimuth, camera_distances)
            ] = prompt_utils.direction2idx[d.name]
        rm_input_ids = torch.cat([self.rm_input_ids[idx] for idx in direction_idx]).to(self.device)
        rm_attention_mask = torch.cat([self.rm_attention_mask[idx] for idx in direction_idx]).to(self.device)
        # BUGFIX: `latents_recon` only exists when recon_loss is enabled;
        # guard on it so this path cannot raise NameError.
        if latents_recon is not None and t_ <= 100 and self.global_step <= 9800:
            # At low noise, score the decoded x0 prediction instead of the raw
            # render; straight-through so gradients still flow into pred_rgb.
            with torch.no_grad():
                image = self.decode_latents(latents_recon.detach())
            image = pred_rgb - (pred_rgb - image).detach()
            image = _transform()(image)
            rewards = self.Reward3D(image, rm_input_ids, rm_attention_mask)
        else:
            image_render = _transform()(pred_rgb)
            rewards = self.Reward3D(image_render, rm_input_ids, rm_attention_mask)
        # Hinge the reward at 4 so well-scored views contribute no loss.
        loss_reward = F.relu(-rewards + 4).mean()
        weight = calculate_weight(loss.item(), loss_reward.item())
        loss += loss_reward * weight * 0.6
        # Final refinement phase: optimize the reward alone.
        if self.global_step > 9800:
            loss = loss_reward * 1000000
    return {
        "loss_sds": loss,
        "grad_norm": grad.norm(),
    }
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
    """Re-anneal the sampled-timestep window from the (possibly scheduled) config."""
    lo = C(self.cfg.min_step_percent, epoch, global_step)
    hi = C(self.cfg.max_step_percent, epoch, global_step)
    self.min_step = int(self.num_train_timesteps * lo)
    self.max_step = int(self.num_train_timesteps * hi)
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/controlnet_guidance.py | threestudio/models/guidance/controlnet_guidance.py | import os
from dataclasses import dataclass
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from controlnet_aux import CannyDetector, NormalBaeDetector
from diffusers import ControlNetModel, DDIMScheduler, StableDiffusionControlNetPipeline
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, parse_version
from threestudio.utils.typing import *
@threestudio.register("stable-diffusion-controlnet-guidance")
class ControlNetGuidance(BaseObject):
    """SDS / iterative-editing guidance backed by ControlNet-conditioned Stable Diffusion."""

    @dataclass
    class Config(BaseObject.Config):
        # HuggingFace cache directory (None = default).
        cache_dir: Optional[str] = None
        # Base SD checkpoint; the ControlNet checkpoint is chosen from control_type.
        pretrained_model_name_or_path: str = "SG161222/Realistic_Vision_V2.0"
        # Checkpoint to load the DDIM scheduler config from.
        ddim_scheduler_name_or_path: str = "runwayml/stable-diffusion-v1-5"
        control_type: str = "normal"  # normal/canny
        # Memory/speed toggles forwarded to the diffusers pipeline.
        enable_memory_efficient_attention: bool = False
        enable_sequential_cpu_offload: bool = False
        enable_attention_slicing: bool = False
        enable_channels_last_format: bool = False
        # Classifier-free guidance scale.
        guidance_scale: float = 7.5
        # ControlNet conditioning strength.
        condition_scale: float = 1.5
        # Optional grad-clip schedule (see update_step).
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = True
        # Resize renders to this square size before encoding; -1 = round H/W to /8.
        fixed_size: int = -1
        # Fractional bounds of the sampled diffusion timestep.
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        # Number of denoising steps used by edit_latents.
        diffusion_steps: int = 20
        # True: return an SDS loss; False: return edited images.
        use_sds: bool = False
        # Canny threshold
        canny_lower_bound: int = 50
        canny_upper_bound: int = 100

    cfg: Config
def configure(self) -> None:
    """Load the ControlNet + Stable Diffusion pipeline, its scheduler and
    preprocessor, freeze all weights, and initialize timestep bounds."""
    threestudio.info(f"Loading ControlNet ...")
    controlnet_name_or_path: str
    if self.cfg.control_type == "normal":
        controlnet_name_or_path = "lllyasviel/control_v11p_sd15_normalbae"
    elif self.cfg.control_type == "canny":
        controlnet_name_or_path = "lllyasviel/control_v11p_sd15_canny"
    else:
        # BUGFIX: an unknown control_type previously fell through and left
        # `controlnet_name_or_path` unbound, raising UnboundLocalError below.
        raise ValueError(f"Unknown control type: {self.cfg.control_type}")
    self.weights_dtype = (
        torch.float16 if self.cfg.half_precision_weights else torch.float32
    )
    pipe_kwargs = {
        "safety_checker": None,
        "feature_extractor": None,
        "requires_safety_checker": False,
        "torch_dtype": self.weights_dtype,
        "cache_dir": self.cfg.cache_dir,
    }
    controlnet = ControlNetModel.from_pretrained(
        controlnet_name_or_path,
        torch_dtype=self.weights_dtype,
        cache_dir=self.cfg.cache_dir,
    )
    self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
        self.cfg.pretrained_model_name_or_path, controlnet=controlnet, **pipe_kwargs
    ).to(self.device)
    # Scheduler is loaded separately so a different base checkpoint can
    # provide the DDIM config.
    self.scheduler = DDIMScheduler.from_pretrained(
        self.cfg.ddim_scheduler_name_or_path,
        subfolder="scheduler",
        torch_dtype=self.weights_dtype,
        cache_dir=self.cfg.cache_dir,
    )
    self.scheduler.set_timesteps(self.cfg.diffusion_steps)
    if self.cfg.enable_memory_efficient_attention:
        if parse_version(torch.__version__) >= parse_version("2"):
            threestudio.info(
                "PyTorch2.0 uses memory efficient attention by default."
            )
        elif not is_xformers_available():
            threestudio.warn(
                "xformers is not available, memory efficient attention is not enabled."
            )
        else:
            self.pipe.enable_xformers_memory_efficient_attention()
    if self.cfg.enable_sequential_cpu_offload:
        self.pipe.enable_sequential_cpu_offload()
    if self.cfg.enable_attention_slicing:
        self.pipe.enable_attention_slicing(1)
    if self.cfg.enable_channels_last_format:
        self.pipe.unet.to(memory_format=torch.channels_last)
    # Create model (aliases into the pipeline, all in eval mode)
    self.vae = self.pipe.vae.eval()
    self.unet = self.pipe.unet.eval()
    self.controlnet = self.pipe.controlnet.eval()
    if self.cfg.control_type == "normal":
        self.preprocessor = NormalBaeDetector.from_pretrained(
            "lllyasviel/Annotators"
        )
        self.preprocessor.model.to(self.device)
    elif self.cfg.control_type == "canny":
        self.preprocessor = CannyDetector()
    # Guidance only backpropagates into the rendered image, never the weights.
    for p in self.vae.parameters():
        p.requires_grad_(False)
    for p in self.unet.parameters():
        p.requires_grad_(False)
    self.num_train_timesteps = self.scheduler.config.num_train_timesteps
    self.set_min_max_steps()  # set to default value
    self.alphas: Float[Tensor, "..."] = self.scheduler.alphas_cumprod.to(
        self.device
    )
    self.grad_clip_val: Optional[float] = None
    threestudio.info(f"Loaded ControlNet!")
@torch.cuda.amp.autocast(enabled=False)
def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0.98):
    """Set the inclusive timestep-sampling bounds as fractions of the schedule."""
    total = self.num_train_timesteps
    self.min_step = int(total * min_step_percent)
    self.max_step = int(total * max_step_percent)
@torch.cuda.amp.autocast(enabled=False)
def forward_controlnet(
    self,
    latents: Float[Tensor, "..."],
    t: Float[Tensor, "..."],
    image_cond: Float[Tensor, "..."],
    condition_scale: float,
    encoder_hidden_states: Float[Tensor, "..."],
) -> Float[Tensor, "..."]:
    """Run the ControlNet on noisy latents with a conditioning image.

    With return_dict=False this returns a (down_block_res_samples,
    mid_block_res_sample) tuple to be fed into forward_control_unet.
    Note the annotated return type is imprecise on that point.
    All inputs are cast to the pipeline's weight dtype.
    """
    return self.controlnet(
        latents.to(self.weights_dtype),
        t.to(self.weights_dtype),
        encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype),
        controlnet_cond=image_cond.to(self.weights_dtype),
        conditioning_scale=condition_scale,
        return_dict=False,
    )
@torch.cuda.amp.autocast(enabled=False)
def forward_control_unet(
    self,
    latents: Float[Tensor, "..."],
    t: Float[Tensor, "..."],
    encoder_hidden_states: Float[Tensor, "..."],
    cross_attention_kwargs,
    down_block_additional_residuals,
    mid_block_additional_residual,
) -> Float[Tensor, "..."]:
    """Predict the noise residual with the UNet, injecting the ControlNet
    residuals produced by forward_controlnet.

    Inputs are cast to the pipeline's weight dtype; the prediction is cast
    back to the caller's latent dtype.
    """
    input_dtype = latents.dtype
    return self.unet(
        latents.to(self.weights_dtype),
        t.to(self.weights_dtype),
        encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype),
        cross_attention_kwargs=cross_attention_kwargs,
        down_block_additional_residuals=down_block_additional_residuals,
        mid_block_additional_residual=mid_block_additional_residual,
    ).sample.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def encode_images(
    self, imgs: Float[Tensor, "B 3 H W"]
) -> Float[Tensor, "B 4 DH DW"]:
    """Encode [0, 1] RGB images into scaled VAE latents (dtype preserved)."""
    orig_dtype = imgs.dtype
    # VAE expects inputs in [-1, 1].
    scaled = imgs * 2.0 - 1.0
    dist = self.vae.encode(scaled.to(self.weights_dtype)).latent_dist
    sampled = dist.sample() * self.vae.config.scaling_factor
    return sampled.to(orig_dtype)
@torch.cuda.amp.autocast(enabled=False)
def encode_cond_images(
    self, imgs: Float[Tensor, "B 3 H W"]
) -> Float[Tensor, "B 4 DH DW"]:
    """Encode conditioning images into VAE latents, tripled for CFG.

    Uses the posterior mode (deterministic) and, unlike encode_images, does
    NOT apply the VAE scaling factor — presumably following the
    instruct-pix2pix conditioning convention; verify against callers.
    Returns [cond, cond, zeros] stacked along the batch dimension.
    """
    input_dtype = imgs.dtype
    imgs = imgs * 2.0 - 1.0
    posterior = self.vae.encode(imgs.to(self.weights_dtype)).latent_dist
    latents = posterior.mode()
    uncond_image_latents = torch.zeros_like(latents)
    latents = torch.cat([latents, latents, uncond_image_latents], dim=0)
    return latents.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def decode_latents(
    self, latents: Float[Tensor, "B 4 DH DW"]
) -> Float[Tensor, "B 3 H W"]:
    """Decode scaled VAE latents back to [0, 1] RGB images (dtype preserved)."""
    orig_dtype = latents.dtype
    # Undo the scaling applied at encode time.
    unscaled = 1 / self.vae.config.scaling_factor * latents
    decoded = self.vae.decode(unscaled.to(self.weights_dtype)).sample
    # Decoder output lives in [-1, 1]; map into [0, 1] and clip.
    return (decoded * 0.5 + 0.5).clamp(0, 1).to(orig_dtype)
def edit_latents(
    self,
    text_embeddings: Float[Tensor, "BB 77 768"],
    latents: Float[Tensor, "B 4 DH DW"],
    image_cond: Float[Tensor, "B 3 H W"],
    t: Int[Tensor, "B"],
) -> Float[Tensor, "B 4 DH DW"]:
    """SDEdit-style editing: noise the latents to level `t`, then denoise
    them back with ControlNet-guided DDIM sampling.

    NOTE(review): this mutates self.scheduler.config.num_train_timesteps in
    place, which persists after the call; confirm no other method relies on
    the original value afterwards.
    """
    # Restrict the schedule so `diffusion_steps` DDIM steps span only [0, t].
    self.scheduler.config.num_train_timesteps = t.item()
    self.scheduler.set_timesteps(self.cfg.diffusion_steps)
    with torch.no_grad():
        # add noise
        noise = torch.randn_like(latents)
        latents = self.scheduler.add_noise(latents, noise, t)  # type: ignore
        # sections of code used from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
        threestudio.debug("Start editing...")
        # NOTE: the loop variable deliberately shadows the argument `t`.
        for i, t in enumerate(self.scheduler.timesteps):
            # predict the noise residual with unet, NO grad!
            with torch.no_grad():
                # pred noise (cond and uncond batched together)
                latent_model_input = torch.cat([latents] * 2)
                (
                    down_block_res_samples,
                    mid_block_res_sample,
                ) = self.forward_controlnet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=text_embeddings,
                    image_cond=image_cond,
                    condition_scale=self.cfg.condition_scale,
                )
                noise_pred = self.forward_control_unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=text_embeddings,
                    cross_attention_kwargs=None,
                    down_block_additional_residuals=down_block_res_samples,
                    mid_block_additional_residual=mid_block_res_sample,
                )
            # perform classifier-free guidance
            noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
                noise_pred_text - noise_pred_uncond
            )
            # get previous sample, continue loop
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample
        threestudio.debug("Editing finished.")
    return latents
def prepare_image_cond(self, cond_rgb: Float[Tensor, "B H W C"]):
    """Turn the first conditioning render into a ControlNet hint image.

    Only the first batch item is used. Returns a [1, 3, H, W] float tensor
    in [0, 1] on self.device; gradients are not propagated (the detector
    runs on a detached numpy copy).
    """
    if self.cfg.control_type == "normal":
        cond_rgb = (
            (cond_rgb[0].detach().cpu().numpy() * 255).astype(np.uint8).copy()
        )
        detected_map = self.preprocessor(cond_rgb)
        control = (
            torch.from_numpy(np.array(detected_map)).float().to(self.device) / 255.0
        )
        control = control.unsqueeze(0)
        control = control.permute(0, 3, 1, 2)
    elif self.cfg.control_type == "canny":
        cond_rgb = (
            (cond_rgb[0].detach().cpu().numpy() * 255).astype(np.uint8).copy()
        )
        # Blur before edge detection to suppress render noise.
        blurred_img = cv2.blur(cond_rgb, ksize=(5, 5))
        detected_map = self.preprocessor(
            blurred_img, self.cfg.canny_lower_bound, self.cfg.canny_upper_bound
        )
        control = (
            torch.from_numpy(np.array(detected_map)).float().to(self.device) / 255.0
        )
        # Canny yields a single-channel map; replicate it to 3 channels.
        control = control.unsqueeze(-1).repeat(1, 1, 3)
        control = control.unsqueeze(0)
        control = control.permute(0, 3, 1, 2)
    else:
        # BUGFIX: an unknown control_type previously fell through and left
        # `control` unbound, raising UnboundLocalError at the return below.
        raise ValueError(f"Unknown control type: {self.cfg.control_type}")
    return control
def compute_grad_sds(
    self,
    text_embeddings: Float[Tensor, "BB 77 768"],
    latents: Float[Tensor, "B 4 DH DW"],
    image_cond: Float[Tensor, "B 3 H W"],
    t: Int[Tensor, "B"],
):
    """Score Distillation Sampling gradient w(t) * (eps_pred - eps) using the
    ControlNet-conditioned UNet with classifier-free guidance."""
    with torch.no_grad():
        # add noise
        noise = torch.randn_like(latents)  # TODO: use torch generator
        latents_noisy = self.scheduler.add_noise(latents, noise, t)
        # pred noise (cond and uncond batched together)
        latent_model_input = torch.cat([latents_noisy] * 2)
        down_block_res_samples, mid_block_res_sample = self.forward_controlnet(
            latent_model_input,
            t,
            encoder_hidden_states=text_embeddings,
            image_cond=image_cond,
            condition_scale=self.cfg.condition_scale,
        )
        noise_pred = self.forward_control_unet(
            latent_model_input,
            t,
            encoder_hidden_states=text_embeddings,
            cross_attention_kwargs=None,
            down_block_additional_residuals=down_block_res_samples,
            mid_block_additional_residual=mid_block_res_sample,
        )
    # perform classifier-free guidance
    noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
    noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
        noise_pred_text - noise_pred_uncond
    )
    # w(t) = 1 - alpha_bar_t (DreamFusion SDS weighting)
    w = (1 - self.alphas[t]).view(-1, 1, 1, 1)
    grad = w * (noise_pred - noise)
    return grad
def __call__(
    self,
    rgb: Float[Tensor, "B H W C"],
    cond_rgb: Float[Tensor, "B H W C"],
    prompt_utils: PromptProcessorOutput,
    **kwargs,
):
    """Apply ControlNet guidance to a single rendered view (batch size 1).

    With cfg.use_sds: returns {"loss_sds", "grad_norm", "min_step",
    "max_step"}; otherwise returns {"edit_images"} produced by iterative
    DDIM editing.
    """
    batch_size, H, W, _ = rgb.shape
    assert batch_size == 1
    assert rgb.shape[:-1] == cond_rgb.shape[:-1]
    rgb_BCHW = rgb.permute(0, 3, 1, 2)
    latents: Float[Tensor, "B 4 DH DW"]
    # Resize to a fixed square or round H/W down to a multiple of 8 (VAE stride).
    if self.cfg.fixed_size > 0:
        RH, RW = self.cfg.fixed_size, self.cfg.fixed_size
    else:
        RH, RW = H // 8 * 8, W // 8 * 8
    rgb_BCHW_HW8 = F.interpolate(
        rgb_BCHW, (RH, RW), mode="bilinear", align_corners=False
    )
    latents = self.encode_images(rgb_BCHW_HW8)
    image_cond = self.prepare_image_cond(cond_rgb)
    image_cond = F.interpolate(
        image_cond, (RH, RW), mode="bilinear", align_corners=False
    )
    # View-independent prompting: zero elevation/azimuth/distance.
    temp = torch.zeros(1).to(rgb.device)
    text_embeddings = prompt_utils.get_text_embeddings(temp, temp, temp, False)
    # timestep ~ U(0.02, 0.98) to avoid very high/low noise level
    t = torch.randint(
        self.min_step,
        self.max_step + 1,
        [batch_size],
        dtype=torch.long,
        device=self.device,
    )
    if self.cfg.use_sds:
        grad = self.compute_grad_sds(text_embeddings, latents, image_cond, t)
        grad = torch.nan_to_num(grad)
        if self.grad_clip_val is not None:
            grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
        # Reparameterization trick: d(loss)/d(latents) = grad.
        target = (latents - grad).detach()
        loss_sds = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size
        return {
            "loss_sds": loss_sds,
            "grad_norm": grad.norm(),
            "min_step": self.min_step,
            "max_step": self.max_step,
        }
    else:
        edit_latents = self.edit_latents(text_embeddings, latents, image_cond, t)
        edit_images = self.decode_latents(edit_latents)
        edit_images = F.interpolate(edit_images, (H, W), mode="bilinear")
        return {"edit_images": edit_images.permute(0, 2, 3, 1)}
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
    """Anneal grad clipping and the timestep-sampling window per schedule.

    Grad clipping follows "Debiasing Scores and Prompts of 2D Diffusion for
    Robust Text-to-3D Generation" (http://arxiv.org/abs/2303.15413).
    """
    if self.cfg.grad_clip is not None:
        self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step)
    new_min = C(self.cfg.min_step_percent, epoch, global_step)
    new_max = C(self.cfg.max_step_percent, epoch, global_step)
    self.set_min_max_steps(min_step_percent=new_min, max_step_percent=new_max)
if __name__ == "__main__":
    # Smoke test: edit a single image with the configured guidance and save
    # the result under .threestudio_cache/.
    from threestudio.utils.config import ExperimentConfig, load_config
    from threestudio.utils.typing import Optional

    cfg = load_config("configs/debugging/controlnet-normal.yaml")
    guidance = threestudio.find(cfg.system.guidance_type)(cfg.system.guidance)
    prompt_processor = threestudio.find(cfg.system.prompt_processor_type)(
        cfg.system.prompt_processor
    )
    # BGR (OpenCV) -> RGB, normalized to [0, 1], batched as [1, H, W, 3].
    rgb_image = cv2.imread("assets/face.jpg")[:, :, ::-1].copy() / 255
    rgb_image = torch.FloatTensor(rgb_image).unsqueeze(0).to(guidance.device)
    prompt_utils = prompt_processor()
    # The render doubles as its own conditioning image here.
    guidance_out = guidance(rgb_image, rgb_image, prompt_utils)
    # RGB [0, 1] -> BGR uint8 for cv2.imwrite.
    edit_image = (
        (guidance_out["edit_images"][0].detach().cpu().clip(0, 1).numpy() * 255)
        .astype(np.uint8)[:, :, ::-1]
        .copy()
    )
    os.makedirs(".threestudio_cache", exist_ok=True)
    cv2.imwrite(".threestudio_cache/edit_image.jpg", edit_image)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/stable_diffusion_guidance.py | threestudio/models/guidance/stable_diffusion_guidance.py | from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers import DDIMScheduler, DDPMScheduler, StableDiffusionPipeline
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, cleanup, parse_version
from threestudio.utils.ops import perpendicular_component
from threestudio.utils.typing import *
@threestudio.register("stable-diffusion-guidance")
class StableDiffusionGuidance(BaseObject):
    """SDS / SJC guidance backed by a vanilla Stable Diffusion checkpoint."""

    @dataclass
    class Config(BaseObject.Config):
        pretrained_model_name_or_path: str = "runwayml/stable-diffusion-v1-5"
        # Memory/speed toggles forwarded to the diffusers pipeline.
        enable_memory_efficient_attention: bool = False
        enable_sequential_cpu_offload: bool = False
        enable_attention_slicing: bool = False
        enable_channels_last_format: bool = False
        # Classifier-free guidance scale (DreamFusion uses a high value).
        guidance_scale: float = 100.0
        # Optional grad-clip schedule (see update_step).
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = True
        # Fractional bounds of the sampled diffusion timestep.
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        max_step_percent_annealed: float = 0.5
        anneal_start_step: Optional[int] = None
        # Score Jacobian Chaining instead of SDS.
        use_sjc: bool = False
        # SJC variance-reduced gradient (see compute_grad_sjc).
        var_red: bool = True
        # One of "sds", "uniform", "fantasia3d" (see compute_grad_sds).
        weighting_strategy: str = "sds"
        # Optional tomesd token merging on the UNet.
        token_merging: bool = False
        token_merging_params: Optional[dict] = field(default_factory=dict)
        view_dependent_prompting: bool = True
        """Maximum number of batch items to evaluate guidance for (for debugging) and to save on disk. -1 means save all items."""
        max_items_eval: int = 4

    cfg: Config
def configure(self) -> None:
    """Load the Stable Diffusion pipeline, drop the text encoder, freeze the
    weights, and set up the scheduler (DDPM for SJC, DDIM otherwise)."""
    threestudio.info(f"Loading Stable Diffusion ...")
    self.weights_dtype = (
        torch.float16 if self.cfg.half_precision_weights else torch.float32
    )
    pipe_kwargs = {
        "tokenizer": None,
        "safety_checker": None,
        "feature_extractor": None,
        "requires_safety_checker": False,
        "torch_dtype": self.weights_dtype,
    }
    self.pipe = StableDiffusionPipeline.from_pretrained(
        self.cfg.pretrained_model_name_or_path,
        **pipe_kwargs,
    ).to(self.device)
    if self.cfg.enable_memory_efficient_attention:
        if parse_version(torch.__version__) >= parse_version("2"):
            threestudio.info(
                "PyTorch2.0 uses memory efficient attention by default."
            )
        elif not is_xformers_available():
            threestudio.warn(
                "xformers is not available, memory efficient attention is not enabled."
            )
        else:
            self.pipe.enable_xformers_memory_efficient_attention()
    if self.cfg.enable_sequential_cpu_offload:
        self.pipe.enable_sequential_cpu_offload()
    if self.cfg.enable_attention_slicing:
        self.pipe.enable_attention_slicing(1)
    if self.cfg.enable_channels_last_format:
        self.pipe.unet.to(memory_format=torch.channels_last)
    # Text embeddings come from the prompt processor, so the text encoder is
    # dead weight here — free it.
    del self.pipe.text_encoder
    cleanup()
    # Create model (aliases into the pipeline, in eval mode, frozen)
    self.vae = self.pipe.vae.eval()
    self.unet = self.pipe.unet.eval()
    for p in self.vae.parameters():
        p.requires_grad_(False)
    for p in self.unet.parameters():
        p.requires_grad_(False)
    if self.cfg.token_merging:
        import tomesd

        tomesd.apply_patch(self.unet, **self.cfg.token_merging_params)
    if self.cfg.use_sjc:
        # score jacobian chaining use DDPM
        self.scheduler = DDPMScheduler.from_pretrained(
            self.cfg.pretrained_model_name_or_path,
            subfolder="scheduler",
            torch_dtype=self.weights_dtype,
            beta_start=0.00085,
            beta_end=0.0120,
            beta_schedule="scaled_linear",
        )
    else:
        self.scheduler = DDIMScheduler.from_pretrained(
            self.cfg.pretrained_model_name_or_path,
            subfolder="scheduler",
            torch_dtype=self.weights_dtype,
        )
    self.num_train_timesteps = self.scheduler.config.num_train_timesteps
    self.set_min_max_steps()  # set to default value
    self.alphas: Float[Tensor, "..."] = self.scheduler.alphas_cumprod.to(
        self.device
    )
    if self.cfg.use_sjc:
        # score jacobian chaining need mu: sigma/alpha ratio per timestep
        self.us: Float[Tensor, "..."] = torch.sqrt((1 - self.alphas) / self.alphas)
    self.grad_clip_val: Optional[float] = None
    threestudio.info(f"Loaded Stable Diffusion!")
@torch.cuda.amp.autocast(enabled=False)
def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0.98):
    """Set the inclusive timestep-sampling bounds as fractions of the schedule."""
    total = self.num_train_timesteps
    self.min_step = int(total * min_step_percent)
    self.max_step = int(total * max_step_percent)
@torch.cuda.amp.autocast(enabled=False)
def forward_unet(
    self,
    latents: Float[Tensor, "..."],
    t: Float[Tensor, "..."],
    encoder_hidden_states: Float[Tensor, "..."],
) -> Float[Tensor, "..."]:
    """Predict the noise residual with the UNet.

    Inputs are cast to the pipeline's weight dtype; the prediction is cast
    back to the caller's latent dtype.
    """
    input_dtype = latents.dtype
    return self.unet(
        latents.to(self.weights_dtype),
        t.to(self.weights_dtype),
        encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype),
    ).sample.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def encode_images(
    self, imgs: Float[Tensor, "B 3 512 512"]
) -> Float[Tensor, "B 4 64 64"]:
    """Encode [0, 1] RGB images into scaled VAE latents (dtype preserved)."""
    orig_dtype = imgs.dtype
    # VAE expects inputs in [-1, 1].
    scaled = imgs * 2.0 - 1.0
    dist = self.vae.encode(scaled.to(self.weights_dtype)).latent_dist
    sampled = dist.sample() * self.vae.config.scaling_factor
    return sampled.to(orig_dtype)
@torch.cuda.amp.autocast(enabled=False)
def decode_latents(
    self,
    latents: Float[Tensor, "B 4 H W"],
    latent_height: int = 64,
    latent_width: int = 64,
) -> Float[Tensor, "B 3 512 512"]:
    """Resize latents to the given latent resolution, then decode them back
    to [0, 1] RGB images (dtype preserved)."""
    orig_dtype = latents.dtype
    resized = F.interpolate(
        latents, (latent_height, latent_width), mode="bilinear", align_corners=False
    )
    # Undo the scaling applied at encode time.
    unscaled = 1 / self.vae.config.scaling_factor * resized
    decoded = self.vae.decode(unscaled.to(self.weights_dtype)).sample
    # Decoder output lives in [-1, 1]; map into [0, 1] and clip.
    return (decoded * 0.5 + 0.5).clamp(0, 1).to(orig_dtype)
def compute_grad_sds(
    self,
    latents: Float[Tensor, "B 4 64 64"],
    t: Int[Tensor, "B"],
    prompt_utils: PromptProcessorOutput,
    elevation: Float[Tensor, "B"],
    azimuth: Float[Tensor, "B"],
    camera_distances: Float[Tensor, "B"],
):
    """Score Distillation Sampling gradient w(t) * (eps_pred - eps).

    Supports Perp-Neg style negative-prompt guidance (batch laid out as
    [text, uncond, negatives]) and three w(t) weighting strategies.
    Returns (grad, guidance_eval_utils).
    """
    batch_size = elevation.shape[0]
    if prompt_utils.use_perp_neg:
        (
            text_embeddings,
            neg_guidance_weights,
        ) = prompt_utils.get_text_embeddings_perp_neg(
            elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
        )
        with torch.no_grad():
            noise = torch.randn_like(latents)
            latents_noisy = self.scheduler.add_noise(latents, noise, t)
            latent_model_input = torch.cat([latents_noisy] * 4, dim=0)
            noise_pred = self.forward_unet(
                latent_model_input,
                torch.cat([t] * 4),
                encoder_hidden_states=text_embeddings,
            )  # (4B, 3, 64, 64)
        noise_pred_text = noise_pred[:batch_size]
        noise_pred_uncond = noise_pred[batch_size : batch_size * 2]
        noise_pred_neg = noise_pred[batch_size * 2 :]
        e_pos = noise_pred_text - noise_pred_uncond
        accum_grad = 0
        n_negative_prompts = neg_guidance_weights.shape[-1]
        # Project each negative direction onto the component perpendicular to
        # the positive one (Perp-Neg) and accumulate with its weight.
        for i in range(n_negative_prompts):
            e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
            accum_grad += neg_guidance_weights[:, i].view(
                -1, 1, 1, 1
            ) * perpendicular_component(e_i_neg, e_pos)
        noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
            e_pos + accum_grad
        )
    else:
        neg_guidance_weights = None
        text_embeddings = prompt_utils.get_text_embeddings(
            elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
        )
        # predict the noise residual with unet, NO grad!
        with torch.no_grad():
            # add noise
            noise = torch.randn_like(latents)  # TODO: use torch generator
            latents_noisy = self.scheduler.add_noise(latents, noise, t)
            # pred noise (cond and uncond batched together)
            latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
            noise_pred = self.forward_unet(
                latent_model_input,
                torch.cat([t] * 2),
                encoder_hidden_states=text_embeddings,
            )
        # perform guidance (high scale from paper!)
        # NOTE(review): this anchors on noise_pred_text rather than the
        # standard CFG anchor noise_pred_uncond; it matches get_noise_pred
        # below but differs from the perp-neg branch above — confirm intended.
        noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
        noise_pred = noise_pred_text + self.cfg.guidance_scale * (
            noise_pred_text - noise_pred_uncond
        )
    if self.cfg.weighting_strategy == "sds":
        # w(t), sigma_t^2
        w = (1 - self.alphas[t]).view(-1, 1, 1, 1)
    elif self.cfg.weighting_strategy == "uniform":
        w = 1
    elif self.cfg.weighting_strategy == "fantasia3d":
        w = (self.alphas[t] ** 0.5 * (1 - self.alphas[t])).view(-1, 1, 1, 1)
    else:
        raise ValueError(
            f"Unknown weighting strategy: {self.cfg.weighting_strategy}"
        )
    grad = w * (noise_pred - noise)
    # Inputs needed by guidance_eval to re-run/denoise this prediction.
    guidance_eval_utils = {
        "use_perp_neg": prompt_utils.use_perp_neg,
        "neg_guidance_weights": neg_guidance_weights,
        "text_embeddings": text_embeddings,
        "t_orig": t,
        "latents_noisy": latents_noisy,
        "noise_pred": noise_pred,
    }
    return grad, guidance_eval_utils
def compute_grad_sjc(
    self,
    latents: Float[Tensor, "B 4 64 64"],
    t: Int[Tensor, "B"],
    prompt_utils: PromptProcessorOutput,
    elevation: Float[Tensor, "B"],
    azimuth: Float[Tensor, "B"],
    camera_distances: Float[Tensor, "B"],
):
    """Score Jacobian Chaining gradient.

    Perturbs latents as z_s = y + sigma * eps, queries the denoiser on the
    rescaled input, and returns the (optionally variance-reduced) score.
    Returns (grad, guidance_eval_utils).
    """
    batch_size = elevation.shape[0]
    # sigma/alpha ratio for the sampled timesteps (precomputed in configure).
    sigma = self.us[t]
    sigma = sigma.view(-1, 1, 1, 1)
    if prompt_utils.use_perp_neg:
        (
            text_embeddings,
            neg_guidance_weights,
        ) = prompt_utils.get_text_embeddings_perp_neg(
            elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
        )
        with torch.no_grad():
            noise = torch.randn_like(latents)
            y = latents
            zs = y + sigma * noise
            scaled_zs = zs / torch.sqrt(1 + sigma**2)
            # pred noise
            latent_model_input = torch.cat([scaled_zs] * 4, dim=0)
            noise_pred = self.forward_unet(
                latent_model_input,
                torch.cat([t] * 4),
                encoder_hidden_states=text_embeddings,
            )  # (4B, 3, 64, 64)
        noise_pred_text = noise_pred[:batch_size]
        noise_pred_uncond = noise_pred[batch_size : batch_size * 2]
        noise_pred_neg = noise_pred[batch_size * 2 :]
        e_pos = noise_pred_text - noise_pred_uncond
        accum_grad = 0
        n_negative_prompts = neg_guidance_weights.shape[-1]
        # Perp-Neg accumulation, as in compute_grad_sds.
        for i in range(n_negative_prompts):
            e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
            accum_grad += neg_guidance_weights[:, i].view(
                -1, 1, 1, 1
            ) * perpendicular_component(e_i_neg, e_pos)
        noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
            e_pos + accum_grad
        )
    else:
        neg_guidance_weights = None
        text_embeddings = prompt_utils.get_text_embeddings(
            elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
        )
        # predict the noise residual with unet, NO grad!
        with torch.no_grad():
            # add noise
            noise = torch.randn_like(latents)  # TODO: use torch generator
            y = latents
            zs = y + sigma * noise
            scaled_zs = zs / torch.sqrt(1 + sigma**2)
            # pred noise
            latent_model_input = torch.cat([scaled_zs] * 2, dim=0)
            noise_pred = self.forward_unet(
                latent_model_input,
                torch.cat([t] * 2),
                encoder_hidden_states=text_embeddings,
            )
        # perform guidance (high scale from paper!)
        # NOTE(review): anchored on noise_pred_text, same as compute_grad_sds.
        noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
        noise_pred = noise_pred_text + self.cfg.guidance_scale * (
            noise_pred_text - noise_pred_uncond
        )
    # Denoised estimate D(z_s).
    Ds = zs - sigma * noise_pred
    # Score: var_red subtracts the clean latents (lower-variance estimator),
    # otherwise subtracts the noisy sample.
    if self.cfg.var_red:
        grad = -(Ds - y) / sigma
    else:
        grad = -(Ds - zs) / sigma
    guidance_eval_utils = {
        "use_perp_neg": prompt_utils.use_perp_neg,
        "neg_guidance_weights": neg_guidance_weights,
        "text_embeddings": text_embeddings,
        "t_orig": t,
        "latents_noisy": scaled_zs,
        "noise_pred": noise_pred,
    }
    return grad, guidance_eval_utils
def __call__(
    self,
    rgb: Float[Tensor, "B H W C"],
    prompt_utils: PromptProcessorOutput,
    elevation: Float[Tensor, "B"],
    azimuth: Float[Tensor, "B"],
    camera_distances: Float[Tensor, "B"],
    rgb_as_latents=False,
    guidance_eval=False,
    **kwargs,
):
    """Compute the SDS (or SJC) loss for a batch of rendered views.

    Returns {"loss_sds", "grad_norm", "min_step", "max_step"} plus an
    "eval" entry when guidance_eval is set.
    """
    batch_size = rgb.shape[0]
    rgb_BCHW = rgb.permute(0, 3, 1, 2)
    latents: Float[Tensor, "B 4 64 64"]
    if rgb_as_latents:
        latents = F.interpolate(
            rgb_BCHW, (64, 64), mode="bilinear", align_corners=False
        )
    else:
        rgb_BCHW_512 = F.interpolate(
            rgb_BCHW, (512, 512), mode="bilinear", align_corners=False
        )
        # encode image into latents with vae
        latents = self.encode_images(rgb_BCHW_512)
    # timestep ~ U(0.02, 0.98) to avoid very high/low noise level
    t = torch.randint(
        self.min_step,
        self.max_step + 1,
        [batch_size],
        dtype=torch.long,
        device=self.device,
    )
    if self.cfg.use_sjc:
        grad, guidance_eval_utils = self.compute_grad_sjc(
            latents, t, prompt_utils, elevation, azimuth, camera_distances
        )
    else:
        grad, guidance_eval_utils = self.compute_grad_sds(
            latents, t, prompt_utils, elevation, azimuth, camera_distances
        )
    grad = torch.nan_to_num(grad)
    # clip grad for stable training?
    if self.grad_clip_val is not None:
        grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
    # loss = SpecifyGradient.apply(latents, grad)
    # SpecifyGradient is not straightforward, use a reparameterization trick instead
    target = (latents - grad).detach()
    # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
    loss_sds = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size
    guidance_out = {
        "loss_sds": loss_sds,
        "grad_norm": grad.norm(),
        "min_step": self.min_step,
        "max_step": self.max_step,
    }
    if guidance_eval:
        guidance_eval_out = self.guidance_eval(**guidance_eval_utils)
        # Per-item caption: noise level and camera pose, for the saved grid.
        texts = []
        for n, e, a, c in zip(
            guidance_eval_out["noise_levels"], elevation, azimuth, camera_distances
        ):
            texts.append(
                f"n{n:.02f}\ne{e.item():.01f}\na{a.item():.01f}\nc{c.item():.02f}"
            )
        guidance_eval_out.update({"texts": texts})
        guidance_out.update({"eval": guidance_eval_out})
    return guidance_out
@torch.cuda.amp.autocast(enabled=False)
@torch.no_grad()
def get_noise_pred(
    self,
    latents_noisy,
    t,
    text_embeddings,
    use_perp_neg=False,
    neg_guidance_weights=None,
):
    """Single guided noise prediction at scalar timestep `t`, used by
    guidance_eval's denoising loop. Mirrors the guidance formulas in
    compute_grad_sds (including the Perp-Neg branch)."""
    batch_size = latents_noisy.shape[0]
    if use_perp_neg:
        # pred noise with layout [text, uncond, negatives...]
        latent_model_input = torch.cat([latents_noisy] * 4, dim=0)
        noise_pred = self.forward_unet(
            latent_model_input,
            torch.cat([t.reshape(1)] * 4).to(self.device),
            encoder_hidden_states=text_embeddings,
        )  # (4B, 3, 64, 64)
        noise_pred_text = noise_pred[:batch_size]
        noise_pred_uncond = noise_pred[batch_size : batch_size * 2]
        noise_pred_neg = noise_pred[batch_size * 2 :]
        e_pos = noise_pred_text - noise_pred_uncond
        accum_grad = 0
        n_negative_prompts = neg_guidance_weights.shape[-1]
        # Perp-Neg accumulation, as in compute_grad_sds.
        for i in range(n_negative_prompts):
            e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
            accum_grad += neg_guidance_weights[:, i].view(
                -1, 1, 1, 1
            ) * perpendicular_component(e_i_neg, e_pos)
        noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
            e_pos + accum_grad
        )
    else:
        # pred noise (cond and uncond batched together)
        latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
        noise_pred = self.forward_unet(
            latent_model_input,
            torch.cat([t.reshape(1)] * 2).to(self.device),
            encoder_hidden_states=text_embeddings,
        )
        # perform guidance (high scale from paper!)
        # NOTE(review): anchored on noise_pred_text, matching compute_grad_sds.
        noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
        noise_pred = noise_pred_text + self.cfg.guidance_scale * (
            noise_pred_text - noise_pred_uncond
        )
    return noise_pred
    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def guidance_eval(
        self,
        t_orig,
        text_embeddings,
        latents_noisy,
        noise_pred,
        use_perp_neg=False,
        neg_guidance_weights=None,
    ):
        """Visualize what the guidance "sees" for logging/debugging.

        Decodes (a) the noisy latents, (b) the one-step denoised latents and
        the one-step prediction of the original sample, and (c) a full
        multi-step denoising of each batch item.

        Args:
            t_orig: per-item timesteps the guidance was computed at.
            text_embeddings: stacked prompt embeddings — [cond, uncond] or,
                with ``use_perp_neg``, [cond, uncond, negatives].
            latents_noisy: the noised latents used for guidance.
            noise_pred: the guided noise prediction at ``t_orig``.

        Returns:
            dict with decoded images (B, H, W, C) and per-item noise levels.
        """
        # use only 50 timesteps, and find nearest of those to t
        self.scheduler.set_timesteps(50)
        self.scheduler.timesteps_gpu = self.scheduler.timesteps.to(self.device)
        # cap the number of evaluated items for speed; max_items_eval <= 0 means all
        bs = (
            min(self.cfg.max_items_eval, latents_noisy.shape[0])
            if self.cfg.max_items_eval > 0
            else latents_noisy.shape[0]
        )  # batch size
        # NOTE(review): assumes scheduler.timesteps is in descending order, so
        # the first False per row (found by torch.min below) is the nearest
        # scheduler timestep at or below t_orig — confirm for this scheduler.
        large_enough_idxs = self.scheduler.timesteps_gpu.expand([bs, -1]) > t_orig[
            :bs
        ].unsqueeze(
            -1
        )  # sized [bs,50] > [bs,1]
        idxs = torch.min(large_enough_idxs, dim=1)[1]
        t = self.scheduler.timesteps_gpu[idxs]
        fracs = list((t / self.scheduler.config.num_train_timesteps).cpu().numpy())
        imgs_noisy = self.decode_latents(latents_noisy[:bs]).permute(0, 2, 3, 1)
        # get prev latent: one scheduler step per item (eta=1)
        latents_1step = []
        pred_1orig = []
        for b in range(bs):
            step_output = self.scheduler.step(
                noise_pred[b : b + 1], t[b], latents_noisy[b : b + 1], eta=1
            )
            latents_1step.append(step_output["prev_sample"])
            pred_1orig.append(step_output["pred_original_sample"])
        latents_1step = torch.cat(latents_1step)
        pred_1orig = torch.cat(pred_1orig)
        imgs_1step = self.decode_latents(latents_1step).permute(0, 2, 3, 1)
        imgs_1orig = self.decode_latents(pred_1orig).permute(0, 2, 3, 1)
        # run the remaining scheduler steps per item to get a fully denoised image
        latents_final = []
        for b, i in enumerate(idxs):
            latents = latents_1step[b : b + 1]
            # pick this item's rows out of the stacked embeddings
            text_emb = (
                text_embeddings[
                    [b, b + len(idxs), b + 2 * len(idxs), b + 3 * len(idxs)], ...
                ]
                if use_perp_neg
                else text_embeddings[[b, b + len(idxs)], ...]
            )
            neg_guid = neg_guidance_weights[b : b + 1] if use_perp_neg else None
            for t in tqdm(self.scheduler.timesteps[i + 1 :], leave=False):
                # pred noise
                noise_pred = self.get_noise_pred(
                    latents, t, text_emb, use_perp_neg, neg_guid
                )
                # get prev latent
                latents = self.scheduler.step(noise_pred, t, latents, eta=1)[
                    "prev_sample"
                ]
            latents_final.append(latents)
        latents_final = torch.cat(latents_final)
        imgs_final = self.decode_latents(latents_final).permute(0, 2, 3, 1)
        return {
            "bs": bs,
            "noise_levels": fracs,
            "imgs_noisy": imgs_noisy,
            "imgs_1step": imgs_1step,
            "imgs_1orig": imgs_1orig,
            "imgs_final": imgs_final,
        }
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
# clip grad for stable training as demonstrated in
# Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
# http://arxiv.org/abs/2303.15413
if self.cfg.grad_clip is not None:
self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step)
self.set_min_max_steps(
min_step_percent=C(self.cfg.min_step_percent, epoch, global_step),
max_step_percent=C(self.cfg.max_step_percent, epoch, global_step),
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/stable_diffusion_unified_guidance.py | threestudio/models/guidance/stable_diffusion_unified_guidance.py | import random
from contextlib import contextmanager
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDPMScheduler,
DPMSolverSinglestepScheduler,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.models.embeddings import TimestepEmbedding
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
import threestudio
from threestudio.models.networks import ToDTypeWrapper
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseModule
from threestudio.utils.misc import C, cleanup, enable_gradient, parse_version
from threestudio.utils.ops import perpendicular_component
from threestudio.utils.typing import *
@threestudio.register("stable-diffusion-unified-guidance")
class StableDiffusionUnifiedGuidance(BaseModule):
    @dataclass
    class Config(BaseModule.Config):
        # guidance type, in ["sds", "vsd"]
        guidance_type: str = "sds"
        # HuggingFace id of the pretrained Stable Diffusion model
        pretrained_model_name_or_path: str = "runwayml/stable-diffusion-v1-5"
        # classifier-free guidance scale for the pretrained model
        guidance_scale: float = 100.0
        # SDS weighting scheme, in ["dreamfusion", "uniform", "fantasia3d"]
        weighting_strategy: str = "dreamfusion"
        view_dependent_prompting: bool = True
        # sampled-timestep window as fractions of the schedule (may be scheduled values)
        min_step_percent: Any = 0.02
        max_step_percent: Any = 0.98
        # optional (possibly scheduled) gradient clip value
        grad_clip: Optional[Any] = None
        # extra debug outputs: decoded 1-step / multi-step predictions
        return_rgb_1step_orig: bool = False
        return_rgb_multistep_orig: bool = False
        n_rgb_multistep_orig_steps: int = 4
        # TODO
        # controlnet
        controlnet_model_name_or_path: Optional[str] = None
        preprocessor: Optional[str] = None
        control_scale: float = 1.0
        # TODO
        # lora
        lora_model_name_or_path: Optional[str] = None
        # efficiency-related configurations
        half_precision_weights: bool = True
        enable_memory_efficient_attention: bool = False
        enable_sequential_cpu_offload: bool = False
        enable_attention_slicing: bool = False
        enable_channels_last_format: bool = False
        token_merging: bool = False
        token_merging_params: Optional[dict] = field(default_factory=dict)
        # VSD configurations, only used when guidance_type is "vsd"
        vsd_phi_model_name_or_path: Optional[str] = None
        vsd_guidance_scale_phi: float = 1.0
        vsd_use_lora: bool = True
        vsd_lora_cfg_training: bool = False
        vsd_lora_n_timestamp_samples: int = 1
        vsd_use_camera_condition: bool = True
        # camera condition type, in ["extrinsics", "mvp", "spherical"]
        vsd_camera_condition_type: Optional[str] = "extrinsics"
cfg: Config
    def configure(self) -> None:
        """Load the pretrained pipeline and (for VSD) the phi network.

        Frozen modules (pipe, pipe_phi, controlnet) are stored in a plain
        dataclass so they are not registered as trainable module state; the
        only trainable pieces introduced here are ``self.camera_embedding``
        and ``self.lora_layers`` (VSD only).
        """
        # Scheduled state, filled in by update_step().
        self.min_step: Optional[int] = None
        self.max_step: Optional[int] = None
        self.grad_clip_val: Optional[float] = None
        @dataclass
        class NonTrainableModules:
            pipe: StableDiffusionPipeline
            pipe_phi: Optional[StableDiffusionPipeline] = None
            controlnet: Optional[ControlNetModel] = None
        self.weights_dtype = (
            torch.float16 if self.cfg.half_precision_weights else torch.float32
        )
        threestudio.info(f"Loading Stable Diffusion ...")
        # tokenizer/safety modules are dropped; prompt embeddings come from the
        # prompt processor instead.
        pipe_kwargs = {
            "tokenizer": None,
            "safety_checker": None,
            "feature_extractor": None,
            "requires_safety_checker": False,
            "torch_dtype": self.weights_dtype,
        }
        pipe = StableDiffusionPipeline.from_pretrained(
            self.cfg.pretrained_model_name_or_path,
            **pipe_kwargs,
        ).to(self.device)
        self.prepare_pipe(pipe)
        self.configure_pipe_token_merging(pipe)
        # phi network for VSD
        # introduce two trainable modules:
        # - self.camera_embedding
        # - self.lora_layers
        pipe_phi = None
        # if the phi network shares the same unet with the pretrain network
        # we need to pass additional cross attention kwargs to the unet
        self.vsd_share_model = (
            self.cfg.guidance_type == "vsd"
            and self.cfg.vsd_phi_model_name_or_path is None
        )
        if self.cfg.guidance_type == "vsd":
            if self.cfg.vsd_phi_model_name_or_path is None:
                pipe_phi = pipe
            else:
                pipe_phi = StableDiffusionPipeline.from_pretrained(
                    self.cfg.vsd_phi_model_name_or_path,
                    **pipe_kwargs,
                ).to(self.device)
                self.prepare_pipe(pipe_phi)
                self.configure_pipe_token_merging(pipe_phi)
            # set up camera embedding
            if self.cfg.vsd_use_camera_condition:
                if self.cfg.vsd_camera_condition_type in ["extrinsics", "mvp"]:
                    self.camera_embedding_dim = 16
                elif self.cfg.vsd_camera_condition_type == "spherical":
                    self.camera_embedding_dim = 4
                else:
                    raise ValueError("Invalid camera condition type!")
                # FIXME: hard-coded output dim
                self.camera_embedding = ToDTypeWrapper(
                    TimestepEmbedding(self.camera_embedding_dim, 1280),
                    self.weights_dtype,
                ).to(self.device)
                pipe_phi.unet.class_embedding = self.camera_embedding
            if self.cfg.vsd_use_lora:
                # set up LoRA layers
                lora_attn_procs = {}
                for name in pipe_phi.unet.attn_processors.keys():
                    # self-attention (attn1) has no cross-attention dim
                    cross_attention_dim = (
                        None
                        if name.endswith("attn1.processor")
                        else pipe_phi.unet.config.cross_attention_dim
                    )
                    # NOTE(review): hidden_size stays unbound if `name` matches
                    # none of these prefixes — relies on diffusers' processor
                    # naming; confirm against the pinned diffusers version.
                    if name.startswith("mid_block"):
                        hidden_size = pipe_phi.unet.config.block_out_channels[-1]
                    elif name.startswith("up_blocks"):
                        block_id = int(name[len("up_blocks.")])
                        hidden_size = list(
                            reversed(pipe_phi.unet.config.block_out_channels)
                        )[block_id]
                    elif name.startswith("down_blocks"):
                        block_id = int(name[len("down_blocks.")])
                        hidden_size = pipe_phi.unet.config.block_out_channels[block_id]
                    lora_attn_procs[name] = LoRAAttnProcessor(
                        hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
                    )
                pipe_phi.unet.set_attn_processor(lora_attn_procs)
                self.lora_layers = AttnProcsLayers(pipe_phi.unet.attn_processors).to(
                    self.device
                )
                # prevent LoRA weights from being saved/loaded with this module
                self.lora_layers._load_state_dict_pre_hooks.clear()
                self.lora_layers._state_dict_hooks.clear()
        threestudio.info(f"Loaded Stable Diffusion!")
        # controlnet
        controlnet = None
        if self.cfg.controlnet_model_name_or_path is not None:
            threestudio.info(f"Loading ControlNet ...")
            controlnet = ControlNetModel.from_pretrained(
                self.cfg.controlnet_model_name_or_path,
                torch_dtype=self.weights_dtype,
            ).to(self.device)
            controlnet.eval()
            enable_gradient(controlnet, enabled=False)
            threestudio.info(f"Loaded ControlNet!")
        self.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        # q(z_t|x) = N(alpha_t x, sigma_t^2 I)
        # in DDPM, alpha_t = sqrt(alphas_cumprod_t), sigma_t^2 = 1 - alphas_cumprod_t
        self.alphas_cumprod: Float[Tensor, "T"] = self.scheduler.alphas_cumprod.to(
            self.device
        )
        self.alphas: Float[Tensor, "T"] = self.alphas_cumprod**0.5
        self.sigmas: Float[Tensor, "T"] = (1 - self.alphas_cumprod) ** 0.5
        # sigma_t / alpha_t (inverse sqrt-SNR); original comment called this "log SNR"
        self.lambdas: Float[Tensor, "T"] = self.sigmas / self.alphas
        self._non_trainable_modules = NonTrainableModules(
            pipe=pipe,
            pipe_phi=pipe_phi,
            controlnet=controlnet,
        )
    @property
    def pipe(self) -> StableDiffusionPipeline:
        # Pretrained pipeline, held in a plain dataclass (NonTrainableModules)
        # so its frozen weights are not registered as module state.
        return self._non_trainable_modules.pipe
@property
def pipe_phi(self) -> StableDiffusionPipeline:
if self._non_trainable_modules.pipe_phi is None:
raise RuntimeError("phi model is not available.")
return self._non_trainable_modules.pipe_phi
@property
def controlnet(self) -> ControlNetModel:
if self._non_trainable_modules.controlnet is None:
raise RuntimeError("ControlNet model is not available.")
return self._non_trainable_modules.controlnet
    def prepare_pipe(self, pipe: StableDiffusionPipeline):
        """Apply memory/efficiency options and freeze the pipeline.

        Enables the configured attention/offload optimizations, puts the VAE
        and UNet in eval mode with gradients disabled, and silences the
        pipeline's progress bar.
        """
        if self.cfg.enable_memory_efficient_attention:
            if parse_version(torch.__version__) >= parse_version("2"):
                threestudio.info(
                    "PyTorch2.0 uses memory efficient attention by default."
                )
            elif not is_xformers_available():
                threestudio.warn(
                    "xformers is not available, memory efficient attention is not enabled."
                )
            else:
                pipe.enable_xformers_memory_efficient_attention()
        if self.cfg.enable_sequential_cpu_offload:
            pipe.enable_sequential_cpu_offload()
        if self.cfg.enable_attention_slicing:
            pipe.enable_attention_slicing(1)
        if self.cfg.enable_channels_last_format:
            pipe.unet.to(memory_format=torch.channels_last)
        # FIXME: pipe.__call__ requires text_encoder.dtype
        # pipe.text_encoder.to("meta")
        cleanup()
        pipe.vae.eval()
        pipe.unet.eval()
        enable_gradient(pipe.vae, enabled=False)
        enable_gradient(pipe.unet, enabled=False)
        # disable progress bar
        pipe.set_progress_bar_config(disable=True)
def configure_pipe_token_merging(self, pipe: StableDiffusionPipeline):
if self.cfg.token_merging:
import tomesd
tomesd.apply_patch(pipe.unet, **self.cfg.token_merging_params)
@torch.cuda.amp.autocast(enabled=False)
def forward_unet(
self,
unet: UNet2DConditionModel,
latents: Float[Tensor, "..."],
t: Int[Tensor, "..."],
encoder_hidden_states: Float[Tensor, "..."],
class_labels: Optional[Float[Tensor, "..."]] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
down_block_additional_residuals: Optional[Float[Tensor, "..."]] = None,
mid_block_additional_residual: Optional[Float[Tensor, "..."]] = None,
velocity_to_epsilon: bool = False,
) -> Float[Tensor, "..."]:
input_dtype = latents.dtype
pred = unet(
latents.to(unet.dtype),
t.to(unet.dtype),
encoder_hidden_states=encoder_hidden_states.to(unet.dtype),
class_labels=class_labels,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_additional_residuals,
mid_block_additional_residual=mid_block_additional_residual,
).sample
if velocity_to_epsilon:
pred = latents * self.sigmas[t].view(-1, 1, 1, 1) + pred * self.alphas[
t
].view(-1, 1, 1, 1)
return pred.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def vae_encode(
self, vae: AutoencoderKL, imgs: Float[Tensor, "B 3 H W"], mode=False
) -> Float[Tensor, "B 4 Hl Wl"]:
# expect input in [-1, 1]
input_dtype = imgs.dtype
posterior = vae.encode(imgs.to(vae.dtype)).latent_dist
if mode:
latents = posterior.mode()
else:
latents = posterior.sample()
latents = latents * vae.config.scaling_factor
return latents.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def vae_decode(
self, vae: AutoencoderKL, latents: Float[Tensor, "B 4 Hl Wl"]
) -> Float[Tensor, "B 3 H W"]:
# output in [0, 1]
input_dtype = latents.dtype
latents = 1 / vae.config.scaling_factor * latents
image = vae.decode(latents.to(vae.dtype)).sample
image = (image * 0.5 + 0.5).clamp(0, 1)
return image.to(input_dtype)
@contextmanager
def disable_unet_class_embedding(self, unet: UNet2DConditionModel):
class_embedding = unet.class_embedding
try:
unet.class_embedding = None
yield unet
finally:
unet.class_embedding = class_embedding
@contextmanager
def set_scheduler(
self, pipe: StableDiffusionPipeline, scheduler_class: Any, **kwargs
):
scheduler_orig = pipe.scheduler
pipe.scheduler = scheduler_class.from_config(scheduler_orig.config, **kwargs)
yield pipe
pipe.scheduler = scheduler_orig
    def get_eps_pretrain(
        self,
        latents_noisy: Float[Tensor, "B 4 Hl Wl"],
        t: Int[Tensor, "B"],
        prompt_utils: PromptProcessorOutput,
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
    ) -> Float[Tensor, "B 4 Hl Wl"]:
        """Guided epsilon prediction from the frozen pretrained UNet.

        Uses Perp-Neg guidance when the prompt processor provides negative
        prompts, otherwise standard classifier-free guidance. The UNet's
        class embedding is disabled so the phi camera embedding does not
        leak into this prediction, and LoRA is switched off via
        ``cross_attention_kwargs={"scale": 0.0}`` when the phi network
        shares this UNet.
        """
        batch_size = latents_noisy.shape[0]
        if prompt_utils.use_perp_neg:
            (
                text_embeddings,
                neg_guidance_weights,
            ) = prompt_utils.get_text_embeddings_perp_neg(
                elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
            )
            with torch.no_grad():
                with self.disable_unet_class_embedding(self.pipe.unet) as unet:
                    noise_pred = self.forward_unet(
                        unet,
                        torch.cat([latents_noisy] * 4, dim=0),
                        torch.cat([t] * 4, dim=0),
                        encoder_hidden_states=text_embeddings,
                        cross_attention_kwargs={"scale": 0.0}
                        if self.vsd_share_model
                        else None,
                        velocity_to_epsilon=self.pipe.scheduler.config.prediction_type
                        == "v_prediction",
                    )  # (4B, 3, Hl, Wl)
            noise_pred_text = noise_pred[:batch_size]
            noise_pred_uncond = noise_pred[batch_size : batch_size * 2]
            noise_pred_neg = noise_pred[batch_size * 2 :]
            # project each negative direction perpendicular to the positive one
            e_pos = noise_pred_text - noise_pred_uncond
            accum_grad = 0
            n_negative_prompts = neg_guidance_weights.shape[-1]
            for i in range(n_negative_prompts):
                e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
                accum_grad += neg_guidance_weights[:, i].view(
                    -1, 1, 1, 1
                ) * perpendicular_component(e_i_neg, e_pos)
            noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
                e_pos + accum_grad
            )
        else:
            text_embeddings = prompt_utils.get_text_embeddings(
                elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
            )
            with torch.no_grad():
                with self.disable_unet_class_embedding(self.pipe.unet) as unet:
                    noise_pred = self.forward_unet(
                        unet,
                        torch.cat([latents_noisy] * 2, dim=0),
                        torch.cat([t] * 2, dim=0),
                        encoder_hidden_states=text_embeddings,
                        cross_attention_kwargs={"scale": 0.0}
                        if self.vsd_share_model
                        else None,
                        velocity_to_epsilon=self.pipe.scheduler.config.prediction_type
                        == "v_prediction",
                    )
            noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
                noise_pred_text - noise_pred_uncond
            )
        return noise_pred
    def get_eps_phi(
        self,
        latents_noisy: Float[Tensor, "B 4 Hl Wl"],
        t: Int[Tensor, "B"],
        prompt_utils: PromptProcessorOutput,
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        camera_condition: Float[Tensor, "B ..."],
    ) -> Float[Tensor, "B 4 Hl Wl"]:
        """Guided epsilon prediction from the phi (LoRA) network for VSD.

        Runs the phi UNet on a doubled batch — once with the camera condition
        and once with a zeroed condition — then applies classifier-free
        guidance with ``vsd_guidance_scale_phi``.
        """
        batch_size = latents_noisy.shape[0]
        # not using view-dependent prompting in LoRA
        text_embeddings, _ = prompt_utils.get_text_embeddings(
            elevation, azimuth, camera_distances, view_dependent_prompting=False
        ).chunk(2)
        with torch.no_grad():
            noise_pred = self.forward_unet(
                self.pipe_phi.unet,
                torch.cat([latents_noisy] * 2, dim=0),
                torch.cat([t] * 2, dim=0),
                encoder_hidden_states=torch.cat([text_embeddings] * 2, dim=0),
                class_labels=torch.cat(
                    [
                        camera_condition.view(batch_size, -1),
                        torch.zeros_like(camera_condition.view(batch_size, -1)),
                    ],
                    dim=0,
                )
                if self.cfg.vsd_use_camera_condition
                else None,
                cross_attention_kwargs={"scale": 1.0},
                velocity_to_epsilon=self.pipe_phi.scheduler.config.prediction_type
                == "v_prediction",
            )
        noise_pred_camera, noise_pred_uncond = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + self.cfg.vsd_guidance_scale_phi * (
            noise_pred_camera - noise_pred_uncond
        )
        return noise_pred
def train_phi(
self,
latents: Float[Tensor, "B 4 Hl Wl"],
prompt_utils: PromptProcessorOutput,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
camera_condition: Float[Tensor, "B ..."],
):
B = latents.shape[0]
latents = latents.detach().repeat(
self.cfg.vsd_lora_n_timestamp_samples, 1, 1, 1
)
num_train_timesteps = self.pipe_phi.scheduler.config.num_train_timesteps
t = torch.randint(
int(num_train_timesteps * 0.0),
int(num_train_timesteps * 1.0),
[B * self.cfg.vsd_lora_n_timestamp_samples],
dtype=torch.long,
device=self.device,
)
noise = torch.randn_like(latents)
latents_noisy = self.pipe_phi.scheduler.add_noise(latents, noise, t)
if self.pipe_phi.scheduler.config.prediction_type == "epsilon":
target = noise
elif self.pipe_phi.scheduler.prediction_type == "v_prediction":
target = self.pipe_phi.scheduler.get_velocity(latents, noise, t)
else:
raise ValueError(
f"Unknown prediction type {self.pipe_phi.scheduler.prediction_type}"
)
# not using view-dependent prompting in LoRA
text_embeddings, _ = prompt_utils.get_text_embeddings(
elevation, azimuth, camera_distances, view_dependent_prompting=False
).chunk(2)
if (
self.cfg.vsd_use_camera_condition
and self.cfg.vsd_lora_cfg_training
and random.random() < 0.1
):
camera_condition = torch.zeros_like(camera_condition)
noise_pred = self.forward_unet(
self.pipe_phi.unet,
latents_noisy,
t,
encoder_hidden_states=text_embeddings.repeat(
self.cfg.vsd_lora_n_timestamp_samples, 1, 1
),
class_labels=camera_condition.view(B, -1).repeat(
self.cfg.vsd_lora_n_timestamp_samples, 1
)
if self.cfg.vsd_use_camera_condition
else None,
cross_attention_kwargs={"scale": 1.0},
)
return F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
    def forward(
        self,
        rgb: Float[Tensor, "B H W C"],
        prompt_utils: PromptProcessorOutput,
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        mvp_mtx: Float[Tensor, "B 4 4"],
        c2w: Float[Tensor, "B 4 4"],
        rgb_as_latents=False,
        **kwargs,
    ):
        """Compute the SDS/VSD guidance loss for a batch of rendered images.

        Args:
            rgb: rendered images (B, H, W, C). Interpreted directly as latents
                when ``rgb_as_latents`` (expected in [-1, 1]), otherwise as
                RGB in [0, 1] and encoded through the VAE.
            mvp_mtx / c2w: camera matrices; one of them becomes the VSD camera
                condition depending on ``vsd_camera_condition_type``.

        Returns:
            dict with ``loss_sd`` (plus ``loss_train_phi`` for VSD) and
            logging tensors (grad norm, timesteps, intermediate latents and,
            optionally, decoded 1-step / multi-step predictions).
        """
        batch_size = rgb.shape[0]
        rgb_BCHW = rgb.permute(0, 3, 1, 2)
        latents: Float[Tensor, "B 4 Hl Wl"]
        if rgb_as_latents:
            # treat input rgb as latents
            # input rgb should be in range [-1, 1]
            latents = F.interpolate(
                rgb_BCHW, (64, 64), mode="bilinear", align_corners=False
            )
        else:
            # treat input rgb as rgb
            # input rgb should be in range [0, 1]
            rgb_BCHW = F.interpolate(
                rgb_BCHW, (512, 512), mode="bilinear", align_corners=False
            )
            # encode image into latents with vae
            latents = self.vae_encode(self.pipe.vae, rgb_BCHW * 2.0 - 1.0)
        # sample timestep
        # use the same timestep for each batch
        assert self.min_step is not None and self.max_step is not None
        t = torch.randint(
            self.min_step,
            self.max_step + 1,
            [1],
            dtype=torch.long,
            device=self.device,
        ).repeat(batch_size)
        # sample noise
        noise = torch.randn_like(latents)
        latents_noisy = self.scheduler.add_noise(latents, noise, t)
        eps_pretrain = self.get_eps_pretrain(
            latents_noisy, t, prompt_utils, elevation, azimuth, camera_distances
        )
        # x0-estimate implied by the guided epsilon: (z_t - sigma_t * eps) / alpha_t
        latents_1step_orig = (
            1
            / self.alphas[t].view(-1, 1, 1, 1)
            * (latents_noisy - self.sigmas[t].view(-1, 1, 1, 1) * eps_pretrain)
        ).detach()
        if self.cfg.guidance_type == "sds":
            # plain SDS: the "phi" prediction is just the sampled noise
            eps_phi = noise
        elif self.cfg.guidance_type == "vsd":
            if self.cfg.vsd_camera_condition_type == "extrinsics":
                camera_condition = c2w
            elif self.cfg.vsd_camera_condition_type == "mvp":
                camera_condition = mvp_mtx
            elif self.cfg.vsd_camera_condition_type == "spherical":
                camera_condition = torch.stack(
                    [
                        torch.deg2rad(elevation),
                        torch.sin(torch.deg2rad(azimuth)),
                        torch.cos(torch.deg2rad(azimuth)),
                        camera_distances,
                    ],
                    dim=-1,
                )
            else:
                raise ValueError(
                    f"Unknown camera_condition_type {self.cfg.vsd_camera_condition_type}"
                )
            eps_phi = self.get_eps_phi(
                latents_noisy,
                t,
                prompt_utils,
                elevation,
                azimuth,
                camera_distances,
                camera_condition,
            )
            loss_train_phi = self.train_phi(
                latents,
                prompt_utils,
                elevation,
                azimuth,
                camera_distances,
                camera_condition,
            )
        if self.cfg.weighting_strategy == "dreamfusion":
            w = (1.0 - self.alphas[t]).view(-1, 1, 1, 1)
        elif self.cfg.weighting_strategy == "uniform":
            w = 1.0
        elif self.cfg.weighting_strategy == "fantasia3d":
            w = (self.alphas[t] ** 0.5 * (1 - self.alphas[t])).view(-1, 1, 1, 1)
        else:
            raise ValueError(
                f"Unknown weighting strategy: {self.cfg.weighting_strategy}"
            )
        grad = w * (eps_pretrain - eps_phi)
        if self.grad_clip_val is not None:
            grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
        # reparameterization trick:
        # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
        target = (latents - grad).detach()
        loss_sd = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size
        guidance_out = {
            "loss_sd": loss_sd,
            "grad_norm": grad.norm(),
            "timesteps": t,
            "min_step": self.min_step,
            "max_step": self.max_step,
            "latents": latents,
            "latents_1step_orig": latents_1step_orig,
            "rgb": rgb_BCHW.permute(0, 2, 3, 1),
            "weights": w,
            "lambdas": self.lambdas[t],
        }
        if self.cfg.return_rgb_1step_orig:
            with torch.no_grad():
                rgb_1step_orig = self.vae_decode(
                    self.pipe.vae, latents_1step_orig
                ).permute(0, 2, 3, 1)
            guidance_out.update({"rgb_1step_orig": rgb_1step_orig})
        if self.cfg.return_rgb_multistep_orig:
            # run a short sampler (temporarily swapped in) from the noisy
            # latents for visualization
            with self.set_scheduler(
                self.pipe,
                DPMSolverSinglestepScheduler,
                solver_order=1,
                num_train_timesteps=int(t[0]),
            ) as pipe:
                text_embeddings = prompt_utils.get_text_embeddings(
                    elevation,
                    azimuth,
                    camera_distances,
                    self.cfg.view_dependent_prompting,
                )
                text_embeddings_cond, text_embeddings_uncond = text_embeddings.chunk(2)
                with torch.cuda.amp.autocast(enabled=False):
                    latents_multistep_orig = pipe(
                        num_inference_steps=self.cfg.n_rgb_multistep_orig_steps,
                        guidance_scale=self.cfg.guidance_scale,
                        eta=1.0,
                        latents=latents_noisy.to(pipe.unet.dtype),
                        prompt_embeds=text_embeddings_cond.to(pipe.unet.dtype),
                        negative_prompt_embeds=text_embeddings_uncond.to(
                            pipe.unet.dtype
                        ),
                        cross_attention_kwargs={"scale": 0.0}
                        if self.vsd_share_model
                        else None,
                        output_type="latent",
                    ).images.to(latents.dtype)
            with torch.no_grad():
                rgb_multistep_orig = self.vae_decode(
                    self.pipe.vae, latents_multistep_orig
                )
            guidance_out.update(
                {
                    "latents_multistep_orig": latents_multistep_orig,
                    "rgb_multistep_orig": rgb_multistep_orig.permute(0, 2, 3, 1),
                }
            )
        if self.cfg.guidance_type == "vsd":
            guidance_out.update(
                {
                    "loss_train_phi": loss_train_phi,
                }
            )
        return guidance_out
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
# clip grad for stable training as demonstrated in
# Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
# http://arxiv.org/abs/2303.15413
if self.cfg.grad_clip is not None:
self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step)
self.min_step = int(
self.num_train_timesteps * C(self.cfg.min_step_percent, epoch, global_step)
)
self.max_step = int(
self.num_train_timesteps * C(self.cfg.max_step_percent, epoch, global_step)
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/__init__.py | threestudio/models/guidance/__init__.py | from . import (
multiview_diffusion_guidance,
DreamReward_guidance1,
DreamReward_guidance2
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/deep_floyd_finetune_guidance.py | threestudio/models/guidance/deep_floyd_finetune_guidance.py | from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers import IFPipeline
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, parse_version
from threestudio.utils.ops import perpendicular_component
from threestudio.utils.typing import *
import ImageReward as RM
from PIL import Image
from torchvision.transforms import InterpolationMode
from torchvision.transforms import Compose, Resize, CenterCrop, Normalize
# Fallback for environments where torchvision does not provide
# InterpolationMode. NOTE(review): if the `InterpolationMode` import above
# failed, the module import itself would already have raised, and a missing
# attribute would raise AttributeError rather than ImportError — confirm
# which torchvision versions this guard actually targets.
try:
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    BICUBIC = Image.BICUBIC
def _transform():
    """Build the CLIP-style preprocessing pipeline (resize, crop, normalize)."""
    clip_mean = (0.48145466, 0.4578275, 0.40821073)
    clip_std = (0.26862954, 0.26130258, 0.27577711)
    steps = [
        Resize(224, interpolation=BICUBIC),
        CenterCrop(224),
        Normalize(clip_mean, clip_std),
    ]
    return Compose(steps)
@threestudio.register("deep-floyd-finetune-guidance")
class DeepFloydGuidance(BaseObject):
    @dataclass
    class Config(BaseObject.Config):
        # HuggingFace id of the DeepFloyd IF stage-I model
        pretrained_model_name_or_path: str = "DeepFloyd/IF-I-XL-v1.0"
        # FIXME: xformers error
        enable_memory_efficient_attention: bool = False
        enable_sequential_cpu_offload: bool = False
        enable_attention_slicing: bool = False
        enable_channels_last_format: bool = True
        # classifier-free guidance scale
        guidance_scale: float = 20.0
        # optional (possibly scheduled) gradient clip value
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = True
        # sampled-timestep window as fractions of the schedule
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        weighting_strategy: str = "sds"
        view_dependent_prompting: bool = True
        """Maximum number of batch items to evaluate guidance for (for debugging) and to save on disk. -1 means save all items."""
        max_items_eval: int = 4
cfg: Config
    def configure(self) -> None:
        """Load the DeepFloyd IF stage-I pipeline (frozen) and ImageReward.

        The text encoder and safety modules are dropped from the pipeline;
        prompt embeddings are presumably supplied by the prompt processor
        (see the text-embedding lookups in ``__call__``).
        """
        threestudio.info(f"Loading Deep Floyd ...")
        self.weights_dtype = (
            torch.float16 if self.cfg.half_precision_weights else torch.float32
        )
        # Create model
        self.pipe = IFPipeline.from_pretrained(
            self.cfg.pretrained_model_name_or_path,
            text_encoder=None,
            safety_checker=None,
            watermarker=None,
            feature_extractor=None,
            requires_safety_checker=False,
            variant="fp16" if self.cfg.half_precision_weights else None,
            torch_dtype=self.weights_dtype,
        ).to(self.device)
        if self.cfg.enable_memory_efficient_attention:
            if parse_version(torch.__version__) >= parse_version("2"):
                threestudio.info(
                    "PyTorch2.0 uses memory efficient attention by default."
                )
            elif not is_xformers_available():
                threestudio.warn(
                    "xformers is not available, memory efficient attention is not enabled."
                )
            else:
                threestudio.warn(
                    f"Use DeepFloyd with xformers may raise error, see https://github.com/deep-floyd/IF/issues/52 to track this problem."
                )
                self.pipe.enable_xformers_memory_efficient_attention()
        if self.cfg.enable_sequential_cpu_offload:
            self.pipe.enable_sequential_cpu_offload()
        if self.cfg.enable_attention_slicing:
            self.pipe.enable_attention_slicing(1)
        if self.cfg.enable_channels_last_format:
            self.pipe.unet.to(memory_format=torch.channels_last)
        # freeze the UNet — guidance only, no finetuning of the diffusion model
        self.unet = self.pipe.unet.eval()
        for p in self.unet.parameters():
            p.requires_grad_(False)
        self.scheduler = self.pipe.scheduler
        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        self.set_min_max_steps()  # set to default value
        self.alphas: Float[Tensor, "..."] = self.scheduler.alphas_cumprod.to(
            self.device
        )
        self.grad_clip_val: Optional[float] = None
        threestudio.info(f"Loaded Deep Floyd!")
        # reward model used for the DreamReward objective
        reward_model = RM.load("ImageReward-v1.0")
        self.reward_model=reward_model.to(self.device)
        threestudio.info(f"Loaded ImageReward!")
@torch.cuda.amp.autocast(enabled=False)
def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0.98):
self.min_step = int(self.num_train_timesteps * min_step_percent)
self.max_step = int(self.num_train_timesteps * max_step_percent)
@torch.cuda.amp.autocast(enabled=False)
def forward_unet(
self,
latents: Float[Tensor, "..."],
t: Float[Tensor, "..."],
encoder_hidden_states: Float[Tensor, "..."],
) -> Float[Tensor, "..."]:
input_dtype = latents.dtype
return self.unet(
latents.to(self.weights_dtype),
t.to(self.weights_dtype),
encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype),
).sample.to(input_dtype)
def __call__(
self,
rgb: Float[Tensor, "B H W C"],
prompt_utils: PromptProcessorOutput,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
rgb_as_latents=False,
guidance_eval=False,
**kwargs,
):
batch_size = rgb.shape[0]
rgb_BCHW = rgb.permute(0, 3, 1, 2)
assert rgb_as_latents == False, f"No latent space in {self.__class__.__name__}"
rgb_BCHW = rgb_BCHW * 2.0 - 1.0 # scale to [-1, 1] to match the diffusion range
latents = F.interpolate(
rgb_BCHW, (64, 64), mode="bilinear", align_corners=False
)
# timestep ~ U(0.02, 0.98) to avoid very high/low noise level
t = torch.randint(
self.min_step,
self.max_step + 1,
[batch_size],
dtype=torch.long,
device=self.device,
)
if prompt_utils.use_perp_neg:
(
text_embeddings,
neg_guidance_weights,
) = prompt_utils.get_text_embeddings_perp_neg(
elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
)
with torch.no_grad():
noise = torch.randn_like(latents)
latents_noisy = self.scheduler.add_noise(latents, noise, t)
latent_model_input = torch.cat([latents_noisy] * 4, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t] * 4),
encoder_hidden_states=text_embeddings,
) # (4B, 6, 64, 64)
noise_pred_text, _ = noise_pred[:batch_size].split(3, dim=1)
noise_pred_uncond, _ = noise_pred[batch_size : batch_size * 2].split(
3, dim=1
)
noise_pred_neg, _ = noise_pred[batch_size * 2 :].split(3, dim=1)
e_pos = noise_pred_text - noise_pred_uncond
accum_grad = 0
n_negative_prompts = neg_guidance_weights.shape[-1]
for i in range(n_negative_prompts):
e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
accum_grad += neg_guidance_weights[:, i].view(
-1, 1, 1, 1
) * perpendicular_component(e_i_neg, e_pos)
noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
e_pos + accum_grad
)
else:
neg_guidance_weights = None
text_embeddings = prompt_utils.get_text_embeddings(
elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
)
# predict the noise residual with unet, NO grad!
with torch.no_grad():
# add noise
noise = torch.randn_like(latents) # TODO: use torch generator
latents_noisy = self.scheduler.add_noise(latents, noise, t)
# pred noise
latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t] * 2),
encoder_hidden_states=text_embeddings,
) # (2B, 6, 64, 64)
# perform guidance (high scale from paper!)
noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
noise_pred_text, predicted_variance = noise_pred_text.split(3, dim=1)
noise_pred_uncond, _ = noise_pred_uncond.split(3, dim=1)
noise_pred = noise_pred_text + self.cfg.guidance_scale * (
noise_pred_text - noise_pred_uncond
)
"""
# thresholding, experimental
if self.cfg.thresholding:
assert batch_size == 1
noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
noise_pred = custom_ddpm_step(self.scheduler,
noise_pred, int(t.item()), latents_noisy, **self.pipe.prepare_extra_step_kwargs(None, 0.0)
)
"""
if self.cfg.weighting_strategy == "sds":
# w(t), sigma_t^2
w = (1 - self.alphas[t]).view(-1, 1, 1, 1)
elif self.cfg.weighting_strategy == "uniform":
w = 1
elif self.cfg.weighting_strategy == "fantasia3d":
w = (self.alphas[t] ** 0.5 * (1 - self.alphas[t])).view(-1, 1, 1, 1)
else:
raise ValueError(
f"Unknown weighting strategy: {self.cfg.weighting_strategy}"
)
grad = w * (noise_pred - noise)
grad = torch.nan_to_num(grad)
# clip grad for stable training?
if self.grad_clip_val is not None:
grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
# loss = SpecifyGradient.apply(latents, grad)
# SpecifyGradient is not straghtforward, use a reparameterization trick instead
target = (latents - grad).detach()
# d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
loss_sds = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size
try:
self.rm_input_ids
except:
self.rm_input_ids = [0,0,0,0]
self.rm_attention_mask = [0,0,0,0]
prompts_vds = prompt_utils.prompts_vd
for idx in range(4):
prompts_vd = prompts_vds[idx]
print(prompts_vd)
g = self.reward_model.blip.tokenizer(prompts_vd, padding='max_length', truncation=True, max_length=35, return_tensors="pt")
self.rm_input_ids[idx] = g.input_ids
self.rm_attention_mask[idx] = g.attention_mask
# Get direction
direction_idx = torch.zeros_like(elevation, dtype=torch.long)
for d in prompt_utils.directions:
direction_idx[
d.condition(elevation, azimuth, camera_distances)
] = prompt_utils.direction2idx[d.name]
# Compute reward loss
try:
self.batch_idx += 1
except:
self.batch_idx = 1
a=list(range(400,10000,400))
b=[list(range(x,x+400)) for x in a]
c=[]
for i in range(len(b)):
c+=b[i]
#adding_reward = True
adding_reward = self.batch_idx in c
if adding_reward:
image = rgb_BCHW
rm_preprocess = _transform()
image = rm_preprocess(image)
rm_input_ids = torch.cat([self.rm_input_ids[idx] for idx in direction_idx]).to(self.device)
rm_attention_mask = torch.cat([self.rm_attention_mask[idx] for idx in direction_idx]).to(self.device)
self.reward_model.train()
rewards = self.reward_model.score_gard(rm_input_ids, rm_attention_mask, image)
loss_reward = F.relu(-rewards+2).mean()
else:
loss_reward = 0
def calculate_weight(a, b, n=0):
diff = len(str(a).split(".")[0])-len(str(b).split(".")[0])
weight = 10**(diff - n)
return weight
if adding_reward:
weight = calculate_weight(loss_sds.item(),loss_reward.item())
print(f"sds-loss:{loss_sds.item()},image-reward-loss:{loss_reward.item()*weight},reward:{rewards.mean().item()}")
loss = loss_reward*10
else:
loss = 0
guidance_out = {
"loss_sds": loss,
"grad_norm": grad.norm(),
"min_step": self.min_step,
"max_step": self.max_step,
}
if guidance_eval:
guidance_eval_utils = {
"use_perp_neg": prompt_utils.use_perp_neg,
"neg_guidance_weights": neg_guidance_weights,
"text_embeddings": text_embeddings,
"t_orig": t,
"latents_noisy": latents_noisy,
"noise_pred": torch.cat([noise_pred, predicted_variance], dim=1),
}
guidance_eval_out = self.guidance_eval(**guidance_eval_utils)
texts = []
for n, e, a, c in zip(
guidance_eval_out["noise_levels"], elevation, azimuth, camera_distances
):
texts.append(
f"n{n:.02f}\ne{e.item():.01f}\na{a.item():.01f}\nc{c.item():.02f}"
)
guidance_eval_out.update({"texts": texts})
guidance_out.update({"eval": guidance_eval_out})
return guidance_out
@torch.cuda.amp.autocast(enabled=False)
@torch.no_grad()
def get_noise_pred(
self,
latents_noisy,
t,
text_embeddings,
use_perp_neg=False,
neg_guidance_weights=None,
):
batch_size = latents_noisy.shape[0]
if use_perp_neg:
latent_model_input = torch.cat([latents_noisy] * 4, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t.reshape(1)] * 4).to(self.device),
encoder_hidden_states=text_embeddings,
) # (4B, 6, 64, 64)
noise_pred_text, _ = noise_pred[:batch_size].split(3, dim=1)
noise_pred_uncond, _ = noise_pred[batch_size : batch_size * 2].split(
3, dim=1
)
noise_pred_neg, _ = noise_pred[batch_size * 2 :].split(3, dim=1)
e_pos = noise_pred_text - noise_pred_uncond
accum_grad = 0
n_negative_prompts = neg_guidance_weights.shape[-1]
for i in range(n_negative_prompts):
e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
accum_grad += neg_guidance_weights[:, i].view(
-1, 1, 1, 1
) * perpendicular_component(e_i_neg, e_pos)
noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
e_pos + accum_grad
)
else:
latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t.reshape(1)] * 2).to(self.device),
encoder_hidden_states=text_embeddings,
) # (2B, 6, 64, 64)
# perform guidance (high scale from paper!)
noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
noise_pred_text, predicted_variance = noise_pred_text.split(3, dim=1)
noise_pred_uncond, _ = noise_pred_uncond.split(3, dim=1)
noise_pred = noise_pred_text + self.cfg.guidance_scale * (
noise_pred_text - noise_pred_uncond
)
return torch.cat([noise_pred, predicted_variance], dim=1)
    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def guidance_eval(
        self,
        t_orig,
        text_embeddings,
        latents_noisy,
        noise_pred,
        use_perp_neg=False,
        neg_guidance_weights=None,
    ):
        """Visualize guidance quality: one denoise step and a full denoise-to-the-end.

        Returns a dict with the noisy inputs, the one-step result, the one-step
        x0 estimate, and the fully denoised images (all channels-last, in [0, 1]
        assuming pixel-space "latents").
        """
        # use only 50 timesteps, and find nearest of those to t
        self.scheduler.set_timesteps(50)
        self.scheduler.timesteps_gpu = self.scheduler.timesteps.to(self.device)
        bs = (
            min(self.cfg.max_items_eval, latents_noisy.shape[0])
            if self.cfg.max_items_eval > 0
            else latents_noisy.shape[0]
        )  # batch size
        large_enough_idxs = self.scheduler.timesteps_gpu.expand([bs, -1]) > t_orig[
            :bs
        ].unsqueeze(
            -1
        )  # sized [bs,50] > [bs,1]
        # timesteps are descending, so the first False marks the nearest step <= t_orig;
        # torch.min over bools returns the index of the first False per row
        idxs = torch.min(large_enough_idxs, dim=1)[1]
        t = self.scheduler.timesteps_gpu[idxs]
        fracs = list((t / self.scheduler.config.num_train_timesteps).cpu().numpy())
        imgs_noisy = (latents_noisy[:bs] / 2 + 0.5).permute(0, 2, 3, 1)
        # get prev latent
        latents_1step = []
        pred_1orig = []
        for b in range(bs):
            step_output = self.scheduler.step(
                noise_pred[b : b + 1], t[b], latents_noisy[b : b + 1]
            )
            latents_1step.append(step_output["prev_sample"])
            pred_1orig.append(step_output["pred_original_sample"])
        latents_1step = torch.cat(latents_1step)
        pred_1orig = torch.cat(pred_1orig)
        imgs_1step = (latents_1step / 2 + 0.5).permute(0, 2, 3, 1)
        imgs_1orig = (pred_1orig / 2 + 0.5).permute(0, 2, 3, 1)
        latents_final = []
        for b, i in enumerate(idxs):
            latents = latents_1step[b : b + 1]
            # embeddings are laid out [text | uncond | (2 negatives)] per batch,
            # so pick this item's slices from each segment
            text_emb = (
                text_embeddings[
                    [b, b + len(idxs), b + 2 * len(idxs), b + 3 * len(idxs)], ...
                ]
                if use_perp_neg
                else text_embeddings[[b, b + len(idxs)], ...]
            )
            neg_guid = neg_guidance_weights[b : b + 1] if use_perp_neg else None
            # NOTE: the loop variable `t` shadows the per-item timestep tensor
            # above; `fracs` was already computed, so this is harmless here.
            for t in tqdm(self.scheduler.timesteps[i + 1 :], leave=False):
                # pred noise
                noise_pred = self.get_noise_pred(
                    latents, t, text_emb, use_perp_neg, neg_guid
                )
                # get prev latent
                latents = self.scheduler.step(noise_pred, t, latents)["prev_sample"]
            latents_final.append(latents)
        latents_final = torch.cat(latents_final)
        imgs_final = (latents_final / 2 + 0.5).permute(0, 2, 3, 1)
        return {
            "bs": bs,
            "noise_levels": fracs,
            "imgs_noisy": imgs_noisy,
            "imgs_1step": imgs_1step,
            "imgs_1orig": imgs_1orig,
            "imgs_final": imgs_final,
        }
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
# clip grad for stable training as demonstrated in
# Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
# http://arxiv.org/abs/2303.15413
if self.cfg.grad_clip is not None:
self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step)
self.set_min_max_steps(
min_step_percent=C(self.cfg.min_step_percent, epoch, global_step),
max_step_percent=C(self.cfg.max_step_percent, epoch, global_step),
)
"""
# used by thresholding, experimental
def custom_ddpm_step(ddpm, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True):
self = ddpm
t = timestep
prev_t = self.previous_timestep(t)
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod_t = self.alphas_cumprod[t].item()
alpha_prod_t_prev = self.alphas_cumprod[prev_t].item() if prev_t >= 0 else 1.0
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
current_alpha_t = alpha_prod_t / alpha_prod_t_prev
current_beta_t = 1 - current_alpha_t
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
elif self.config.prediction_type == "v_prediction":
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
" `v_prediction` for the DDPMScheduler."
)
# 3. Clip or threshold "predicted x_0"
if self.config.thresholding:
pred_original_sample = self._threshold_sample(pred_original_sample)
elif self.config.clip_sample:
pred_original_sample = pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range
)
noise_thresholded = (sample - (alpha_prod_t ** 0.5) * pred_original_sample) / (beta_prod_t ** 0.5)
return noise_thresholded
"""
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/zero123_guidance.py | threestudio/models/guidance/zero123_guidance.py | import importlib
import os
from dataclasses import dataclass, field
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers import DDIMScheduler, DDPMScheduler, StableDiffusionPipeline
from diffusers.utils.import_utils import is_xformers_available
from omegaconf import OmegaConf
from tqdm import tqdm
import threestudio
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, parse_version
from threestudio.utils.typing import *
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path such as ``"pkg.mod.Name"`` to the named attribute.

    When *reload* is true, the containing module is re-imported first so that
    on-disk edits are picked up.
    """
    module_path, attr_name = string.rsplit(".", 1)
    if reload:
        importlib.reload(importlib.import_module(module_path))
    module = importlib.import_module(module_path, package=None)
    return getattr(module, attr_name)
def instantiate_from_config(config):
    """Instantiate the object described by *config*.

    *config* is normally a mapping with a dotted-path ``target`` and optional
    ``params`` kwargs. The LDM sentinel strings ``"__is_first_stage__"`` and
    ``"__is_unconditional__"`` mark intentionally-absent stages and yield None.

    Raises:
        KeyError: if *config* has no ``target`` key and is not a sentinel.
    """
    # PEP 8 idiom: `x not in y` instead of `not x in y`.
    if "target" not in config:
        if config in ("__is_first_stage__", "__is_unconditional__"):
            return None
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
# Load the Zero123 latent-diffusion model from an OmegaConf config + checkpoint.
def load_model_from_config(config, ckpt, device, vram_O=True, verbose=False):
    """Instantiate the model described by ``config.model`` and load ``ckpt``.

    Args:
        config: OmegaConf tree whose ``model`` node is understood by
            ``instantiate_from_config``.
        ckpt: path to a Lightning-style checkpoint containing ``state_dict``.
        device: torch device the model is moved to.
        vram_O: when True, delete the first-stage decoder to save GPU memory
            (the returned model can then only encode, not decode).
        verbose: print global step and missing/unexpected state-dict keys.

    Returns:
        The model in eval mode on ``device``.
    """
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd and verbose:
        print(f'[INFO] Global Step: {pl_sd["global_step"]}')
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    # strict=False: checkpoint and config may legitimately disagree on aux keys
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("[INFO] missing keys: \n", m)
    if len(u) > 0 and verbose:
        print("[INFO] unexpected keys: \n", u)
    # manually load ema and delete it to save GPU memory
    if model.use_ema:
        if verbose:
            print("[INFO] loading EMA...")
        model.model_ema.copy_to(model.model)
        del model.model_ema
    if vram_O:
        # we don't need decoder
        del model.first_stage_model.decoder
        torch.cuda.empty_cache()
    model.eval().to(device)
    return model
@threestudio.register("zero123-guidance")
class Zero123Guidance(BaseObject):
    """SDS guidance backed by the Zero123 novel-view diffusion model.

    Conditions on a single reference image plus relative camera pose and
    distills gradients into rendered novel views.
    """
    @dataclass
    class Config(BaseObject.Config):
        # Zero123 checkpoint and its LDM model config
        pretrained_model_name_or_path: str = "load/zero123/105000.ckpt"
        pretrained_config: str = "load/zero123/sd-objaverse-finetune-c_concat-256.yaml"
        # drop the VAE decoder after loading to save VRAM
        vram_O: bool = True
        # reference (conditioning) image and its camera pose
        cond_image_path: str = "load/images/hamburger_rgba.png"
        cond_elevation_deg: float = 0.0
        cond_azimuth_deg: float = 0.0
        cond_camera_distance: float = 1.2
        # classifier-free guidance scale
        guidance_scale: float = 5.0
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = False
        # sampled-timestep window as fractions of the train schedule
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        """Maximum number of batch items to evaluate guidance for (for debugging) and to save on disk. -1 means save all items."""
        max_items_eval: int = 4
    cfg: Config
    def configure(self) -> None:
        """Load the Zero123 checkpoint, build the DDIM schedule and cache the image condition."""
        threestudio.info(f"Loading Zero123 ...")
        self.config = OmegaConf.load(self.cfg.pretrained_config)
        # TODO: seems it cannot load into fp16...
        self.weights_dtype = torch.float32
        self.model = load_model_from_config(
            self.config,
            self.cfg.pretrained_model_name_or_path,
            device=self.device,
            vram_O=self.cfg.vram_O,
        )
        for p in self.model.parameters():
            p.requires_grad_(False)
        # timesteps: use diffuser for convenience... hope it's alright.
        self.num_train_timesteps = self.config.model.params.timesteps
        self.scheduler = DDIMScheduler(
            self.num_train_timesteps,
            self.config.model.params.linear_start,
            self.config.model.params.linear_end,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
        )
        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        self.set_min_max_steps()  # set to default value
        self.alphas: Float[Tensor, "..."] = self.scheduler.alphas_cumprod.to(
            self.device
        )
        self.grad_clip_val: Optional[float] = None
        # pre-compute CLIP/VAE embeddings of the conditioning image
        self.prepare_embeddings(self.cfg.cond_image_path)
        threestudio.info(f"Loaded Zero123!")
    @torch.cuda.amp.autocast(enabled=False)
    def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0.98):
        # Recompute the sampled-timestep window from fractions of the schedule.
        self.min_step = int(self.num_train_timesteps * min_step_percent)
        self.max_step = int(self.num_train_timesteps * max_step_percent)
    @torch.cuda.amp.autocast(enabled=False)
    def prepare_embeddings(self, image_path: str) -> None:
        """Load the RGBA conditioning image, composite onto white, and cache its embeddings."""
        # load cond image for zero123
        assert os.path.exists(image_path)
        rgba = cv2.cvtColor(
            cv2.imread(image_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGRA2RGBA
        )
        rgba = (
            cv2.resize(rgba, (256, 256), interpolation=cv2.INTER_AREA).astype(
                np.float32
            )
            / 255.0
        )
        # alpha-composite onto a white background
        rgb = rgba[..., :3] * rgba[..., 3:] + (1 - rgba[..., 3:])
        self.rgb_256: Float[Tensor, "1 3 H W"] = (
            torch.from_numpy(rgb)
            .unsqueeze(0)
            .permute(0, 3, 1, 2)
            .contiguous()
            .to(self.device)
        )
        self.c_crossattn, self.c_concat = self.get_img_embeds(self.rgb_256)
    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def get_img_embeds(
        self,
        img: Float[Tensor, "B 3 256 256"],
    ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]:
        """Return (CLIP cross-attention embedding, VAE latent mode) for *img* in [0, 1]."""
        img = img * 2.0 - 1.0
        c_crossattn = self.model.get_learned_conditioning(img.to(self.weights_dtype))
        c_concat = self.model.encode_first_stage(img.to(self.weights_dtype)).mode()
        return c_crossattn, c_concat
    @torch.cuda.amp.autocast(enabled=False)
    def encode_images(
        self, imgs: Float[Tensor, "B 3 256 256"]
    ) -> Float[Tensor, "B 4 32 32"]:
        """Encode [0, 1] images into scaled first-stage latents."""
        input_dtype = imgs.dtype
        imgs = imgs * 2.0 - 1.0
        latents = self.model.get_first_stage_encoding(
            self.model.encode_first_stage(imgs.to(self.weights_dtype))
        )
        return latents.to(input_dtype)  # [B, 4, 32, 32] Latent space image
    @torch.cuda.amp.autocast(enabled=False)
    def decode_latents(
        self,
        latents: Float[Tensor, "B 4 H W"],
    ) -> Float[Tensor, "B 3 512 512"]:
        """Decode latents to images clamped to [0, 1]. Requires vram_O=False (decoder kept)."""
        input_dtype = latents.dtype
        image = self.model.decode_first_stage(latents)
        image = (image * 0.5 + 0.5).clamp(0, 1)
        return image.to(input_dtype)
    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def get_cond(
        self,
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        c_crossattn=None,
        c_concat=None,
        **kwargs,
    ) -> dict:
        """Build the Zero123 conditioning dict (uncond half stacked before cond half)."""
        # relative pose w.r.t. the conditioning view: (d_polar, sin d_azim, cos d_azim, d_radius)
        T = torch.stack(
            [
                torch.deg2rad(
                    (90 - elevation) - (90 - self.cfg.cond_elevation_deg)
                ),  # Zero123 polar is 90-elevation
                torch.sin(torch.deg2rad(azimuth - self.cfg.cond_azimuth_deg)),
                torch.cos(torch.deg2rad(azimuth - self.cfg.cond_azimuth_deg)),
                camera_distances - self.cfg.cond_camera_distance,
            ],
            dim=-1,
        )[:, None, :].to(self.device)
        cond = {}
        # project [CLIP embedding | pose] through the cc_projection MLP
        clip_emb = self.model.cc_projection(
            torch.cat(
                [
                    (self.c_crossattn if c_crossattn is None else c_crossattn).repeat(
                        len(T), 1, 1
                    ),
                    T,
                ],
                dim=-1,
            )
        )
        # first half zeros = unconditional branch for classifier-free guidance
        cond["c_crossattn"] = [
            torch.cat([torch.zeros_like(clip_emb).to(self.device), clip_emb], dim=0)
        ]
        cond["c_concat"] = [
            torch.cat(
                [
                    torch.zeros_like(self.c_concat)
                    .repeat(len(T), 1, 1, 1)
                    .to(self.device),
                    (self.c_concat if c_concat is None else c_concat).repeat(
                        len(T), 1, 1, 1
                    ),
                ],
                dim=0,
            )
        ]
        return cond
    def __call__(
        self,
        rgb: Float[Tensor, "B H W C"],
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        rgb_as_latents=False,
        guidance_eval=False,
        **kwargs,
    ):
        """Compute the Zero123 SDS loss for rendered views at the given cameras."""
        batch_size = rgb.shape[0]
        rgb_BCHW = rgb.permute(0, 3, 1, 2)
        latents: Float[Tensor, "B 4 64 64"]
        if rgb_as_latents:
            latents = (
                F.interpolate(rgb_BCHW, (32, 32), mode="bilinear", align_corners=False)
                * 2
                - 1
            )
        else:
            # NOTE(review): name says 512 but this resizes to 256 — the VAE input size.
            rgb_BCHW_512 = F.interpolate(
                rgb_BCHW, (256, 256), mode="bilinear", align_corners=False
            )
            # encode image into latents with vae
            latents = self.encode_images(rgb_BCHW_512)
        cond = self.get_cond(elevation, azimuth, camera_distances)
        # timestep ~ U(0.02, 0.98) to avoid very high/low noise level
        t = torch.randint(
            self.min_step,
            self.max_step + 1,
            [batch_size],
            dtype=torch.long,
            device=self.device,
        )
        # predict the noise residual with unet, NO grad!
        with torch.no_grad():
            # add noise
            noise = torch.randn_like(latents)  # TODO: use torch generator
            latents_noisy = self.scheduler.add_noise(latents, noise, t)
            # pred noise
            x_in = torch.cat([latents_noisy] * 2)
            t_in = torch.cat([t] * 2)
            noise_pred = self.model.apply_model(x_in, t_in, cond)
        # perform guidance
        noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
            noise_pred_cond - noise_pred_uncond
        )
        # SDS weighting w(t) = sigma_t^2
        w = (1 - self.alphas[t]).reshape(-1, 1, 1, 1)
        grad = w * (noise_pred - noise)
        grad = torch.nan_to_num(grad)
        # clip grad for stable training?
        if self.grad_clip_val is not None:
            grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
        # loss = SpecifyGradient.apply(latents, grad)
        # SpecifyGradient is not straightforward, use a reparameterization trick instead
        target = (latents - grad).detach()
        # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
        loss_sds = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size
        guidance_out = {
            "loss_sds": loss_sds,
            "grad_norm": grad.norm(),
            "min_step": self.min_step,
            "max_step": self.max_step,
        }
        if guidance_eval:
            guidance_eval_utils = {
                "cond": cond,
                "t_orig": t,
                "latents_noisy": latents_noisy,
                "noise_pred": noise_pred,
            }
            guidance_eval_out = self.guidance_eval(**guidance_eval_utils)
            texts = []
            for n, e, a, c in zip(
                guidance_eval_out["noise_levels"], elevation, azimuth, camera_distances
            ):
                texts.append(
                    f"n{n:.02f}\ne{e.item():.01f}\na{a.item():.01f}\nc{c.item():.02f}"
                )
            guidance_eval_out.update({"texts": texts})
            guidance_out.update({"eval": guidance_eval_out})
        return guidance_out
    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def guidance_eval(self, cond, t_orig, latents_noisy, noise_pred):
        """Visualize guidance: one DDIM step and a full denoise from each item's timestep."""
        # use only 50 timesteps, and find nearest of those to t
        self.scheduler.set_timesteps(50)
        self.scheduler.timesteps_gpu = self.scheduler.timesteps.to(self.device)
        bs = (
            min(self.cfg.max_items_eval, latents_noisy.shape[0])
            if self.cfg.max_items_eval > 0
            else latents_noisy.shape[0]
        )  # batch size
        large_enough_idxs = self.scheduler.timesteps_gpu.expand([bs, -1]) > t_orig[
            :bs
        ].unsqueeze(
            -1
        )  # sized [bs,50] > [bs,1]
        # timesteps descend, so the first False per row is the nearest step <= t_orig
        idxs = torch.min(large_enough_idxs, dim=1)[1]
        t = self.scheduler.timesteps_gpu[idxs]
        fracs = list((t / self.scheduler.config.num_train_timesteps).cpu().numpy())
        imgs_noisy = self.decode_latents(latents_noisy[:bs]).permute(0, 2, 3, 1)
        # get prev latent
        latents_1step = []
        pred_1orig = []
        for b in range(bs):
            step_output = self.scheduler.step(
                noise_pred[b : b + 1], t[b], latents_noisy[b : b + 1], eta=1
            )
            latents_1step.append(step_output["prev_sample"])
            pred_1orig.append(step_output["pred_original_sample"])
        latents_1step = torch.cat(latents_1step)
        pred_1orig = torch.cat(pred_1orig)
        imgs_1step = self.decode_latents(latents_1step).permute(0, 2, 3, 1)
        imgs_1orig = self.decode_latents(pred_1orig).permute(0, 2, 3, 1)
        latents_final = []
        for b, i in enumerate(idxs):
            latents = latents_1step[b : b + 1]
            # pick this item's uncond/cond conditioning pair
            c = {
                "c_crossattn": [cond["c_crossattn"][0][[b, b + len(idxs)], ...]],
                "c_concat": [cond["c_concat"][0][[b, b + len(idxs)], ...]],
            }
            for t in tqdm(self.scheduler.timesteps[i + 1 :], leave=False):
                # pred noise
                x_in = torch.cat([latents] * 2)
                t_in = torch.cat([t.reshape(1)] * 2).to(self.device)
                noise_pred = self.model.apply_model(x_in, t_in, c)
                # perform guidance
                noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
                    noise_pred_cond - noise_pred_uncond
                )
                # get prev latent
                latents = self.scheduler.step(noise_pred, t, latents, eta=1)[
                    "prev_sample"
                ]
            latents_final.append(latents)
        latents_final = torch.cat(latents_final)
        imgs_final = self.decode_latents(latents_final).permute(0, 2, 3, 1)
        return {
            "bs": bs,
            "noise_levels": fracs,
            "imgs_noisy": imgs_noisy,
            "imgs_1step": imgs_1step,
            "imgs_1orig": imgs_1orig,
            "imgs_final": imgs_final,
        }
    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        """Refresh schedule-dependent state (grad-clip value, timestep window)."""
        # clip grad for stable training as demonstrated in
        # Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
        # http://arxiv.org/abs/2303.15413
        if self.cfg.grad_clip is not None:
            self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step)
        self.set_min_max_steps(
            min_step_percent=C(self.cfg.min_step_percent, epoch, global_step),
            max_step_percent=C(self.cfg.max_step_percent, epoch, global_step),
        )
    # verification - requires `vram_O = False` in load_model_from_config
    @torch.no_grad()
    def generate(
        self,
        image,  # image tensor [1, 3, H, W] in [0, 1]
        elevation=0,
        azimuth=0,
        camera_distances=0,  # new view params
        c_crossattn=None,
        c_concat=None,
        scale=3,
        ddim_steps=50,
        post_process=True,
        ddim_eta=1,
    ):
        """Sample novel views of *image* at the given pose (sanity-check utility)."""
        if c_crossattn is None:
            c_crossattn, c_concat = self.get_img_embeds(image)
        cond = self.get_cond(
            elevation, azimuth, camera_distances, c_crossattn, c_concat
        )
        imgs = self.gen_from_cond(cond, scale, ddim_steps, post_process, ddim_eta)
        return imgs
    # verification - requires `vram_O = False` in load_model_from_config
    @torch.no_grad()
    def gen_from_cond(
        self,
        cond,
        scale=3,
        ddim_steps=50,
        post_process=True,
        ddim_eta=1,
    ):
        """Run full DDIM sampling from random latents under *cond*; optionally return HWC numpy."""
        # produce latents loop
        B = cond["c_crossattn"][0].shape[0] // 2
        latents = torch.randn((B, 4, 32, 32), device=self.device)
        self.scheduler.set_timesteps(ddim_steps)
        for t in self.scheduler.timesteps:
            x_in = torch.cat([latents] * 2)
            t_in = torch.cat([t.reshape(1).repeat(B)] * 2).to(self.device)
            noise_pred = self.model.apply_model(x_in, t_in, cond)
            noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + scale * (
                noise_pred_cond - noise_pred_uncond
            )
            latents = self.scheduler.step(noise_pred, t, latents, eta=ddim_eta)[
                "prev_sample"
            ]
        imgs = self.decode_latents(latents)
        imgs = imgs.cpu().numpy().transpose(0, 2, 3, 1) if post_process else imgs
        return imgs
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/instructpix2pix_guidance.py | threestudio/models/guidance/instructpix2pix_guidance.py | from dataclasses import dataclass
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from diffusers import DDIMScheduler, StableDiffusionInstructPix2PixPipeline
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, parse_version
from threestudio.utils.typing import *
@threestudio.register("stable-diffusion-instructpix2pix-guidance")
class InstructPix2PixGuidance(BaseObject):
    @dataclass
    class Config(BaseObject.Config):
        """Configuration for InstructPix2Pix guidance."""
        # optional Hugging Face cache directory for downloaded weights
        cache_dir: Optional[str] = None
        # scheduler source (DDIM config from SD v1.4) and the edit model itself
        ddim_scheduler_name_or_path: str = "CompVis/stable-diffusion-v1-4"
        ip2p_name_or_path: str = "timbrooks/instruct-pix2pix"
        # memory/perf toggles forwarded to the diffusers pipeline
        enable_memory_efficient_attention: bool = False
        enable_sequential_cpu_offload: bool = False
        enable_attention_slicing: bool = False
        enable_channels_last_format: bool = False
        # text guidance scale and image-conditioning scale (IP2P uses both)
        guidance_scale: float = 7.5
        condition_scale: float = 1.5
        grad_clip: Optional[
            Any
        ] = None  # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
        half_precision_weights: bool = True
        fixed_size: int = -1
        # sampled-timestep window as fractions of the train schedule
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98
        # number of denoising steps used when editing
        diffusion_steps: int = 20
        # presumably selects SDS gradients vs full-edit targets — TODO confirm against caller
        use_sds: bool = False
    cfg: Config
    def configure(self) -> None:
        """Load the InstructPix2Pix pipeline and DDIM scheduler, freeze weights, cache alphas."""
        threestudio.info(f"Loading InstructPix2Pix ...")
        self.weights_dtype = (
            torch.float16 if self.cfg.half_precision_weights else torch.float32
        )
        # disable the safety checker: inputs are our own renders, not user uploads
        pipe_kwargs = {
            "safety_checker": None,
            "feature_extractor": None,
            "requires_safety_checker": False,
            "torch_dtype": self.weights_dtype,
            "cache_dir": self.cfg.cache_dir,
        }
        self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            self.cfg.ip2p_name_or_path, **pipe_kwargs
        ).to(self.device)
        # scheduler comes from SD v1.4's DDIM config, not the IP2P pipeline default
        self.scheduler = DDIMScheduler.from_pretrained(
            self.cfg.ddim_scheduler_name_or_path,
            subfolder="scheduler",
            torch_dtype=self.weights_dtype,
            cache_dir=self.cfg.cache_dir,
        )
        self.scheduler.set_timesteps(self.cfg.diffusion_steps)
        if self.cfg.enable_memory_efficient_attention:
            if parse_version(torch.__version__) >= parse_version("2"):
                threestudio.info(
                    "PyTorch2.0 uses memory efficient attention by default."
                )
            elif not is_xformers_available():
                threestudio.warn(
                    "xformers is not available, memory efficient attention is not enabled."
                )
            else:
                self.pipe.enable_xformers_memory_efficient_attention()
        if self.cfg.enable_sequential_cpu_offload:
            self.pipe.enable_sequential_cpu_offload()
        if self.cfg.enable_attention_slicing:
            self.pipe.enable_attention_slicing(1)
        if self.cfg.enable_channels_last_format:
            self.pipe.unet.to(memory_format=torch.channels_last)
        # Create model
        self.vae = self.pipe.vae.eval()
        self.unet = self.pipe.unet.eval()
        for p in self.vae.parameters():
            p.requires_grad_(False)
        for p in self.unet.parameters():
            p.requires_grad_(False)
        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        self.set_min_max_steps()  # set to default value
        self.alphas: Float[Tensor, "..."] = self.scheduler.alphas_cumprod.to(
            self.device
        )
        self.grad_clip_val: Optional[float] = None
        threestudio.info(f"Loaded InstructPix2Pix!")
@torch.cuda.amp.autocast(enabled=False)
def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0.98):
self.min_step = int(self.num_train_timesteps * min_step_percent)
self.max_step = int(self.num_train_timesteps * max_step_percent)
@torch.cuda.amp.autocast(enabled=False)
def forward_unet(
self,
latents: Float[Tensor, "..."],
t: Float[Tensor, "..."],
encoder_hidden_states: Float[Tensor, "..."],
) -> Float[Tensor, "..."]:
input_dtype = latents.dtype
return self.unet(
latents.to(self.weights_dtype),
t.to(self.weights_dtype),
encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype),
).sample.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def encode_images(
self, imgs: Float[Tensor, "B 3 H W"]
) -> Float[Tensor, "B 4 DH DW"]:
input_dtype = imgs.dtype
imgs = imgs * 2.0 - 1.0
posterior = self.vae.encode(imgs.to(self.weights_dtype)).latent_dist
latents = posterior.sample() * self.vae.config.scaling_factor
return latents.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def encode_cond_images(
self, imgs: Float[Tensor, "B 3 H W"]
) -> Float[Tensor, "B 4 DH DW"]:
input_dtype = imgs.dtype
imgs = imgs * 2.0 - 1.0
posterior = self.vae.encode(imgs.to(self.weights_dtype)).latent_dist
latents = posterior.mode()
uncond_image_latents = torch.zeros_like(latents)
latents = torch.cat([latents, latents, uncond_image_latents], dim=0)
return latents.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def decode_latents(
self, latents: Float[Tensor, "B 4 DH DW"]
) -> Float[Tensor, "B 3 H W"]:
input_dtype = latents.dtype
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents.to(self.weights_dtype)).sample
image = (image * 0.5 + 0.5).clamp(0, 1)
return image.to(input_dtype)
    def edit_latents(
        self,
        text_embeddings: Float[Tensor, "BB 77 768"],
        latents: Float[Tensor, "B 4 DH DW"],
        image_cond_latents: Float[Tensor, "B 4 DH DW"],
        t: Int[Tensor, "B"],
    ) -> Float[Tensor, "B 4 DH DW"]:
        """Noise `latents` to level `t`, then run a short InstructPix2Pix
        denoising loop conditioned on `image_cond_latents` (SDEdit-style edit).

        NOTE(review): `t.item()` requires a single-element tensor, so this
        effectively assumes batch size 1 — verify against callers. Mutating
        `self.scheduler.config.num_train_timesteps` truncates the schedule so
        set_timesteps() only spans [0, t].
        """
        self.scheduler.config.num_train_timesteps = t.item()
        self.scheduler.set_timesteps(self.cfg.diffusion_steps)
        with torch.no_grad():
            # add noise up to level t
            noise = torch.randn_like(latents)
            latents = self.scheduler.add_noise(latents, noise, t)  # type: ignore

        threestudio.debug("Start editing...")
        # sections of code used from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py
        # (the loop variable `t` intentionally shadows the timestep argument)
        for i, t in enumerate(self.scheduler.timesteps):
            # predict the noise residual with unet, NO grad!
            with torch.no_grad():
                # triplicate latents for [text, image, uncond] CFG and append
                # the conditioning latents along channels
                latent_model_input = torch.cat([latents] * 3)
                latent_model_input = torch.cat(
                    [latent_model_input, image_cond_latents], dim=1
                )

                noise_pred = self.forward_unet(
                    latent_model_input, t, encoder_hidden_states=text_embeddings
                )

            # perform classifier-free guidance over both text and image conditions
            noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(
                3
            )
            noise_pred = (
                noise_pred_uncond
                + self.cfg.guidance_scale * (noise_pred_text - noise_pred_image)
                + self.cfg.condition_scale * (noise_pred_image - noise_pred_uncond)
            )

            # get previous sample, continue loop
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        threestudio.debug("Editing finished.")
        return latents
    def compute_grad_sds(
        self,
        text_embeddings: Float[Tensor, "BB 77 768"],
        latents: Float[Tensor, "B 4 DH DW"],
        image_cond_latents: Float[Tensor, "B 4 DH DW"],
        t: Int[Tensor, "B"],
    ):
        """Single-step SDS gradient: noise latents to level `t`, predict noise
        with InstructPix2Pix 3-way classifier-free guidance, and weight the
        residual (noise_pred - noise) by (1 - alpha_t)."""
        with torch.no_grad():
            # add noise
            noise = torch.randn_like(latents)  # TODO: use torch generator
            latents_noisy = self.scheduler.add_noise(latents, noise, t)
            # pred noise: triplicate for [text, image, uncond] and append
            # conditioning latents along channels
            latent_model_input = torch.cat([latents_noisy] * 3)
            latent_model_input = torch.cat(
                [latent_model_input, image_cond_latents], dim=1
            )
            noise_pred = self.forward_unet(
                latent_model_input, t, encoder_hidden_states=text_embeddings
            )

        # combine text and image guidance
        noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3)
        noise_pred = (
            noise_pred_uncond
            + self.cfg.guidance_scale * (noise_pred_text - noise_pred_image)
            + self.cfg.condition_scale * (noise_pred_image - noise_pred_uncond)
        )

        # dreamfusion-style weighting w(t) = 1 - alpha_t
        w = (1 - self.alphas[t]).view(-1, 1, 1, 1)
        grad = w * (noise_pred - noise)
        return grad
    def __call__(
        self,
        rgb: Float[Tensor, "B H W C"],
        cond_rgb: Float[Tensor, "B H W C"],
        prompt_utils: PromptProcessorOutput,
        **kwargs,
    ):
        """Compute either the SDS loss (cfg.use_sds) or directly-edited images
        for rendered views `rgb`, conditioned on the original views `cond_rgb`.

        Returns a dict with loss/diagnostics in SDS mode, or {"edit_images"}
        (B H W C, [0, 1]) in editing mode.
        """
        batch_size, H, W, _ = rgb.shape

        rgb_BCHW = rgb.permute(0, 3, 1, 2)
        latents: Float[Tensor, "B 4 DH DW"]
        # resize to a fixed size, or round down to a multiple of 8 (VAE stride)
        if self.cfg.fixed_size > 0:
            RH, RW = self.cfg.fixed_size, self.cfg.fixed_size
        else:
            RH, RW = H // 8 * 8, W // 8 * 8
        rgb_BCHW_HW8 = F.interpolate(
            rgb_BCHW, (RH, RW), mode="bilinear", align_corners=False
        )
        latents = self.encode_images(rgb_BCHW_HW8)

        cond_rgb_BCHW = cond_rgb.permute(0, 3, 1, 2)
        cond_rgb_BCHW_HW8 = F.interpolate(
            cond_rgb_BCHW,
            (RH, RW),
            mode="bilinear",
            align_corners=False,
        )
        cond_latents = self.encode_cond_images(cond_rgb_BCHW_HW8)

        # view-independent text embeddings (dummy zero camera parameters)
        temp = torch.zeros(1).to(rgb.device)
        text_embeddings = prompt_utils.get_text_embeddings(temp, temp, temp, False)
        text_embeddings = torch.cat(
            [text_embeddings, text_embeddings[-1:]], dim=0
        )  # [positive, negative, negative]

        # timestep ~ U(0.02, 0.98) to avoid very high/low noise level
        t = torch.randint(
            self.min_step,
            self.max_step + 1,
            [batch_size],
            dtype=torch.long,
            device=self.device,
        )

        if self.cfg.use_sds:
            grad = self.compute_grad_sds(text_embeddings, latents, cond_latents, t)
            grad = torch.nan_to_num(grad)
            if self.grad_clip_val is not None:
                grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
            # reparameterization trick: d(loss)/d(latents) == grad
            target = (latents - grad).detach()
            loss_sds = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size
            return {
                "loss_sds": loss_sds,
                "grad_norm": grad.norm(),
                "min_step": self.min_step,
                "max_step": self.max_step,
            }
        else:
            # NOTE(review): edit_latents calls t.item(), which appears to
            # assume batch_size == 1 here — confirm against callers
            edit_latents = self.edit_latents(text_embeddings, latents, cond_latents, t)
            edit_images = self.decode_latents(edit_latents)
            edit_images = F.interpolate(edit_images, (H, W), mode="bilinear")
            return {"edit_images": edit_images.permute(0, 2, 3, 1)}
    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        """Per-training-step schedule updates: anneal the gradient-clip value
        and the [min_step, max_step] timestep window via the C() scheduler."""
        # clip grad for stable training as demonstrated in
        # Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
        # http://arxiv.org/abs/2303.15413
        if self.cfg.grad_clip is not None:
            self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step)

        self.set_min_max_steps(
            min_step_percent=C(self.cfg.min_step_percent, epoch, global_step),
            max_step_percent=C(self.cfg.max_step_percent, epoch, global_step),
        )
if __name__ == "__main__":
    # Smoke test: load the debugging config, run the guidance on a single
    # image used as both render and condition, and dump the edited image.
    from threestudio.utils.config import ExperimentConfig, load_config
    from threestudio.utils.typing import Optional

    cfg = load_config("configs/debugging/instructpix2pix.yaml")
    guidance = threestudio.find(cfg.system.guidance_type)(cfg.system.guidance)
    prompt_processor = threestudio.find(cfg.system.prompt_processor_type)(
        cfg.system.prompt_processor
    )

    # BGR -> RGB, [0, 255] -> [0, 1], add batch dimension
    rgb_image = cv2.imread("assets/face.jpg")[:, :, ::-1].copy() / 255
    rgb_image = torch.FloatTensor(rgb_image).unsqueeze(0).to(guidance.device)
    prompt_utils = prompt_processor()
    guidance_out = guidance(rgb_image, rgb_image, prompt_utils)
    edit_image = (
        (
            # NOTE(review): __call__ already returns H W C; this extra
            # permute looks suspicious — confirm intended layout
            guidance_out["edit_images"][0]
            .permute(1, 2, 0)
            .detach()
            .cpu()
            .clip(0, 1)
            .numpy()
            * 255
        )
        .astype(np.uint8)[:, :, ::-1]  # RGB -> BGR for cv2
        .copy()
    )
    import os

    os.makedirs(".threestudio_cache", exist_ok=True)
    cv2.imwrite(".threestudio_cache/edit_image.jpg", edit_image)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/guidance/zero123_unified_guidance.py | threestudio/models/guidance/zero123_unified_guidance.py | import os
import random
import sys
from contextlib import contextmanager
from dataclasses import dataclass, field
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DPMSolverSinglestepScheduler,
UNet2DConditionModel,
)
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.models.embeddings import TimestepEmbedding
from PIL import Image
from tqdm import tqdm
import threestudio
from extern.zero123 import Zero123Pipeline
from threestudio.models.networks import ToDTypeWrapper
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseModule
from threestudio.utils.misc import C, cleanup, enable_gradient, parse_version
from threestudio.utils.typing import *
@threestudio.register("zero123-unified-guidance")
class Zero123UnifiedGuidance(BaseModule):
    @dataclass
    class Config(BaseModule.Config):
        # guidance type, in ["sds", "vsd"]
        guidance_type: str = "sds"

        pretrained_model_name_or_path: str = "bennyguo/zero123-diffusers"
        # classifier-free guidance scale for the pretrained model
        guidance_scale: float = 5.0
        # loss weighting, in ["dreamfusion", "uniform", "fantasia3d"] (see forward)
        weighting_strategy: str = "dreamfusion"

        # timestep window bounds as fractions; Any to allow C()-schedulable specs
        min_step_percent: Any = 0.02
        max_step_percent: Any = 0.98

        # optional gradient clipping value (C()-schedulable)
        grad_clip: Optional[Any] = None

        # whether to also return decoded 1-step / multi-step denoised images
        return_rgb_1step_orig: bool = False
        return_rgb_multistep_orig: bool = False
        n_rgb_multistep_orig_steps: int = 4

        # the single reference image and the camera pose it was captured from
        cond_image_path: str = ""
        cond_elevation_deg: float = 0.0
        cond_azimuth_deg: float = 0.0
        cond_camera_distance: float = 1.2

        # efficiency-related configurations
        half_precision_weights: bool = True

        # VSD configurations, only used when guidance_type is "vsd"
        vsd_phi_model_name_or_path: Optional[str] = None
        vsd_guidance_scale_phi: float = 1.0
        vsd_use_lora: bool = True
        vsd_lora_cfg_training: bool = False
        vsd_lora_n_timestamp_samples: int = 1
        vsd_use_camera_condition: bool = True
        # camera condition type, in ["extrinsics", "mvp", "spherical"]
        vsd_camera_condition_type: Optional[str] = "extrinsics"
cfg: Config
def configure(self) -> None:
self.min_step: Optional[int] = None
self.max_step: Optional[int] = None
self.grad_clip_val: Optional[float] = None
@dataclass
class NonTrainableModules:
pipe: Zero123Pipeline
pipe_phi: Optional[Zero123Pipeline] = None
self.weights_dtype = (
torch.float16 if self.cfg.half_precision_weights else torch.float32
)
threestudio.info(f"Loading Zero123 ...")
# need to make sure the pipeline file is in path
sys.path.append("extern/")
pipe_kwargs = {
"safety_checker": None,
"requires_safety_checker": False,
"variant": "fp16" if self.cfg.half_precision_weights else None,
"torch_dtype": self.weights_dtype,
}
pipe = Zero123Pipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe)
# phi network for VSD
# introduce two trainable modules:
# - self.camera_embedding
# - self.lora_layers
pipe_phi = None
# if the phi network shares the same unet with the pretrain network
# we need to pass additional cross attention kwargs to the unet
self.vsd_share_model = (
self.cfg.guidance_type == "vsd"
and self.cfg.vsd_phi_model_name_or_path is None
)
if self.cfg.guidance_type == "vsd":
if self.cfg.vsd_phi_model_name_or_path is None:
pipe_phi = pipe
else:
pipe_phi = Zero123Pipeline.from_pretrained(
self.cfg.vsd_phi_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe_phi)
# set up camera embedding
if self.cfg.vsd_use_camera_condition:
if self.cfg.vsd_camera_condition_type in ["extrinsics", "mvp"]:
self.camera_embedding_dim = 16
elif self.cfg.vsd_camera_condition_type == "spherical":
self.camera_embedding_dim = 4
else:
raise ValueError("Invalid camera condition type!")
# FIXME: hard-coded output dim
self.camera_embedding = ToDTypeWrapper(
TimestepEmbedding(self.camera_embedding_dim, 1280),
self.weights_dtype,
).to(self.device)
pipe_phi.unet.class_embedding = self.camera_embedding
if self.cfg.vsd_use_lora:
# set up LoRA layers
lora_attn_procs = {}
for name in pipe_phi.unet.attn_processors.keys():
cross_attention_dim = (
None
if name.endswith("attn1.processor")
else pipe_phi.unet.config.cross_attention_dim
)
if name.startswith("mid_block"):
hidden_size = pipe_phi.unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(
reversed(pipe_phi.unet.config.block_out_channels)
)[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = pipe_phi.unet.config.block_out_channels[block_id]
lora_attn_procs[name] = LoRAAttnProcessor(
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
)
pipe_phi.unet.set_attn_processor(lora_attn_procs)
self.lora_layers = AttnProcsLayers(pipe_phi.unet.attn_processors).to(
self.device
)
self.lora_layers._load_state_dict_pre_hooks.clear()
self.lora_layers._state_dict_hooks.clear()
threestudio.info(f"Loaded Stable Diffusion!")
self.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
self.num_train_timesteps = self.scheduler.config.num_train_timesteps
# q(z_t|x) = N(alpha_t x, sigma_t^2 I)
# in DDPM, alpha_t = sqrt(alphas_cumprod_t), sigma_t^2 = 1 - alphas_cumprod_t
self.alphas_cumprod: Float[Tensor, "T"] = self.scheduler.alphas_cumprod.to(
self.device
)
self.alphas: Float[Tensor, "T"] = self.alphas_cumprod**0.5
self.sigmas: Float[Tensor, "T"] = (1 - self.alphas_cumprod) ** 0.5
# log SNR
self.lambdas: Float[Tensor, "T"] = self.sigmas / self.alphas
self._non_trainable_modules = NonTrainableModules(
pipe=pipe,
pipe_phi=pipe_phi,
)
# self.clip_image_embeddings and self.image_latents
self.prepare_image_embeddings()
    @property
    def pipe(self) -> Zero123Pipeline:
        """The frozen pretrained Zero123 pipeline."""
        return self._non_trainable_modules.pipe
    @property
    def pipe_phi(self) -> Zero123Pipeline:
        """The phi pipeline used by VSD; raises if VSD was not configured."""
        if self._non_trainable_modules.pipe_phi is None:
            raise RuntimeError("phi model is not available.")
        return self._non_trainable_modules.pipe_phi
    def prepare_pipe(self, pipe: Zero123Pipeline):
        """Freeze all pipeline modules (eval mode, gradients off) and silence
        the pipeline's progress bar."""
        cleanup()

        pipe.image_encoder.eval()
        pipe.vae.eval()
        pipe.unet.eval()
        pipe.clip_camera_projection.eval()

        enable_gradient(pipe.image_encoder, enabled=False)
        enable_gradient(pipe.vae, enabled=False)
        enable_gradient(pipe.unet, enabled=False)
        enable_gradient(pipe.clip_camera_projection, enabled=False)

        # disable progress bar
        pipe.set_progress_bar_config(disable=True)
    def prepare_image_embeddings(self) -> None:
        """Load the conditioning image, composite it onto white, and cache its
        CLIP embedding (`self.clip_image_embeddings`) and VAE latents
        (`self.image_latents`)."""
        if not os.path.exists(self.cfg.cond_image_path):
            raise RuntimeError(
                f"Condition image not found at {self.cfg.cond_image_path}"
            )
        image = Image.open(self.cfg.cond_image_path).convert("RGBA").resize((256, 256))
        image = (
            TF.to_tensor(image)
            .unsqueeze(0)
            .to(device=self.device, dtype=self.weights_dtype)
        )
        # rgba -> rgb, apply white background
        image = image[:, :3] * image[:, 3:4] + (1 - image[:, 3:4])

        with torch.no_grad():
            self.clip_image_embeddings: Float[
                Tensor, "1 1 D"
            ] = self.extract_clip_image_embeddings(image)

            # encoded latents should be multiplied with vae.config.scaling_factor
            # but zero123 was not trained this way
            self.image_latents: Float[Tensor, "1 4 Hl Wl"] = (
                self.vae_encode(self.pipe.vae, image * 2.0 - 1.0, mode=True)
                / self.pipe.vae.config.scaling_factor
            )
    def extract_clip_image_embeddings(
        self, images: Float[Tensor, "B 3 H W"]
    ) -> Float[Tensor, "B 1 D"]:
        """Run the pipeline's CLIP image encoder on a batch of [0, 1] images,
        returning embeddings cast back to the input dtype."""
        # expect images in [0, 1]
        images_pil = [TF.to_pil_image(image) for image in images]
        images_processed = self.pipe.feature_extractor(
            images=images_pil, return_tensors="pt"
        ).pixel_values.to(device=self.device, dtype=self.weights_dtype)
        clip_image_embeddings = self.pipe.image_encoder(images_processed).image_embeds
        return clip_image_embeddings.to(images.dtype)
    def get_image_camera_embeddings(
        self,
        elevation_deg: Float[Tensor, "B"],
        azimuth_deg: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
    ) -> Float[Tensor, "B 1 D"]:
        """Fuse the cached CLIP image embedding with relative-camera features.

        The target camera is encoded relative to the conditioning view as
        (delta elevation [rad], sin/cos of delta azimuth, delta distance) and
        projected jointly with the CLIP embedding by clip_camera_projection.
        """
        batch_size = elevation_deg.shape[0]
        camera_embeddings: Float[Tensor, "B 1 4"] = torch.stack(
            [
                torch.deg2rad(self.cfg.cond_elevation_deg - elevation_deg),
                torch.sin(torch.deg2rad(azimuth_deg - self.cfg.cond_azimuth_deg)),
                torch.cos(torch.deg2rad(azimuth_deg - self.cfg.cond_azimuth_deg)),
                camera_distances - self.cfg.cond_camera_distance,
            ],
            dim=-1,
        )[:, None, :]

        image_camera_embeddings = self.pipe.clip_camera_projection(
            torch.cat(
                [
                    self.clip_image_embeddings.repeat(batch_size, 1, 1),
                    camera_embeddings,
                ],
                dim=-1,
            ).to(self.weights_dtype)
        )

        return image_camera_embeddings
    @torch.cuda.amp.autocast(enabled=False)
    def forward_unet(
        self,
        unet: UNet2DConditionModel,
        latents: Float[Tensor, "..."],
        t: Int[Tensor, "..."],
        encoder_hidden_states: Float[Tensor, "..."],
        class_labels: Optional[Float[Tensor, "..."]] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        down_block_additional_residuals: Optional[Float[Tensor, "..."]] = None,
        mid_block_additional_residual: Optional[Float[Tensor, "..."]] = None,
        velocity_to_epsilon: bool = False,
    ) -> Float[Tensor, "..."]:
        """Run `unet` in its own dtype and cast the prediction back;
        optionally convert a v-prediction into an epsilon prediction."""
        input_dtype = latents.dtype
        pred = unet(
            latents.to(unet.dtype),
            t.to(unet.dtype),
            encoder_hidden_states=encoder_hidden_states.to(unet.dtype),
            class_labels=class_labels,
            cross_attention_kwargs=cross_attention_kwargs,
            down_block_additional_residuals=down_block_additional_residuals,
            mid_block_additional_residual=mid_block_additional_residual,
        ).sample
        if velocity_to_epsilon:
            # eps_t = sigma_t * x_t + alpha_t * v_t
            pred = latents * self.sigmas[t].view(-1, 1, 1, 1) + pred * self.alphas[
                t
            ].view(-1, 1, 1, 1)
        return pred.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def vae_encode(
self, vae: AutoencoderKL, imgs: Float[Tensor, "B 3 H W"], mode=False
) -> Float[Tensor, "B 4 Hl Wl"]:
# expect input in [-1, 1]
input_dtype = imgs.dtype
posterior = vae.encode(imgs.to(vae.dtype)).latent_dist
if mode:
latents = posterior.mode()
else:
latents = posterior.sample()
latents = latents * vae.config.scaling_factor
return latents.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def vae_decode(
self, vae: AutoencoderKL, latents: Float[Tensor, "B 4 Hl Wl"]
) -> Float[Tensor, "B 3 H W"]:
# output in [0, 1]
input_dtype = latents.dtype
latents = 1 / vae.config.scaling_factor * latents
image = vae.decode(latents.to(vae.dtype)).sample
image = (image * 0.5 + 0.5).clamp(0, 1)
return image.to(input_dtype)
@contextmanager
def disable_unet_class_embedding(self, unet: UNet2DConditionModel):
class_embedding = unet.class_embedding
try:
unet.class_embedding = None
yield unet
finally:
unet.class_embedding = class_embedding
@contextmanager
def set_scheduler(self, pipe: Zero123Pipeline, scheduler_class: Any, **kwargs):
scheduler_orig = pipe.scheduler
pipe.scheduler = scheduler_class.from_config(scheduler_orig.config, **kwargs)
yield pipe
pipe.scheduler = scheduler_orig
    def get_eps_pretrain(
        self,
        latents_noisy: Float[Tensor, "B 4 Hl Wl"],
        t: Int[Tensor, "B"],
        image_camera_embeddings: Float[Tensor, "B 1 D"],
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
    ) -> Float[Tensor, "B 4 Hl Wl"]:
        """Epsilon prediction of the pretrained Zero123 with classifier-free
        guidance.

        Runs a 2x batch [conditional, unconditional] where the condition is
        (image latents concatenated along channels, camera-CLIP embedding as
        encoder hidden states); the class embedding is disabled so a shared
        (VSD) unet behaves as the pretrained model.
        """
        batch_size = latents_noisy.shape[0]
        with torch.no_grad():
            with self.disable_unet_class_embedding(self.pipe.unet) as unet:
                noise_pred = self.forward_unet(
                    unet,
                    torch.cat(
                        [
                            torch.cat([latents_noisy] * 2, dim=0),
                            torch.cat(
                                [
                                    self.image_latents.repeat(batch_size, 1, 1, 1),
                                    torch.zeros_like(self.image_latents).repeat(
                                        batch_size, 1, 1, 1
                                    ),
                                ],
                                dim=0,
                            ),
                        ],
                        dim=1,
                    ),
                    torch.cat([t] * 2, dim=0),
                    encoder_hidden_states=torch.cat(
                        [
                            image_camera_embeddings,
                            torch.zeros_like(image_camera_embeddings),
                        ],
                        dim=0,
                    ),
                    # scale=0 switches LoRA off when the unet is shared with phi
                    cross_attention_kwargs={"scale": 0.0}
                    if self.vsd_share_model
                    else None,
                    velocity_to_epsilon=self.pipe.scheduler.config.prediction_type
                    == "v_prediction",
                )
        noise_pred_image, noise_pred_uncond = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
            noise_pred_image - noise_pred_uncond
        )
        return noise_pred
    def get_eps_phi(
        self,
        latents_noisy: Float[Tensor, "B 4 Hl Wl"],
        t: Int[Tensor, "B"],
        image_camera_embeddings: Float[Tensor, "B 1 D"],
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        camera_condition: Float[Tensor, "B ..."],
    ) -> Float[Tensor, "B 4 Hl Wl"]:
        """Epsilon prediction of the phi (LoRA) network, with CFG over the
        camera condition (conditional vs zeroed class labels); image latents
        and CLIP embeddings are kept in both halves of the 2x batch."""
        batch_size = latents_noisy.shape[0]
        with torch.no_grad():
            noise_pred = self.forward_unet(
                self.pipe_phi.unet,
                torch.cat(
                    [
                        torch.cat([latents_noisy] * 2, dim=0),
                        torch.cat(
                            [self.image_latents.repeat(batch_size, 1, 1, 1)] * 2,
                            dim=0,
                        ),
                    ],
                    dim=1,
                ),
                torch.cat([t] * 2, dim=0),
                encoder_hidden_states=torch.cat([image_camera_embeddings] * 2, dim=0),
                class_labels=torch.cat(
                    [
                        camera_condition.view(batch_size, -1),
                        torch.zeros_like(camera_condition.view(batch_size, -1)),
                    ],
                    dim=0,
                )
                if self.cfg.vsd_use_camera_condition
                else None,
                # scale=1 keeps the LoRA layers active
                cross_attention_kwargs={"scale": 1.0},
                velocity_to_epsilon=self.pipe_phi.scheduler.config.prediction_type
                == "v_prediction",
            )
        noise_pred_camera, noise_pred_uncond = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + self.cfg.vsd_guidance_scale_phi * (
            noise_pred_camera - noise_pred_uncond
        )
        return noise_pred
def train_phi(
self,
latents: Float[Tensor, "B 4 Hl Wl"],
image_camera_embeddings: Float[Tensor, "B 1 D"],
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
camera_condition: Float[Tensor, "B ..."],
):
B = latents.shape[0]
latents = latents.detach().repeat(
self.cfg.vsd_lora_n_timestamp_samples, 1, 1, 1
)
num_train_timesteps = self.pipe_phi.scheduler.config.num_train_timesteps
t = torch.randint(
int(num_train_timesteps * 0.0),
int(num_train_timesteps * 1.0),
[B * self.cfg.vsd_lora_n_timestamp_samples],
dtype=torch.long,
device=self.device,
)
noise = torch.randn_like(latents)
latents_noisy = self.pipe_phi.scheduler.add_noise(latents, noise, t)
if self.pipe_phi.scheduler.config.prediction_type == "epsilon":
target = noise
elif self.pipe_phi.scheduler.prediction_type == "v_prediction":
target = self.pipe_phi.scheduler.get_velocity(latents, noise, t)
else:
raise ValueError(
f"Unknown prediction type {self.pipe_phi.scheduler.prediction_type}"
)
if (
self.cfg.vsd_use_camera_condition
and self.cfg.vsd_lora_cfg_training
and random.random() < 0.1
):
camera_condition = torch.zeros_like(camera_condition)
noise_pred = self.forward_unet(
self.pipe_phi.unet,
torch.cat([latents_noisy, self.image_latents.repeat(B, 1, 1, 1)], dim=1),
t,
encoder_hidden_states=image_camera_embeddings.repeat(
self.cfg.vsd_lora_n_timestamp_samples, 1, 1
),
class_labels=camera_condition.view(B, -1).repeat(
self.cfg.vsd_lora_n_timestamp_samples, 1
)
if self.cfg.vsd_use_camera_condition
else None,
cross_attention_kwargs={"scale": 1.0},
)
return F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
    def forward(
        self,
        rgb: Float[Tensor, "B H W C"],
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        mvp_mtx: Float[Tensor, "B 4 4"],
        c2w: Float[Tensor, "B 4 4"],
        rgb_as_latents=False,
        **kwargs,
    ):
        """Compute the unified SDS/VSD guidance loss for a batch of rendered
        views, plus diagnostics (and optional decoded 1-step / multi-step
        denoised previews)."""
        batch_size = rgb.shape[0]

        rgb_BCHW = rgb.permute(0, 3, 1, 2)
        latents: Float[Tensor, "B 4 32 32"]
        if rgb_as_latents:
            # treat input rgb as latents
            # input rgb should be in range [-1, 1]
            latents = F.interpolate(
                rgb_BCHW, (32, 32), mode="bilinear", align_corners=False
            )
        else:
            # treat input rgb as rgb
            # input rgb should be in range [0, 1]
            rgb_BCHW = F.interpolate(
                rgb_BCHW, (256, 256), mode="bilinear", align_corners=False
            )
            # encode image into latents with vae
            latents = self.vae_encode(self.pipe.vae, rgb_BCHW * 2.0 - 1.0)

        # sample timestep
        # use the same timestep for each batch
        assert self.min_step is not None and self.max_step is not None
        t = torch.randint(
            self.min_step,
            self.max_step + 1,
            [1],
            dtype=torch.long,
            device=self.device,
        ).repeat(batch_size)

        # sample noise
        noise = torch.randn_like(latents)
        latents_noisy = self.scheduler.add_noise(latents, noise, t)

        # image-camera feature condition
        image_camera_embeddings = self.get_image_camera_embeddings(
            elevation, azimuth, camera_distances
        )

        eps_pretrain = self.get_eps_pretrain(
            latents_noisy,
            t,
            image_camera_embeddings,
            elevation,
            azimuth,
            camera_distances,
        )

        # one-step denoised estimate: x0 = (x_t - sigma_t * eps) / alpha_t
        latents_1step_orig = (
            1
            / self.alphas[t].view(-1, 1, 1, 1)
            * (latents_noisy - self.sigmas[t].view(-1, 1, 1, 1) * eps_pretrain)
        ).detach()

        if self.cfg.guidance_type == "sds":
            # SDS: the "score" to subtract is just the sampled noise
            eps_phi = noise
        elif self.cfg.guidance_type == "vsd":
            if self.cfg.vsd_camera_condition_type == "extrinsics":
                camera_condition = c2w
            elif self.cfg.vsd_camera_condition_type == "mvp":
                camera_condition = mvp_mtx
            elif self.cfg.vsd_camera_condition_type == "spherical":
                camera_condition = torch.stack(
                    [
                        torch.deg2rad(elevation),
                        torch.sin(torch.deg2rad(azimuth)),
                        torch.cos(torch.deg2rad(azimuth)),
                        camera_distances,
                    ],
                    dim=-1,
                )
            else:
                raise ValueError(
                    f"Unknown camera_condition_type {self.cfg.vsd_camera_condition_type}"
                )
            eps_phi = self.get_eps_phi(
                latents_noisy,
                t,
                image_camera_embeddings,
                elevation,
                azimuth,
                camera_distances,
                camera_condition,
            )

            loss_train_phi = self.train_phi(
                latents,
                image_camera_embeddings,
                elevation,
                azimuth,
                camera_distances,
                camera_condition,
            )

        if self.cfg.weighting_strategy == "dreamfusion":
            w = (1.0 - self.alphas[t]).view(-1, 1, 1, 1)
        elif self.cfg.weighting_strategy == "uniform":
            w = 1.0
        elif self.cfg.weighting_strategy == "fantasia3d":
            w = (self.alphas[t] ** 0.5 * (1 - self.alphas[t])).view(-1, 1, 1, 1)
        else:
            raise ValueError(
                f"Unknown weighting strategy: {self.cfg.weighting_strategy}"
            )

        grad = w * (eps_pretrain - eps_phi)

        if self.grad_clip_val is not None:
            grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)

        # reparameterization trick:
        # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
        target = (latents - grad).detach()
        loss_sd = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size

        guidance_out = {
            "loss_sd": loss_sd,
            "grad_norm": grad.norm(),
            "timesteps": t,
            "min_step": self.min_step,
            "max_step": self.max_step,
            "latents": latents,
            "latents_1step_orig": latents_1step_orig,
            "rgb": rgb_BCHW.permute(0, 2, 3, 1),
            "weights": w,
            "lambdas": self.lambdas[t],
        }

        if self.cfg.return_rgb_1step_orig:
            with torch.no_grad():
                rgb_1step_orig = self.vae_decode(
                    self.pipe.vae, latents_1step_orig
                ).permute(0, 2, 3, 1)
            guidance_out.update({"rgb_1step_orig": rgb_1step_orig})

        if self.cfg.return_rgb_multistep_orig:
            # run a short multi-step sampler from the current noisy latents,
            # temporarily swapping in a single-step DPM-Solver scheduler
            with self.set_scheduler(
                self.pipe,
                DPMSolverSinglestepScheduler,
                solver_order=1,
                num_train_timesteps=int(t[0]),
            ) as pipe:
                with torch.cuda.amp.autocast(enabled=False):
                    latents_multistep_orig = pipe(
                        num_inference_steps=self.cfg.n_rgb_multistep_orig_steps,
                        guidance_scale=self.cfg.guidance_scale,
                        eta=1.0,
                        latents=latents_noisy.to(pipe.unet.dtype),
                        image_camera_embeddings=image_camera_embeddings.to(
                            pipe.unet.dtype
                        ),
                        image_latents=self.image_latents.repeat(batch_size, 1, 1, 1).to(
                            pipe.unet.dtype
                        ),
                        cross_attention_kwargs={"scale": 0.0}
                        if self.vsd_share_model
                        else None,
                        output_type="latent",
                    ).images.to(latents.dtype)
            with torch.no_grad():
                rgb_multistep_orig = self.vae_decode(
                    self.pipe.vae, latents_multistep_orig
                )
            guidance_out.update(
                {
                    "latents_multistep_orig": latents_multistep_orig,
                    "rgb_multistep_orig": rgb_multistep_orig.permute(0, 2, 3, 1),
                }
            )

        if self.cfg.guidance_type == "vsd":
            guidance_out.update(
                {
                    "loss_train_phi": loss_train_phi,
                }
            )

        return guidance_out
    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        """Per-training-step schedule updates: anneal the gradient-clip value
        and the [min_step, max_step] timestep window via the C() scheduler."""
        # clip grad for stable training as demonstrated in
        # Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
        # http://arxiv.org/abs/2303.15413
        if self.cfg.grad_clip is not None:
            self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step)

        self.min_step = int(
            self.num_train_timesteps * C(self.cfg.min_step_percent, epoch, global_step)
        )
        self.max_step = int(
            self.num_train_timesteps * C(self.cfg.max_step_percent, epoch, global_step)
        )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/prompt_processors/dummy_prompt_processor.py | threestudio/models/prompt_processors/dummy_prompt_processor.py | import json
import os
from dataclasses import dataclass
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessor, hash_prompt
from threestudio.utils.misc import cleanup
from threestudio.utils.typing import *
@threestudio.register("dummy-prompt-processor")
class DummyPromptProcessor(PromptProcessor):
    """Placeholder prompt processor with empty model path and prompt;
    all behavior is inherited from PromptProcessor."""

    @dataclass
    class Config(PromptProcessor.Config):
        pretrained_model_name_or_path: str = ""
        prompt: str = ""

    cfg: Config
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/prompt_processors/deepfloyd_prompt_processor.py | threestudio/models/prompt_processors/deepfloyd_prompt_processor.py | import json
import os
from dataclasses import dataclass
import torch
import torch.nn as nn
from diffusers import IFPipeline
from transformers import T5EncoderModel, T5Tokenizer
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessor, hash_prompt
from threestudio.utils.misc import cleanup
from threestudio.utils.typing import *
@threestudio.register("deep-floyd-prompt-processor")
class DeepFloydPromptProcessor(PromptProcessor):
    """Prompt processor that computes T5 text embeddings for DeepFloyd-IF
    and caches them to disk (see spawn_func)."""

    @dataclass
    class Config(PromptProcessor.Config):
        pretrained_model_name_or_path: str = "DeepFloyd/IF-I-XL-v1.0"

    cfg: Config

    ### these functions are unused, kept for debugging ###
    def configure_text_encoder(self) -> None:
        """Load the 8-bit T5 encoder and an IF pipeline (debugging only)."""
        os.environ["TOKENIZERS_PARALLELISM"] = "false"
        self.text_encoder = T5EncoderModel.from_pretrained(
            self.cfg.pretrained_model_name_or_path,
            subfolder="text_encoder",
            load_in_8bit=True,
            variant="8bit",
            device_map="auto",
        )  # FIXME: behavior of auto device map in multi-GPU training
        self.pipe = IFPipeline.from_pretrained(
            self.cfg.pretrained_model_name_or_path,
            text_encoder=self.text_encoder,  # pass the previously instantiated 8bit text encoder
            unet=None,
        )

    def destroy_text_encoder(self) -> None:
        """Release the text encoder and pipeline (debugging only)."""
        del self.text_encoder
        del self.pipe
        cleanup()

    def get_text_embeddings(
        self, prompt: Union[str, List[str]], negative_prompt: Union[str, List[str]]
    ) -> Tuple[Float[Tensor, "B 77 4096"], Float[Tensor, "B 77 4096"]]:
        """Encode prompt and negative prompt via the IF pipeline (debugging only)."""
        text_embeddings, uncond_text_embeddings = self.pipe.encode_prompt(
            prompt=prompt, negative_prompt=negative_prompt, device=self.device
        )
        return text_embeddings, uncond_text_embeddings

    ###

    @staticmethod
    def spawn_func(pretrained_model_name_or_path, prompts, cache_dir):
        """Tokenize `prompts`, run the 8-bit T5 encoder, and save one embedding
        file per prompt under `cache_dir`, keyed by hash_prompt().

        Static so it can run in a spawned subprocess.
        """
        max_length = 77
        tokenizer = T5Tokenizer.from_pretrained(
            pretrained_model_name_or_path, subfolder="tokenizer"
        )
        text_encoder = T5EncoderModel.from_pretrained(
            pretrained_model_name_or_path,
            subfolder="text_encoder",
            torch_dtype=torch.float16,  # suppress warning
            load_in_8bit=True,
            variant="8bit",
            device_map="auto",
        )
        with torch.no_grad():
            text_inputs = tokenizer(
                prompts,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            attention_mask = text_inputs.attention_mask
            text_embeddings = text_encoder(
                text_input_ids.to(text_encoder.device),
                attention_mask=attention_mask.to(text_encoder.device),
            )
            text_embeddings = text_embeddings[0]

        for prompt, embedding in zip(prompts, text_embeddings):
            torch.save(
                embedding,
                os.path.join(
                    cache_dir,
                    f"{hash_prompt(pretrained_model_name_or_path, prompt)}.pt",
                ),
            )

        del text_encoder
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/prompt_processors/__init__.py | threestudio/models/prompt_processors/__init__.py | from . import (
base,
deepfloyd_prompt_processor,
dummy_prompt_processor,
stable_diffusion_prompt_processor,
)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/prompt_processors/base.py | threestudio/models/prompt_processors/base.py | import json
import os
from dataclasses import dataclass, field
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from transformers import AutoTokenizer, BertForMaskedLM
import threestudio
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import barrier, cleanup, get_rank
from threestudio.utils.ops import shifted_cosine_decay, shifted_expotional_decay
from threestudio.utils.typing import *
def hash_prompt(model: str, prompt: str) -> str:
    """Return the MD5 hex digest of "model-prompt", used as a cache-file key."""
    import hashlib

    return hashlib.md5(f"{model}-{prompt}".encode()).hexdigest()
@dataclass
class DirectionConfig:
    """A view direction: its name, prompt builders, and the predicate that
    selects which cameras belong to it."""

    name: str
    # map the base prompt to its view-dependent variant
    prompt: Callable[[str], str]
    negative_prompt: Callable[[str], str]
    # (elevation, azimuth, camera_distances) -> boolean mask for this direction
    condition: Callable[
        [Float[Tensor, "B"], Float[Tensor, "B"], Float[Tensor, "B"]],
        Float[Tensor, "B"],
    ]
@dataclass
class PromptProcessorOutput:
text_embeddings: Float[Tensor, "N Nf"]
uncond_text_embeddings: Float[Tensor, "N Nf"]
text_embeddings_vd: Float[Tensor, "Nv N Nf"]
uncond_text_embeddings_vd: Float[Tensor, "Nv N Nf"]
directions: List[DirectionConfig]
direction2idx: Dict[str, int]
use_perp_neg: bool
perp_neg_f_sb: Tuple[float, float, float]
perp_neg_f_fsb: Tuple[float, float, float]
perp_neg_f_fs: Tuple[float, float, float]
perp_neg_f_sf: Tuple[float, float, float]
prompts_vd:List[str]
    def get_text_embeddings(
        self,
        elevation: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        camera_distances: Float[Tensor, "B"],
        view_dependent_prompting: bool = True,
    ) -> Float[Tensor, "BB N Nf"]:
        """Return stacked conditional and unconditional text embeddings.

        With view-dependent prompting, each view is assigned a direction via
        the direction conditions and receives that direction's embeddings;
        otherwise the base prompt embeddings are broadcast over the batch.
        """
        batch_size = elevation.shape[0]

        if view_dependent_prompting:
            # Get direction: later directions in the list overwrite earlier
            # matches for the same view
            direction_idx = torch.zeros_like(elevation, dtype=torch.long)
            for d in self.directions:
                direction_idx[
                    d.condition(elevation, azimuth, camera_distances)
                ] = self.direction2idx[d.name]

            # Get text embeddings
            text_embeddings = self.text_embeddings_vd[direction_idx]  # type: ignore
            uncond_text_embeddings = self.uncond_text_embeddings_vd[direction_idx]  # type: ignore
        else:
            text_embeddings = self.text_embeddings.expand(batch_size, -1, -1)  # type: ignore
            uncond_text_embeddings = self.uncond_text_embeddings.expand(  # type: ignore
                batch_size, -1, -1
            )

        # IMPORTANT: we return (cond, uncond), which is in different order than other implementations!
        return torch.cat([text_embeddings, uncond_text_embeddings], dim=0)
def get_text_embeddings_perp_neg(
self,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
view_dependent_prompting: bool = True,
) -> Tuple[Float[Tensor, "BBBB N Nf"], Float[Tensor, "B 2"]]:
assert (
view_dependent_prompting
), "Perp-Neg only works with view-dependent prompting"
batch_size = elevation.shape[0]
direction_idx = torch.zeros_like(elevation, dtype=torch.long)
for d in self.directions:
direction_idx[
d.condition(elevation, azimuth, camera_distances)
] = self.direction2idx[d.name]
# 0 - side view
# 1 - front view
# 2 - back view
# 3 - overhead view
pos_text_embeddings = []
neg_text_embeddings = []
neg_guidance_weights = []
uncond_text_embeddings = []
side_emb = self.text_embeddings_vd[0]
front_emb = self.text_embeddings_vd[1]
back_emb = self.text_embeddings_vd[2]
overhead_emb = self.text_embeddings_vd[3]
for idx, ele, azi, dis in zip(
direction_idx, elevation, azimuth, camera_distances
):
azi = shift_azimuth_deg(azi) # to (-180, 180)
uncond_text_embeddings.append(
self.uncond_text_embeddings_vd[idx]
) # should be ""
if idx.item() == 3: # overhead view
pos_text_embeddings.append(overhead_emb) # side view
# dummy
neg_text_embeddings += [
self.uncond_text_embeddings_vd[idx],
self.uncond_text_embeddings_vd[idx],
]
neg_guidance_weights += [0.0, 0.0]
else: # interpolating views
if torch.abs(azi) < 90:
# front-side interpolation
# 0 - complete side, 1 - complete front
r_inter = 1 - torch.abs(azi) / 90
pos_text_embeddings.append(
r_inter * front_emb + (1 - r_inter) * side_emb
)
neg_text_embeddings += [front_emb, side_emb]
neg_guidance_weights += [
-shifted_expotional_decay(*self.perp_neg_f_fs, r_inter),
-shifted_expotional_decay(*self.perp_neg_f_sf, 1 - r_inter),
]
else:
# side-back interpolation
# 0 - complete back, 1 - complete side
r_inter = 2.0 - torch.abs(azi) / 90
pos_text_embeddings.append(
r_inter * side_emb + (1 - r_inter) * back_emb
)
neg_text_embeddings += [side_emb, front_emb]
neg_guidance_weights += [
-shifted_expotional_decay(*self.perp_neg_f_sb, r_inter),
-shifted_expotional_decay(*self.perp_neg_f_fsb, r_inter),
]
text_embeddings = torch.cat(
[
torch.stack(pos_text_embeddings, dim=0),
torch.stack(uncond_text_embeddings, dim=0),
torch.stack(neg_text_embeddings, dim=0),
],
dim=0,
)
return text_embeddings, torch.as_tensor(
neg_guidance_weights, device=elevation.device
).reshape(batch_size, 2)
def shift_azimuth_deg(azimuth: "Float[Tensor, '...']") -> "Float[Tensor, '...']":
    """Wrap azimuth angles (in degrees) into the interval [-180, 180)."""
    wrapped = (azimuth + 180) % 360
    return wrapped - 180
class PromptProcessor(BaseObject):
    """Base class that turns a (possibly view-dependent) text prompt into
    cached text embeddings.

    Embeddings are computed by `spawn_func` (subclass responsibility), by
    default inside a spawned subprocess so the text encoder's memory is
    released after encoding, and are cached on disk keyed by
    `hash_prompt(model, prompt)`.
    """

    @dataclass
    class Config(BaseObject.Config):
        prompt: str = "a hamburger"

        # manually assigned view-dependent prompts
        prompt_front: Optional[str] = None
        prompt_side: Optional[str] = None
        prompt_back: Optional[str] = None
        prompt_overhead: Optional[str] = None
        negative_prompt: str = ""
        pretrained_model_name_or_path: str = "runwayml/stable-diffusion-v1-5"
        # elevation (deg) above which a pose counts as "overhead"
        overhead_threshold: float = 60.0
        # |azimuth| (deg) below which a pose counts as "front"
        front_threshold: float = 45.0
        # 180 - |azimuth| (deg) below which a pose counts as "back"
        back_threshold: float = 45.0
        # if True, use "<view> view of <prompt>"; otherwise "<prompt>, <view> view"
        view_dependent_prompt_front: bool = False
        use_cache: bool = True
        # compute embeddings in a spawned subprocess (frees encoder memory)
        spawn: bool = True

        # perp neg
        use_perp_neg: bool = False
        # a*e(-b*r) + c
        # a * e(-b) + c = 0
        perp_neg_f_sb: Tuple[float, float, float] = (1, 0.5, -0.606)
        perp_neg_f_fsb: Tuple[float, float, float] = (1, 0.5, +0.967)
        perp_neg_f_fs: Tuple[float, float, float] = (
            4,
            0.5,
            -2.426,
        )  # f_fs(1) = 0, a, b > 0
        perp_neg_f_sf: Tuple[float, float, float] = (4, 0.5, -2.426)

        # prompt debiasing
        use_prompt_debiasing: bool = False
        pretrained_model_name_or_path_prompt_debiasing: str = "bert-base-uncased"
        # index of words that can potentially be removed
        prompt_debiasing_mask_ids: Optional[List[int]] = None

    cfg: Config

    @rank_zero_only
    def configure_text_encoder(self) -> None:
        # subclass hook: load tokenizer + text encoder onto self.device
        raise NotImplementedError

    @rank_zero_only
    def destroy_text_encoder(self) -> None:
        # subclass hook: free the text encoder
        raise NotImplementedError

    def configure(self) -> None:
        """Build direction configs, resolve prompts, then compute/load the
        cached embeddings."""
        self._cache_dir = ".threestudio_cache/text_embeddings"  # FIXME: hard-coded path

        # view-dependent text embeddings
        self.directions: List[DirectionConfig]
        if self.cfg.view_dependent_prompt_front:
            # "<view> view of <prompt>" phrasing. Note "side" is the catch-all
            # (condition always True); later entries overwrite it.
            self.directions = [
                DirectionConfig(
                    "side",
                    lambda s: f"side view of {s}",
                    lambda s: s,
                    lambda ele, azi, dis: torch.ones_like(ele, dtype=torch.bool),
                ),
                DirectionConfig(
                    "front",
                    lambda s: f"front view of {s}",
                    lambda s: s,
                    lambda ele, azi, dis: (
                        shift_azimuth_deg(azi) > -self.cfg.front_threshold
                    )
                    & (shift_azimuth_deg(azi) < self.cfg.front_threshold),
                ),
                DirectionConfig(
                    "back",
                    lambda s: f"backside view of {s}",
                    lambda s: s,
                    lambda ele, azi, dis: (
                        shift_azimuth_deg(azi) > 180 - self.cfg.back_threshold
                    )
                    | (shift_azimuth_deg(azi) < -180 + self.cfg.back_threshold),
                ),
                DirectionConfig(
                    "overhead",
                    lambda s: f"overhead view of {s}",
                    lambda s: s,
                    lambda ele, azi, dis: ele > self.cfg.overhead_threshold,
                ),
            ]
        else:
            # "<prompt>, <view> view" phrasing; same condition logic as above.
            self.directions = [
                DirectionConfig(
                    "side",
                    lambda s: f"{s}, side view",
                    lambda s: s,
                    lambda ele, azi, dis: torch.ones_like(ele, dtype=torch.bool),
                ),
                DirectionConfig(
                    "front",
                    lambda s: f"{s}, front view",
                    lambda s: s,
                    lambda ele, azi, dis: (
                        shift_azimuth_deg(azi) > -self.cfg.front_threshold
                    )
                    & (shift_azimuth_deg(azi) < self.cfg.front_threshold),
                ),
                DirectionConfig(
                    "back",
                    lambda s: f"{s}, back view",
                    lambda s: s,
                    lambda ele, azi, dis: (
                        shift_azimuth_deg(azi) > 180 - self.cfg.back_threshold
                    )
                    | (shift_azimuth_deg(azi) < -180 + self.cfg.back_threshold),
                ),
                DirectionConfig(
                    "overhead",
                    lambda s: f"{s}, overhead view",
                    lambda s: s,
                    lambda ele, azi, dis: ele > self.cfg.overhead_threshold,
                ),
            ]

        self.direction2idx = {d.name: i for i, d in enumerate(self.directions)}

        with open(os.path.join("load/prompt_library.json"), "r") as f:
            self.prompt_library = json.load(f)
        # use provided prompt or find prompt in library
        self.prompt = self.preprocess_prompt(self.cfg.prompt)
        # use provided negative prompt
        self.negative_prompt = self.cfg.negative_prompt

        threestudio.info(
            f"Using prompt [{self.prompt}] and negative prompt [{self.negative_prompt}]"
        )

        # view-dependent prompting
        if self.cfg.use_prompt_debiasing:
            assert (
                self.cfg.prompt_side is None
                and self.cfg.prompt_back is None
                and self.cfg.prompt_overhead is None
            ), "Do not manually assign prompt_side, prompt_back or prompt_overhead when using prompt debiasing"
            prompts = self.get_debiased_prompt(self.prompt)
            self.prompts_vd = [
                d.prompt(prompt) for d, prompt in zip(self.directions, prompts)
            ]
        else:
            # manual per-view prompt wins over the generated one
            self.prompts_vd = [
                self.cfg.get(f"prompt_{d.name}", None) or d.prompt(self.prompt)  # type: ignore
                for d in self.directions
            ]

        prompts_vd_display = " ".join(
            [
                f"[{d.name}]:[{prompt}]"
                for prompt, d in zip(self.prompts_vd, self.directions)
            ]
        )
        threestudio.info(f"Using view-dependent prompts {prompts_vd_display}")

        self.negative_prompts_vd = [
            d.negative_prompt(self.negative_prompt) for d in self.directions
        ]

        self.prepare_text_embeddings()
        self.load_text_embeddings()

    @staticmethod
    def spawn_func(pretrained_model_name_or_path, prompts, cache_dir):
        # subclass hook: encode `prompts` and write one cache file per prompt
        raise NotImplementedError

    @rank_zero_only
    def prepare_text_embeddings(self):
        """Compute and cache embeddings for every prompt not already cached."""
        os.makedirs(self._cache_dir, exist_ok=True)

        all_prompts = (
            [self.prompt]
            + [self.negative_prompt]
            + self.prompts_vd
            + self.negative_prompts_vd
        )
        prompts_to_process = []
        for prompt in all_prompts:
            if self.cfg.use_cache:
                # some text embeddings are already in cache
                # do not process them
                cache_path = os.path.join(
                    self._cache_dir,
                    f"{hash_prompt(self.cfg.pretrained_model_name_or_path, prompt)}.pt",
                )
                if os.path.exists(cache_path):
                    threestudio.debug(
                        f"Text embeddings for model {self.cfg.pretrained_model_name_or_path} and prompt [{prompt}] are already in cache, skip processing."
                    )
                    continue
            prompts_to_process.append(prompt)

        if len(prompts_to_process) > 0:
            if self.cfg.spawn:
                # run encoding in a fresh "spawn" process so the text
                # encoder's memory is fully reclaimed when it exits
                ctx = mp.get_context("spawn")
                subprocess = ctx.Process(
                    target=self.spawn_func,
                    args=(
                        self.cfg.pretrained_model_name_or_path,
                        prompts_to_process,
                        self._cache_dir,
                    ),
                )
                subprocess.start()
                subprocess.join()
            else:
                self.spawn_func(
                    self.cfg.pretrained_model_name_or_path,
                    prompts_to_process,
                    self._cache_dir,
                )
            cleanup()

    def load_text_embeddings(self):
        """Load all cached embeddings for the resolved prompts onto the device."""
        # synchronize, to ensure the text embeddings have been computed and saved to cache
        barrier()
        self.text_embeddings = self.load_from_cache(self.prompt)[None, ...]
        self.uncond_text_embeddings = self.load_from_cache(self.negative_prompt)[
            None, ...
        ]
        self.text_embeddings_vd = torch.stack(
            [self.load_from_cache(prompt) for prompt in self.prompts_vd], dim=0
        )
        self.uncond_text_embeddings_vd = torch.stack(
            [self.load_from_cache(prompt) for prompt in self.negative_prompts_vd], dim=0
        )
        threestudio.debug(f"Loaded text embeddings.")

    def load_from_cache(self, prompt):
        """Load one cached embedding tensor; raise FileNotFoundError if absent."""
        cache_path = os.path.join(
            self._cache_dir,
            f"{hash_prompt(self.cfg.pretrained_model_name_or_path, prompt)}.pt",
        )
        if not os.path.exists(cache_path):
            raise FileNotFoundError(
                f"Text embedding file {cache_path} for model {self.cfg.pretrained_model_name_or_path} and prompt [{prompt}] not found."
            )
        return torch.load(cache_path, map_location=self.device)

    def preprocess_prompt(self, prompt: str) -> str:
        """Resolve "lib:kw1_kw2..." prompts against the prompt library; other
        prompts pass through unchanged. Raises ValueError on zero or multiple
        library matches."""
        if prompt.startswith("lib:"):
            # find matches in the library
            candidate = None
            keywords = prompt[4:].lower().split("_")
            for prompt in self.prompt_library["dreamfusion"]:
                if all([k in prompt.lower() for k in keywords]):
                    if candidate is not None:
                        raise ValueError(
                            f"Multiple prompts matched with keywords {keywords} in library"
                        )
                    candidate = prompt
            if candidate is None:
                raise ValueError(
                    f"Cannot find prompt with keywords {keywords} in library"
                )
            threestudio.info("Find matched prompt in library: " + candidate)
            return candidate
        else:
            return prompt

    def get_text_embeddings(
        self, prompt: Union[str, List[str]], negative_prompt: Union[str, List[str]]
    ) -> Tuple[Float[Tensor, "B ..."], Float[Tensor, "B ..."]]:
        # subclass hook: encode prompts directly (no caching)
        raise NotImplementedError

    def get_debiased_prompt(self, prompt: str) -> List[str]:
        """Use a masked LM to drop words that bias the prompt toward a
        particular view, returning one debiased prompt per direction."""
        os.environ["TOKENIZERS_PARALLELISM"] = "false"

        tokenizer = AutoTokenizer.from_pretrained(
            self.cfg.pretrained_model_name_or_path_prompt_debiasing
        )
        model = BertForMaskedLM.from_pretrained(
            self.cfg.pretrained_model_name_or_path_prompt_debiasing
        )

        views = [d.name for d in self.directions]
        view_ids = tokenizer(" ".join(views), return_tensors="pt").input_ids[0]
        # skip the leading special token; assumes each of the 4 view names
        # tokenizes to exactly one id — TODO confirm for non-BERT tokenizers
        view_ids = view_ids[1:5]

        def modulate(prompt):
            # distribution over the four view words at the [MASK] position
            prompt_vd = f"This image is depicting a [MASK] view of {prompt}"
            tokens = tokenizer(
                prompt_vd,
                padding="max_length",
                truncation=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            mask_idx = torch.where(tokens.input_ids == tokenizer.mask_token_id)[1]

            logits = model(**tokens).logits
            logits = F.softmax(logits[0, mask_idx], dim=-1)
            logits = logits[0, view_ids]
            probes = logits / logits.sum()
            return probes

        prompts = [prompt.split(" ") for _ in range(4)]
        full_probe = modulate(prompt)
        n_words = len(prompt.split(" "))
        prompt_debiasing_mask_ids = (
            self.cfg.prompt_debiasing_mask_ids
            if self.cfg.prompt_debiasing_mask_ids is not None
            else list(range(n_words))
        )
        words_to_debias = [prompt.split(" ")[idx] for idx in prompt_debiasing_mask_ids]
        threestudio.info(f"Words that can potentially be removed: {words_to_debias}")
        for idx in prompt_debiasing_mask_ids:
            words = prompt.split(" ")
            prompt_ = " ".join(words[:idx] + words[(idx + 1) :])
            part_probe = modulate(prompt_)

            # ratio of the full-prompt view distribution to the average of the
            # full and word-removed distributions; a value < 0.95 means the
            # word pulls probability away from that view, so drop it there
            pmi = full_probe / torch.lerp(part_probe, full_probe, 0.5)
            for i in range(pmi.shape[0]):
                if pmi[i].item() < 0.95:
                    prompts[i][idx] = ""

        debiased_prompts = [" ".join([word for word in p if word]) for p in prompts]
        for d, debiased_prompt in zip(views, debiased_prompts):
            threestudio.info(f"Debiased prompt of the {d} view is [{debiased_prompt}]")

        del tokenizer, model
        cleanup()

        return debiased_prompts

    def __call__(self) -> PromptProcessorOutput:
        """Bundle the cached embeddings and view metadata for guidance modules."""
        return PromptProcessorOutput(
            text_embeddings=self.text_embeddings,
            uncond_text_embeddings=self.uncond_text_embeddings,
            text_embeddings_vd=self.text_embeddings_vd,
            uncond_text_embeddings_vd=self.uncond_text_embeddings_vd,
            directions=self.directions,
            direction2idx=self.direction2idx,
            use_perp_neg=self.cfg.use_perp_neg,
            perp_neg_f_sb=self.cfg.perp_neg_f_sb,
            perp_neg_f_fsb=self.cfg.perp_neg_f_fsb,
            perp_neg_f_fs=self.cfg.perp_neg_f_fs,
            perp_neg_f_sf=self.cfg.perp_neg_f_sf,
            prompts_vd=self.prompts_vd
        )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/models/prompt_processors/stable_diffusion_prompt_processor.py | threestudio/models/prompt_processors/stable_diffusion_prompt_processor.py | import json
import os
from dataclasses import dataclass
import torch
import torch.nn as nn
from transformers import AutoTokenizer, CLIPTextModel
import threestudio
from threestudio.models.prompt_processors.base import PromptProcessor, hash_prompt
from threestudio.utils.misc import cleanup
from threestudio.utils.typing import *
@threestudio.register("stable-diffusion-prompt-processor")
class StableDiffusionPromptProcessor(PromptProcessor):
    """Prompt processor that encodes prompts with Stable Diffusion's CLIP
    text encoder (77 tokens x 768 features per prompt)."""

    @dataclass
    class Config(PromptProcessor.Config):
        pass

    cfg: Config

    ### these functions are unused, kept for debugging ###
    def configure_text_encoder(self) -> None:
        # Load tokenizer + frozen CLIP text encoder onto self.device.
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.cfg.pretrained_model_name_or_path, subfolder="tokenizer"
        )
        os.environ["TOKENIZERS_PARALLELISM"] = "false"
        self.text_encoder = CLIPTextModel.from_pretrained(
            self.cfg.pretrained_model_name_or_path, subfolder="text_encoder"
        ).to(self.device)

        for p in self.text_encoder.parameters():
            p.requires_grad_(False)

    def destroy_text_encoder(self) -> None:
        # Drop references and free cached GPU memory.
        del self.tokenizer
        del self.text_encoder
        cleanup()

    def get_text_embeddings(
        self, prompt: Union[str, List[str]], negative_prompt: Union[str, List[str]]
    ) -> Tuple[Float[Tensor, "B 77 768"], Float[Tensor, "B 77 768"]]:
        """Encode prompts and negative prompts with the loaded text encoder."""
        if isinstance(prompt, str):
            prompt = [prompt]
        if isinstance(negative_prompt, str):
            negative_prompt = [negative_prompt]
        # Tokenize text and get embeddings
        tokens = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        uncond_tokens = self.tokenizer(
            negative_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )

        with torch.no_grad():
            text_embeddings = self.text_encoder(tokens.input_ids.to(self.device))[0]
            uncond_text_embeddings = self.text_encoder(
                uncond_tokens.input_ids.to(self.device)
            )[0]

        return text_embeddings, uncond_text_embeddings

    ###

    @staticmethod
    def spawn_func(pretrained_model_name_or_path, prompts, cache_dir):
        """Encode `prompts` and save one embedding file per prompt to
        `cache_dir`, keyed by `hash_prompt`.

        Runs inside a spawned subprocess (see PromptProcessor) so the text
        encoder's memory is reclaimed once encoding finishes.
        """
        os.environ["TOKENIZERS_PARALLELISM"] = "false"
        tokenizer = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, subfolder="tokenizer"
        )
        text_encoder = CLIPTextModel.from_pretrained(
            pretrained_model_name_or_path,
            subfolder="text_encoder",
            device_map="auto",
        )

        with torch.no_grad():
            tokens = tokenizer(
                prompts,
                padding="max_length",
                max_length=tokenizer.model_max_length,
                return_tensors="pt",
            )
            text_embeddings = text_encoder(tokens.input_ids.to(text_encoder.device))[0]

        for prompt, embedding in zip(prompts, text_embeddings):
            torch.save(
                embedding,
                os.path.join(
                    cache_dir,
                    f"{hash_prompt(pretrained_model_name_or_path, prompt)}.pt",
                ),
            )

        del text_encoder
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/typing.py | threestudio/utils/typing.py | """
This module contains type annotations for the project, using
1. Python type hints (https://docs.python.org/3/library/typing.html) for Python objects
2. jaxtyping (https://github.com/google/jaxtyping/blob/main/API.md) for PyTorch tensors
Two types of type checking can be used:
1. Static type checking with mypy (install with pip and enabled as the default linter in VSCode)
2. Runtime type checking with typeguard (install with pip and triggered at runtime, mainly for tensor dtype and shape checking)
"""
# Basic types
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Literal,
NamedTuple,
NewType,
Optional,
Sized,
Tuple,
Type,
TypeVar,
Union,
)
# Tensor dtype
# for jaxtyping usage, see https://github.com/google/jaxtyping/blob/main/API.md
from jaxtyping import Bool, Complex, Float, Inexact, Int, Integer, Num, Shaped, UInt
# Config type
from omegaconf import DictConfig
# PyTorch Tensor type
from torch import Tensor
# Runtime type checking decorator
from typeguard import typechecked as typechecker
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/callbacks.py | threestudio/utils/callbacks.py | import os
import shutil
import subprocess
import pytorch_lightning
from threestudio.utils.config import dump_config
from threestudio.utils.misc import parse_version
if parse_version(pytorch_lightning.__version__) > parse_version("1.8"):
from pytorch_lightning.callbacks import Callback
else:
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn
class VersionedCallback(Callback):
    """Base callback that writes its outputs under an auto-versioned directory."""

    def __init__(self, save_root, version=None, use_version=True):
        self.save_root = save_root
        self._version = version
        self.use_version = use_version

    @property
    def version(self) -> int:
        """Get the experiment version.

        Returns:
            The experiment version if specified else the next version.
        """
        if self._version is None:
            self._version = self._get_next_version()
        return self._version

    def _get_next_version(self):
        # Scan save_root for existing "version_<n>" entries and pick n+1.
        if not os.path.isdir(self.save_root):
            return 0
        found = []
        for entry in os.listdir(self.save_root):
            base = os.path.basename(entry)
            if not base.startswith("version_"):
                continue
            stem = os.path.splitext(base)[0]
            found.append(int(stem.split("_")[1].replace("/", "")))
        return max(found) + 1 if found else 0

    @property
    def savedir(self):
        """Target directory, suffixed with the version unless disabled."""
        if not self.use_version:
            return self.save_root
        suffix = (
            self.version if isinstance(self.version, str) else f"version_{self.version}"
        )
        return os.path.join(self.save_root, suffix)
class CodeSnapshotCallback(VersionedCallback):
    """Snapshot the repository's source files into the run directory at fit start."""

    def __init__(self, save_root, version=None, use_version=True):
        super().__init__(save_root, version, use_version)

    def get_file_list(self):
        """Files to snapshot: git-tracked files (excluding load/) plus
        untracked-but-not-ignored ones."""
        return [
            b.decode()
            for b in set(
                subprocess.check_output(
                    'git ls-files -- ":!:load/*"', shell=True
                ).splitlines()
            )
            | set(  # hard code, TODO: use config to exclude folders or files
                subprocess.check_output(
                    "git ls-files --others --exclude-standard", shell=True
                ).splitlines()
            )
        ]

    @rank_zero_only
    def save_code_snapshot(self):
        """Copy each listed file into the versioned save directory."""
        os.makedirs(self.savedir, exist_ok=True)
        for f in self.get_file_list():
            if not os.path.exists(f) or os.path.isdir(f):
                continue
            os.makedirs(os.path.join(self.savedir, os.path.dirname(f)), exist_ok=True)
            shutil.copyfile(f, os.path.join(self.savedir, f))

    def on_fit_start(self, trainer, pl_module):
        try:
            self.save_code_snapshot()
        except Exception:
            # Best-effort: snapshotting must never abort training. Narrowed
            # from a bare `except:` so KeyboardInterrupt/SystemExit still
            # propagate.
            rank_zero_warn(
                "Code snapshot is not saved. Please make sure you have git installed and are in a git repository."
            )
class ConfigSnapshotCallback(VersionedCallback):
    """Persist both the parsed and the raw experiment config at fit start."""

    def __init__(self, config_path, config, save_root, version=None, use_version=True):
        super().__init__(save_root, version, use_version)
        self.config_path = config_path
        self.config = config

    @rank_zero_only
    def save_config_snapshot(self):
        target = self.savedir
        os.makedirs(target, exist_ok=True)
        # fully-resolved config first, then the raw file as supplied by the user
        dump_config(os.path.join(target, "parsed.yaml"), self.config)
        shutil.copyfile(self.config_path, os.path.join(target, "raw.yaml"))

    def on_fit_start(self, trainer, pl_module):
        self.save_config_snapshot()
class CustomProgressBar(TQDMProgressBar):
    """TQDM progress bar that hides the logger version number."""

    def get_metrics(self, *args, **kwargs):
        # drop "v_num" so the bar is not cluttered with the experiment version
        metrics = super().get_metrics(*args, **kwargs)
        metrics.pop("v_num", None)
        return metrics
class ProgressCallback(Callback):
    """Write a single human-readable progress line to a file, overwriting it
    on every update (for external processes to poll)."""

    def __init__(self, save_path):
        super().__init__()
        self.save_path = save_path
        self._file_handle = None

    @property
    def file_handle(self):
        # Opened lazily so only the rank that actually writes creates the file.
        if self._file_handle is None:
            self._file_handle = open(self.save_path, "w")
        return self._file_handle

    @rank_zero_only
    def write(self, msg: str) -> None:
        # Rewrite the whole file so it always contains exactly one status line.
        self.file_handle.seek(0)
        self.file_handle.truncate()
        self.file_handle.write(msg)
        self.file_handle.flush()

    def _close(self) -> None:
        # Release the lazily-opened handle; the original leaked it.
        if self._file_handle is not None:
            self._file_handle.close()
            self._file_handle = None

    def teardown(self, trainer, pl_module, stage) -> None:
        self._close()

    def __del__(self):
        self._close()

    @rank_zero_only
    def on_train_batch_end(self, trainer, pl_module, *args, **kwargs):
        self.write(
            f"Generation progress: {pl_module.true_global_step / trainer.max_steps * 100:.2f}%"
        )

    @rank_zero_only
    def on_validation_start(self, trainer, pl_module):
        self.write(f"Rendering validation image ...")

    @rank_zero_only
    def on_test_start(self, trainer, pl_module):
        self.write(f"Rendering video ...")

    @rank_zero_only
    def on_predict_start(self, trainer, pl_module):
        self.write(f"Exporting mesh assets ...")
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/ops.py | threestudio/utils/ops.py | from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from igl import fast_winding_number_for_meshes, point_mesh_squared_distance, read_obj
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
import threestudio
from threestudio.utils.typing import *
def dot(x, y):
    """Batched dot product over the last dimension, keeping that dimension."""
    return (x * y).sum(dim=-1, keepdim=True)
def reflect(x, n):
    """Reflect vectors ``x`` about (unit) normals ``n``: 2 (x·n) n - x."""
    cos = torch.sum(x * n, -1, keepdim=True)
    return 2 * cos * n - x
ValidScale = Union[Tuple[float, float], Num[Tensor, "2 D"]]
def scale_tensor(
    dat: Num[Tensor, "... D"], inp_scale: ValidScale, tgt_scale: ValidScale
):
    """Affinely remap `dat` from the range `inp_scale` to the range `tgt_scale`.

    A scale of None defaults to (0, 1). When `tgt_scale` is a tensor it must
    supply one (low, high) pair per channel of `dat`.
    """
    if inp_scale is None:
        inp_scale = (0, 1)
    if tgt_scale is None:
        tgt_scale = (0, 1)
    if isinstance(tgt_scale, Tensor):
        assert dat.shape[-1] == tgt_scale.shape[-1]
    # normalize to [0, 1] w.r.t. the input range, then stretch to the target
    dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])
    dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]
    return dat
class _TruncExp(Function):  # pylint: disable=abstract-method
    # Implementation from torch-ngp:
    # https://github.com/ashawkey/torch-ngp/blob/93b08a0d4ec1cc6e69d85df7f0acdfb99603b628/activation.py
    # exp() whose *gradient* is computed with the input clamped to <= 15,
    # preventing gradient explosion for large activations; the forward pass
    # stays exact.
    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, x):  # pylint: disable=arguments-differ
        ctx.save_for_backward(x)
        return torch.exp(x)

    @staticmethod
    @custom_bwd
    def backward(ctx, g):  # pylint: disable=arguments-differ
        x = ctx.saved_tensors[0]
        # clamp only here: d/dx exp(x) evaluated at min(x, 15)
        return g * torch.exp(torch.clamp(x, max=15))
class SpecifyGradient(Function):
    # Implementation from stable-dreamfusion
    # https://github.com/ashawkey/stable-dreamfusion
    # Autograd trick: the forward pass returns a dummy scalar, and the
    # backward pass injects the precomputed gradient `gt_grad` (scaled by
    # AMP's loss scale) into `input_tensor`.
    @staticmethod
    @custom_fwd
    def forward(ctx, input_tensor, gt_grad):
        ctx.save_for_backward(gt_grad)
        # we return a dummy value 1, which will be scaled by amp's scaler so we get the scale in backward.
        return torch.ones([1], device=input_tensor.device, dtype=input_tensor.dtype)

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_scale):
        (gt_grad,) = ctx.saved_tensors
        gt_grad = gt_grad * grad_scale
        # no gradient flows to gt_grad itself
        return gt_grad, None
trunc_exp = _TruncExp.apply
def get_activation(name) -> Callable:
    """Resolve an activation function by (case-insensitive) name.

    None or "none" yield the identity; unknown names fall back to the
    attribute of the same name on `torch.nn.functional`, and a ValueError is
    raised when no such attribute exists.
    """
    if name is None:
        return lambda x: x
    key = name.lower()
    if key == "none":
        return lambda x: x
    if key == "lin2srgb":
        # linear -> sRGB transfer curve, clamped to [0, 1]
        return lambda x: torch.where(
            x > 0.0031308,
            torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,
            12.92 * x,
        ).clamp(0.0, 1.0)
    if key == "exp":
        return lambda x: torch.exp(x)
    if key == "shifted_exp":
        return lambda x: torch.exp(x - 1.0)
    if key == "trunc_exp":
        return trunc_exp
    if key == "shifted_trunc_exp":
        return lambda x: trunc_exp(x - 1.0)
    if key == "sigmoid":
        return lambda x: torch.sigmoid(x)
    if key == "tanh":
        return lambda x: torch.tanh(x)
    if key == "shifted_softplus":
        return lambda x: F.softplus(x - 1.0)
    if key == "scale_-11_01":
        # map [-1, 1] to [0, 1]
        return lambda x: x * 0.5 + 0.5
    try:
        return getattr(F, key)
    except AttributeError:
        raise ValueError(f"Unknown activation function: {key}")
def chunk_batch(func: Callable, chunk_size: int, *args, **kwargs) -> Any:
    """Apply `func` to tensor args/kwargs in chunks of `chunk_size` along
    dim 0 and concatenate the results, bounding peak memory.

    `func` may return a Tensor, a list/tuple of Tensors (or Nones), a dict,
    or None; the merged result has the same type. A non-positive
    `chunk_size` disables chunking entirely.
    """
    if chunk_size <= 0:
        return func(*args, **kwargs)
    B = None
    # infer the batch size from the first tensor argument found
    for arg in list(args) + list(kwargs.values()):
        if isinstance(arg, torch.Tensor):
            B = arg.shape[0]
            break
    assert (
        B is not None
    ), "No tensor found in args or kwargs, cannot determine batch size."
    out = defaultdict(list)
    out_type = None
    # max(1, B) to support B == 0
    for i in range(0, max(1, B), chunk_size):
        # slice tensor arguments; pass everything else through untouched
        out_chunk = func(
            *[
                arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg
                for arg in args
            ],
            **{
                k: arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg
                for k, arg in kwargs.items()
            },
        )
        if out_chunk is None:
            continue
        out_type = type(out_chunk)
        # normalize every return shape into a dict for uniform accumulation
        if isinstance(out_chunk, torch.Tensor):
            out_chunk = {0: out_chunk}
        elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):
            chunk_length = len(out_chunk)
            out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}
        elif isinstance(out_chunk, dict):
            pass
        else:
            print(
                f"Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}."
            )
            exit(1)
        for k, v in out_chunk.items():
            # detach when grads are globally disabled to free the graph early
            v = v if torch.is_grad_enabled() else v.detach()
            out[k].append(v)

    if out_type is None:
        return None

    out_merged: Dict[Any, Optional[torch.Tensor]] = {}
    for k, v in out.items():
        if all([vv is None for vv in v]):
            # allow None in return value
            out_merged[k] = None
        elif all([isinstance(vv, torch.Tensor) for vv in v]):
            out_merged[k] = torch.cat(v, dim=0)
        else:
            raise TypeError(
                f"Unsupported types in return value of func: {[type(vv) for vv in v if not isinstance(vv, torch.Tensor)]}"
            )

    # restore the original return type of func
    if out_type is torch.Tensor:
        return out_merged[0]
    elif out_type in [tuple, list]:
        return out_type([out_merged[i] for i in range(chunk_length)])
    elif out_type is dict:
        return out_merged
def get_ray_directions(
    H: int,
    W: int,
    focal: Union[float, Tuple[float, float]],
    principal: Optional[Tuple[float, float]] = None,
    use_pixel_centers: bool = True,
) -> "Float[Tensor, 'H W 3']":
    """
    Get ray directions for all pixels in camera coordinate.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
    ray-tracing-generating-camera-rays/standard-coordinate-systems

    Inputs:
        H, W, focal, principal, use_pixel_centers: image height, width, focal
        length (scalar, or an (fx, fy) pair with an explicit principal point),
        principal point and whether to use pixel centers

    Outputs:
        directions: (H, W, 3), the direction of the rays in camera coordinate
    """
    pixel_center = 0.5 if use_pixel_centers else 0

    # Accept int as well as float for a scalar focal length; the original
    # isinstance(focal, float) sent e.g. focal=512 into the tuple branch and
    # crashed on unpacking.
    if isinstance(focal, (int, float)):
        fx, fy = focal, focal
        cx, cy = W / 2, H / 2
    else:
        fx, fy = focal
        assert principal is not None
        cx, cy = principal

    # i indexes columns (x), j indexes rows (y) thanks to indexing="xy"
    i, j = torch.meshgrid(
        torch.arange(W, dtype=torch.float32) + pixel_center,
        torch.arange(H, dtype=torch.float32) + pixel_center,
        indexing="xy",
    )

    # camera looks down -z; y is flipped to match image row order
    directions: Float[Tensor, "H W 3"] = torch.stack(
        [(i - cx) / fx, -(j - cy) / fy, -torch.ones_like(i)], -1
    )

    return directions
def get_rays(
    directions: Float[Tensor, "... 3"],
    c2w: Float[Tensor, "... 4 4"],
    keepdim=False,
    noise_scale=0.0,
) -> Tuple[Float[Tensor, "... 3"], Float[Tensor, "... 3"]]:
    """Transform camera-space ray directions to world-space (origins, directions).

    Supports (N_rays, 3), (H, W, 3) and (B, H, W, 3) direction layouts with
    matching/broadcastable c2w shapes. Returned directions are normalized;
    unless `keepdim` is True, both outputs are flattened to (N, 3). A
    positive `noise_scale` adds Gaussian noise to origins and directions.
    """
    # Rotate ray directions from camera coordinate to the world coordinate
    assert directions.shape[-1] == 3

    if directions.ndim == 2:  # (N_rays, 3)
        if c2w.ndim == 2:  # (4, 4)
            c2w = c2w[None, :, :]
        assert c2w.ndim == 3  # (N_rays, 4, 4) or (1, 4, 4)
        rays_d = (directions[:, None, :] * c2w[:, :3, :3]).sum(-1)  # (N_rays, 3)
        rays_o = c2w[:, :3, 3].expand(rays_d.shape)
    elif directions.ndim == 3:  # (H, W, 3)
        assert c2w.ndim in [2, 3]
        if c2w.ndim == 2:  # (4, 4)
            rays_d = (directions[:, :, None, :] * c2w[None, None, :3, :3]).sum(
                -1
            )  # (H, W, 3)
            rays_o = c2w[None, None, :3, 3].expand(rays_d.shape)
        elif c2w.ndim == 3:  # (B, 4, 4)
            rays_d = (directions[None, :, :, None, :] * c2w[:, None, None, :3, :3]).sum(
                -1
            )  # (B, H, W, 3)
            rays_o = c2w[:, None, None, :3, 3].expand(rays_d.shape)
    elif directions.ndim == 4:  # (B, H, W, 3)
        assert c2w.ndim == 3  # (B, 4, 4)
        rays_d = (directions[:, :, :, None, :] * c2w[:, None, None, :3, :3]).sum(
            -1
        )  # (B, H, W, 3)
        rays_o = c2w[:, None, None, :3, 3].expand(rays_d.shape)

    # add camera noise to avoid grid-like artifact
    # https://github.com/ashawkey/stable-dreamfusion/blob/49c3d4fa01d68a4f027755acf94e1ff6020458cc/nerf/utils.py#L373
    if noise_scale > 0:
        rays_o = rays_o + torch.randn(3, device=rays_o.device) * noise_scale
        rays_d = rays_d + torch.randn(3, device=rays_d.device) * noise_scale

    rays_d = F.normalize(rays_d, dim=-1)
    if not keepdim:
        rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)

    return rays_o, rays_d
def get_projection_matrix(
    fovy: "Float[Tensor, 'B']", aspect_wh: float, near: float, far: float
) -> "Float[Tensor, 'B 4 4']":
    """Build a batch of OpenGL-style perspective projection matrices."""
    batch = fovy.shape[0]
    half_tan = torch.tan(fovy / 2.0)
    proj = torch.zeros(batch, 4, 4, dtype=torch.float32)
    proj[:, 0, 0] = 1.0 / (half_tan * aspect_wh)
    # negative sign: the y axis is flipped in nvdiffrast output
    proj[:, 1, 1] = -1.0 / half_tan
    proj[:, 2, 2] = -(far + near) / (far - near)
    proj[:, 2, 3] = -2.0 * far * near / (far - near)
    proj[:, 3, 2] = -1.0
    return proj
def get_mvp_matrix(
    c2w: "Float[Tensor, 'B 4 4']", proj_mtx: "Float[Tensor, 'B 4 4']"
) -> "Float[Tensor, 'B 4 4']":
    """Compose model-view-projection matrices from camera-to-world poses and
    projection matrices."""
    # invert c2w analytically: for a rigid transform, R' = R^T and t' = -R^T t
    # (mathematically equivalent to (c2w)^-1 but cheaper and exact)
    rot_t = c2w[:, :3, :3].permute(0, 2, 1)
    w2c: Float[Tensor, "B 4 4"] = torch.zeros(c2w.shape[0], 4, 4).to(c2w)
    w2c[:, :3, :3] = rot_t
    w2c[:, :3, 3:] = -rot_t @ c2w[:, :3, 3:]
    w2c[:, 3, 3] = 1.0
    # mvp = projection @ world-to-camera (model-view)
    return proj_mtx @ w2c
def binary_cross_entropy(input, target):
    """
    F.binary_cross_entropy is not numerically stable in mixed-precision training.
    This computes the mean BCE directly; `input` must lie strictly in (0, 1).
    """
    pos_term = target * torch.log(input)
    neg_term = (1 - target) * torch.log(1 - input)
    return -(pos_term + neg_term).mean()
def tet_sdf_diff(
    vert_sdf: "Float[Tensor, 'Nv 1']", tet_edges: "Integer[Tensor, 'Ne 2']"
) -> "Float[Tensor, '']":
    """Regularizer over tetrahedral-grid edges whose endpoint SDF values
    disagree in sign: symmetric BCE of each endpoint's SDF (as a logit)
    against the sign of the other endpoint."""
    # gather per-edge (sdf_a, sdf_b) pairs
    edge_sdf = vert_sdf[:, 0][tet_edges.reshape(-1)].reshape(-1, 2)
    # keep only sign-crossing edges (these carry the surface)
    crossing = torch.sign(edge_sdf[..., 0]) != torch.sign(edge_sdf[..., 1])
    edge_sdf = edge_sdf[crossing]
    return F.binary_cross_entropy_with_logits(
        edge_sdf[..., 0], (edge_sdf[..., 1] > 0).float()
    ) + F.binary_cross_entropy_with_logits(
        edge_sdf[..., 1], (edge_sdf[..., 0] > 0).float()
    )
# Implementation from Latent-NeRF
# https://github.com/eladrich/latent-nerf/blob/f49ecefcd48972e69a28e3116fe95edf0fac4dc8/src/latent_nerf/models/mesh_utils.py
class MeshOBJ:
    """Lightweight triangle-mesh wrapper with occupancy / distance queries
    backed by igl."""

    # unit axis vectors, kept as (1, 3) row tensors on the class
    dx = torch.zeros(3).float()
    dx[0] = 1
    dy, dz = dx[[1, 0, 2]], dx[[2, 1, 0]]
    dx, dy, dz = dx[None, :], dy[None, :], dz[None, :]

    def __init__(self, v: np.ndarray, f: np.ndarray):
        # v: (Nv, 3) vertex positions; f: (Nf, 3) triangle vertex indices
        self.v = v
        self.f = f
        self.dx, self.dy, self.dz = MeshOBJ.dx, MeshOBJ.dy, MeshOBJ.dz
        self.v_tensor = torch.from_numpy(self.v)

        vf = self.v[self.f, :]
        # per-face centroids
        self.f_center = vf.mean(axis=1)
        self.f_center_tensor = torch.from_numpy(self.f_center).float()

        # per-face unit normals from two triangle edges
        e1 = vf[:, 1, :] - vf[:, 0, :]
        e2 = vf[:, 2, :] - vf[:, 0, :]
        self.face_normals = np.cross(e1, e2)
        self.face_normals = (
            self.face_normals / np.linalg.norm(self.face_normals, axis=-1)[:, None]
        )
        self.face_normals_tensor = torch.from_numpy(self.face_normals)

    def normalize_mesh(self, target_scale=0.5):
        """Return a copy centered at the vertex mean and scaled so the
        farthest vertex lies at radius `target_scale`."""
        verts = self.v
        # Compute center of bounding box
        # center = torch.mean(torch.column_stack([torch.max(verts, dim=0)[0], torch.min(verts, dim=0)[0]]))
        center = verts.mean(axis=0)
        verts = verts - center
        scale = np.max(np.linalg.norm(verts, axis=1))
        verts = (verts / scale) * target_scale
        return MeshOBJ(verts, self.f)

    def winding_number(self, query: torch.Tensor):
        """Generalized winding number of `query` points w.r.t. the mesh
        (computed via igl on CPU, result moved back to query's device)."""
        device = query.device
        shp = query.shape
        query_np = query.detach().cpu().reshape(-1, 3).numpy()
        target_alphas = fast_winding_number_for_meshes(
            self.v.astype(np.float32), self.f, query_np
        )
        return torch.from_numpy(target_alphas).reshape(shp[:-1]).to(device)

    def gaussian_weighted_distance(self, query: torch.Tensor, sigma):
        """Gaussian falloff exp(-d2 / (2 sigma^2)) of the squared
        point-to-mesh distances d2 of the `query` points."""
        device = query.device
        shp = query.shape
        query_np = query.detach().cpu().reshape(-1, 3).numpy()
        distances, _, _ = point_mesh_squared_distance(
            query_np, self.v.astype(np.float32), self.f
        )
        distances = torch.from_numpy(distances).reshape(shp[:-1]).to(device)
        weight = torch.exp(-(distances / (2 * sigma**2)))
        return weight
def ce_pq_loss(p, q, weight=None):
    """Summed (optionally weighted) cross-entropy between target field `p`
    and predicted field `q`, with probabilities clamped away from 0/1."""

    def _safe(v, eps=0.0001):
        # keep probabilities inside (eps, 1 - eps) so the logs stay finite
        return v.clamp(eps, 1 - eps)

    p = p.view(q.shape)
    loss = -(p * torch.log(_safe(q)) + (1 - p) * torch.log(_safe(1 - q)))
    if weight is not None:
        loss = loss * weight
    return loss.sum()
class ShapeLoss(nn.Module):
    """Cross-entropy loss pulling NeRF occupancy toward a guide mesh.

    The guide OBJ is normalized, rotated into the scene frame, and its
    winding number provides the per-point inside/outside target.
    """

    def __init__(self, guide_shape):
        super().__init__()
        self.mesh_scale = 0.7
        self.proximal_surface = 0.3
        self.delta = 0.2
        self.shape_path = guide_shape

        verts, _, _, faces, _, _ = read_obj(self.shape_path, float)
        guide_mesh = MeshOBJ(verts, faces)
        # Compose two axis-permutation rotations mapping the OBJ convention
        # into the scene coordinate frame.
        matrix_rot = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) @ np.array(
            [[0, 0, 1], [0, 1, 0], [-1, 0, 0]]
        )
        self.sketchshape = guide_mesh.normalize_mesh(self.mesh_scale)
        rotated = (matrix_rot @ self.sketchshape.v.transpose(1, 0)).transpose(1, 0)
        self.sketchshape = MeshOBJ(np.ascontiguousarray(rotated), faces)

    def forward(self, xyzs, sigmas):
        mesh_occ = self.sketchshape.winding_number(xyzs)
        weight = None
        if self.proximal_surface > 0:
            # De-emphasize points close to the guide surface.
            weight = 1 - self.sketchshape.gaussian_weighted_distance(
                xyzs, self.proximal_surface
            )
        indicator = (mesh_occ > 0.5).float()
        nerf_occ = (1 - torch.exp(-self.delta * sigmas)).clamp(min=0, max=1.1)
        # Order is important for the CE loss: the second argument is the
        # (non-optimized) target.
        return ce_pq_loss(nerf_occ, indicator, weight=weight)
def shifted_expotional_decay(a, b, c, r):
    """Exponential decay a * exp(-b * r), shifted up by c."""
    decay = torch.exp(-b * r)
    return a * decay + c
def shifted_cosine_decay(a, b, c, r):
    """Shifted cosine schedule a * cos(b * r + c) + a, ranging over [0, 2a]."""
    phase = b * r + c
    return a * torch.cos(phase) + a
def perpendicular_component(x: Float[Tensor, "B C H W"], y: Float[Tensor, "B C H W"]):
    """Remove from x its projection onto y, independently per batch element."""
    # Guard against division by zero for (near-)zero y.
    eps = torch.ones_like(x[:, 0, 0, 0]) * 1e-6
    dot_xy = torch.mul(x, y).sum(dim=[1, 2, 3])
    dot_yy = torch.mul(y, y).sum(dim=[1, 2, 3])
    coeff = dot_xy / torch.maximum(dot_yy, eps)
    return x - coeff.view(-1, 1, 1, 1) * y
def validate_empty_rays(ray_indices, t_start, t_end):
    """Substitute a single dummy ray when the sampler produced none, so
    downstream reductions never see empty tensors."""
    if ray_indices.nelement() > 0:
        return ray_indices, t_start, t_end
    threestudio.warn("Empty rays_indices!")
    return (
        torch.LongTensor([0]).to(ray_indices),
        torch.Tensor([0]).to(ray_indices),
        torch.Tensor([0]).to(ray_indices),
    )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/rasterize.py | threestudio/utils/rasterize.py | import nvdiffrast.torch as dr
import torch
from threestudio.utils.typing import *
class NVDiffRasterizerContext:
    """Thin wrapper around an nvdiffrast rasterization context (GL or CUDA)."""

    def __init__(self, context_type: str, device: torch.device) -> None:
        self.device = device
        self.ctx = self.initialize_context(context_type, device)

    def initialize_context(
        self, context_type: str, device: torch.device
    ) -> Union[dr.RasterizeGLContext, dr.RasterizeCudaContext]:
        """Create the rasterizer backend; "gl" needs an OpenGL-capable setup."""
        if context_type == "gl":
            return dr.RasterizeGLContext(device=device)
        elif context_type == "cuda":
            return dr.RasterizeCudaContext(device=device)
        else:
            raise ValueError(f"Unknown rasterizer context type: {context_type}")

    def vertex_transform(
        self, verts: Float[Tensor, "Nv 3"], mvp_mtx: Float[Tensor, "B 4 4"]
    ) -> Float[Tensor, "B Nv 4"]:
        """Lift vertices to homogeneous coords and apply each MVP matrix."""
        verts_homo = torch.cat(
            [verts, torch.ones([verts.shape[0], 1]).to(verts)], dim=-1
        )
        return torch.matmul(verts_homo, mvp_mtx.permute(0, 2, 1))

    def rasterize(
        self,
        pos: Float[Tensor, "B Nv 4"],
        tri: Integer[Tensor, "Nf 3"],
        resolution: Union[int, Tuple[int, int]],
    ):
        """Rasterize clip-space positions; returns nvdiffrast's (rast, rast_db)."""
        # rasterize in instance mode (single topology)
        return dr.rasterize(self.ctx, pos.float(), tri.int(), resolution, grad_db=True)

    def rasterize_one(
        self,
        pos: Float[Tensor, "Nv 4"],
        tri: Integer[Tensor, "Nf 3"],
        resolution: Union[int, Tuple[int, int]],
    ):
        """Rasterize a single (unbatched) mesh under a single viewpoint."""
        # rasterize one single mesh under a single viewpoint
        rast, rast_db = self.rasterize(pos[None, ...], tri, resolution)
        return rast[0], rast_db[0]

    def antialias(
        self,
        color: Float[Tensor, "B H W C"],
        rast: Float[Tensor, "B H W 4"],
        pos: Float[Tensor, "B Nv 4"],
        tri: Integer[Tensor, "Nf 3"],
    ) -> Float[Tensor, "B H W C"]:
        """Edge antialiasing pass (also yields gradients at silhouettes)."""
        return dr.antialias(color.float(), rast, pos.float(), tri.int())

    def interpolate(
        self,
        attr: Float[Tensor, "B Nv C"],
        rast: Float[Tensor, "B H W 4"],
        tri: Integer[Tensor, "Nf 3"],
        rast_db=None,
        diff_attrs=None,
    ) -> Float[Tensor, "B H W C"]:
        """Interpolate per-vertex attributes over the rasterized pixels."""
        return dr.interpolate(
            attr.float(), rast, tri.int(), rast_db=rast_db, diff_attrs=diff_attrs
        )

    def interpolate_one(
        self,
        attr: Float[Tensor, "Nv C"],
        rast: Float[Tensor, "B H W 4"],
        tri: Integer[Tensor, "Nf 3"],
        rast_db=None,
        diff_attrs=None,
    ) -> Float[Tensor, "B H W C"]:
        """Interpolate unbatched per-vertex attributes."""
        return self.interpolate(attr[None, ...], rast, tri, rast_db, diff_attrs)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/misc.py | threestudio/utils/misc.py | import gc
import os
import re
import tinycudann as tcnn
import torch
from packaging import version
from threestudio.utils.config import config_to_primitive
from threestudio.utils.typing import *
def parse_version(ver: str):
    """Parse a version string into a comparable packaging version object."""
    return version.parse(ver)
def get_rank():
    """Best-effort process rank from environment variables (0 when unset).

    SLURM_PROCID can be set even if SLURM is not managing the
    multiprocessing, therefore RANK / LOCAL_RANK are checked first.
    """
    for key in ("RANK", "LOCAL_RANK", "SLURM_PROCID", "JSM_NAMESPACE_RANK"):
        value = os.environ.get(key)
        if value is not None:
            return int(value)
    return 0
def get_device():
    """CUDA device for this process, indexed by its distributed rank."""
    return torch.device("cuda:{}".format(get_rank()))
def load_module_weights(
    path, module_name=None, ignore_modules=None, map_location=None
) -> Tuple[dict, int, int]:
    """Load a (sub)set of weights from a Lightning-style checkpoint.

    Either drop keys under the ``ignore_modules`` prefixes, or keep only
    keys under ``module_name`` with that prefix stripped (the two options
    are mutually exclusive). Returns (state_dict, epoch, global_step).
    """
    if module_name is not None and ignore_modules is not None:
        raise ValueError("module_name and ignore_modules cannot be both set")
    if map_location is None:
        map_location = get_device()

    ckpt = torch.load(path, map_location=map_location)
    state_dict = ckpt["state_dict"]
    selected = state_dict

    if ignore_modules is not None:
        selected = {
            k: v
            for k, v in state_dict.items()
            if not any(k.startswith(prefix + ".") for prefix in ignore_modules)
        }
    if module_name is not None:
        pattern = re.compile(rf"^{module_name}\.(.*)$")
        selected = {}
        for k, v in state_dict.items():
            m = pattern.match(k)
            if m is not None:
                selected[m.group(1)] = v
    return selected, ckpt["epoch"], ckpt["global_step"]
def C(value: Any, epoch: int, global_step: int) -> float:
    """Evaluate a scheduled scalar.

    ``value`` is either a plain number (returned unchanged) or a 4-list
    [start_step, start_value, end_value, end_step] (a 3-list is padded
    with a leading 0). An int ``end_step`` interpolates over global steps,
    a float one over epochs; the interpolation fraction is clamped to [0, 1].
    """
    if isinstance(value, (int, float)):
        return value
    value = config_to_primitive(value)
    if not isinstance(value, list):
        raise TypeError("Scalar specification only supports list, got", type(value))
    if len(value) == 3:
        value = [0] + value
    assert len(value) == 4
    start_step, start_value, end_value, end_step = value
    if isinstance(end_step, (int, float)):
        # int end_step counts global steps, float end_step counts epochs.
        current = global_step if isinstance(end_step, int) else epoch
        frac = (current - start_step) / (end_step - start_step)
        value = start_value + (end_value - start_value) * max(min(1.0, frac), 0.0)
    return value
def cleanup():
    """Free Python garbage, CUDA-cached memory and tiny-cuda-nn temp memory."""
    gc.collect()
    torch.cuda.empty_cache()
    tcnn.free_temporary_memory()
def finish_with_cleanup(func: Callable):
    """Decorator: run ``func``, then free cached memory via cleanup().

    Fix: use functools.wraps so the wrapped function keeps its name,
    docstring and signature metadata (the original wrapper discarded them).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        out = func(*args, **kwargs)
        cleanup()
        return out

    return wrapper
def _distributed_available():
return torch.distributed.is_available() and torch.distributed.is_initialized()
def barrier():
    """Synchronize all ranks; no-op when not running distributed."""
    if _distributed_available():
        torch.distributed.barrier()
def broadcast(tensor, src=0):
    """Broadcast ``tensor`` from rank ``src``; identity when not distributed."""
    if _distributed_available():
        torch.distributed.broadcast(tensor, src=src)
    return tensor
def enable_gradient(model, enabled: bool = True) -> None:
    """Toggle requires_grad on every parameter of ``model``."""
    for p in model.parameters():
        p.requires_grad_(enabled)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/config.py | threestudio/utils/config.py | import os
from dataclasses import dataclass, field
from datetime import datetime
from omegaconf import OmegaConf
import threestudio
from threestudio.utils.typing import *
# ============ Register OmegaConf Resolvers ============= #
# Interpolation helpers usable from YAML configs, e.g. ${mul:${a},${b}} or
# ${rmspace:${tag},_}. The "cmaxgt0" resolvers rely on C_max, defined below;
# the lambdas only look it up when a config is actually resolved.
OmegaConf.register_new_resolver(
    "calc_exp_lr_decay_rate", lambda factor, n: factor ** (1.0 / n)
)
OmegaConf.register_new_resolver("add", lambda a, b: a + b)
OmegaConf.register_new_resolver("sub", lambda a, b: a - b)
OmegaConf.register_new_resolver("mul", lambda a, b: a * b)
OmegaConf.register_new_resolver("div", lambda a, b: a / b)
OmegaConf.register_new_resolver("idiv", lambda a, b: a // b)
OmegaConf.register_new_resolver("basename", lambda p: os.path.basename(p))
OmegaConf.register_new_resolver("rmspace", lambda s, sub: s.replace(" ", sub))
OmegaConf.register_new_resolver("tuple2", lambda s: [float(s), float(s)])
OmegaConf.register_new_resolver("gt0", lambda s: s > 0)
OmegaConf.register_new_resolver("cmaxgt0", lambda s: C_max(s) > 0)
OmegaConf.register_new_resolver("not", lambda s: not s)
OmegaConf.register_new_resolver(
    "cmaxgt0orcmaxgt0", lambda a, b: C_max(a) > 0 or C_max(b) > 0
)
# ======================================================= #
def C_max(value: Any) -> float:
    """Maximum value a scheduled scalar (see misc.C) can ever reach."""
    if isinstance(value, (int, float)):
        return value
    value = config_to_primitive(value)
    if not isinstance(value, list):
        raise TypeError("Scalar specification only supports list, got", type(value))
    if len(value) == 3:
        value = [0] + value
    assert len(value) == 4
    _, start_value, end_value, _ = value
    return max(start_value, end_value)
@dataclass
class ExperimentConfig:
    """Top-level experiment configuration parsed by load_config."""

    name: str = "default"
    description: str = ""
    tag: str = ""
    seed: int = 0
    use_timestamp: bool = True
    timestamp: Optional[str] = None
    exp_root_dir: str = "outputs"

    ### these shouldn't be set manually
    exp_dir: str = "outputs/default"
    trial_name: str = "exp"
    trial_dir: str = "outputs/default/exp"
    n_gpus: int = 1
    ###

    resume: Optional[str] = None

    data_type: str = ""
    data: dict = field(default_factory=dict)

    system_type: str = ""
    system: dict = field(default_factory=dict)

    # accept pytorch-lightning trainer parameters
    # see https://lightning.ai/docs/pytorch/stable/common/trainer.html#trainer-class-api
    trainer: dict = field(default_factory=dict)

    # accept pytorch-lightning checkpoint callback parameters
    # see https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html#modelcheckpoint
    checkpoint: dict = field(default_factory=dict)

    def __post_init__(self):
        # Derives trial_name/exp_dir/trial_dir and creates the output dir.
        if not self.tag and not self.use_timestamp:
            raise ValueError("Either tag is specified or use_timestamp is True.")
        self.trial_name = self.tag
        # if resume from an existing config, self.timestamp should not be None
        if self.timestamp is None:
            self.timestamp = ""
            if self.use_timestamp:
                if self.n_gpus > 1:
                    # Multi-GPU processes would each compute a different
                    # timestamp, so it is disabled in that case.
                    threestudio.warn(
                        "Timestamp is disabled when using multiple GPUs, please make sure you have a unique tag."
                    )
                else:
                    self.timestamp = datetime.now().strftime("@%Y%m%d-%H%M%S")
        self.trial_name += self.timestamp
        self.exp_dir = os.path.join(self.exp_root_dir, self.name)
        self.trial_dir = os.path.join(self.exp_dir, self.trial_name)
        os.makedirs(self.trial_dir, exist_ok=True)
def load_config(*yamls: str, cli_args: list = [], from_string=False, **kwargs) -> Any:
    """Merge YAML configs (file paths, or raw strings when ``from_string``),
    CLI dotlist overrides and kwargs into a resolved ExperimentConfig."""
    loader = OmegaConf.create if from_string else OmegaConf.load
    confs = [loader(y) for y in yamls]
    cfg = OmegaConf.merge(*confs, OmegaConf.from_cli(cli_args), kwargs)
    OmegaConf.resolve(cfg)
    assert isinstance(cfg, DictConfig)
    return parse_structured(ExperimentConfig, cfg)
def config_to_primitive(config, resolve: bool = True) -> Any:
    """Convert an OmegaConf node into plain Python containers."""
    container = OmegaConf.to_container(config, resolve=resolve)
    return container
def dump_config(path: str, config) -> None:
    """Serialize ``config`` to ``path`` as YAML."""
    with open(path, "w") as stream:
        OmegaConf.save(config=config, f=stream)
def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:
    """Validate ``cfg`` against the dataclass ``fields`` and return the
    resulting structured config."""
    return OmegaConf.structured(fields(**cfg))
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/__init__.py | threestudio/utils/__init__.py | from . import base
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/base.py | threestudio/utils/base.py | from dataclasses import dataclass
import torch
import torch.nn as nn
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import get_device, load_module_weights
from threestudio.utils.typing import *
class Configurable:
    """Mixin giving a class a dataclass-backed ``self.cfg`` parsed from a dict."""

    @dataclass
    class Config:
        pass

    def __init__(self, cfg: Optional[dict] = None) -> None:
        super().__init__()
        # Validate the raw dict against the (subclass-overridden) Config schema.
        self.cfg = parse_structured(self.Config, cfg)
class Updateable:
    """Objects receiving per-step update hooks, recursively forwarded to any
    Updateable public attributes (e.g. submodules holding schedules).

    Fixes: the two traversal loops used bare ``except:`` (which would also
    swallow KeyboardInterrupt/SystemExit) — narrowed to ``except Exception``;
    the duplicated traversal is factored into one helper.
    """

    def _updateable_children(self):
        # Yield public attributes that are themselves Updateable. getattr may
        # raise for some attributes (e.g. properties); those are skipped.
        for attr in self.__dir__():
            if attr.startswith("_"):
                continue
            try:
                module = getattr(self, attr)
            except Exception:
                continue
            if isinstance(module, Updateable):
                yield module

    def do_update_step(
        self, epoch: int, global_step: int, on_load_weights: bool = False
    ):
        """Forward the step hook to all children, then run this object's hook."""
        for child in self._updateable_children():
            child.do_update_step(epoch, global_step, on_load_weights=on_load_weights)
        self.update_step(epoch, global_step, on_load_weights=on_load_weights)

    def do_update_step_end(self, epoch: int, global_step: int):
        """Forward the step-end hook to all children, then run this object's hook."""
        for child in self._updateable_children():
            child.do_update_step_end(epoch, global_step)
        self.update_step_end(epoch, global_step)

    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        # Override this method to implement custom update logic.
        # If on_load_weights is True, be careful doing things related to model
        # evaluation, as models/tensors are not guaranteed to be on the same
        # device yet.
        pass

    def update_step_end(self, epoch: int, global_step: int):
        pass
def update_if_possible(module: Any, epoch: int, global_step: int) -> None:
    """Run the step hook if ``module`` participates in the update protocol."""
    if not isinstance(module, Updateable):
        return
    module.do_update_step(epoch, global_step)
def update_end_if_possible(module: Any, epoch: int, global_step: int) -> None:
    """Run the step-end hook if ``module`` participates in the update protocol."""
    if not isinstance(module, Updateable):
        return
    module.do_update_step_end(epoch, global_step)
class BaseObject(Updateable):
    """Non-nn.Module base: parses cfg, stores the device, calls configure()."""

    @dataclass
    class Config:
        pass

    cfg: Config  # add this to every subclass of BaseObject to enable static type checking

    def __init__(
        self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs
    ) -> None:
        super().__init__()
        self.cfg = parse_structured(self.Config, cfg)
        self.device = get_device()
        self.configure(*args, **kwargs)

    def configure(self, *args, **kwargs) -> None:
        # Subclass hook; receives the extra constructor arguments.
        pass
class BaseModule(nn.Module, Updateable):
    """nn.Module base: parses cfg, optionally restores weights, calls configure()."""

    @dataclass
    class Config:
        # Optional "path/to/weights:module_name" spec to restore from.
        weights: Optional[str] = None

    cfg: Config  # add this to every subclass of BaseModule to enable static type checking

    def __init__(
        self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs
    ) -> None:
        super().__init__()
        self.cfg = parse_structured(self.Config, cfg)
        self.device = get_device()
        self.configure(*args, **kwargs)
        if self.cfg.weights is not None:
            # format: path/to/weights:module_name
            weights_path, module_name = self.cfg.weights.split(":")
            state_dict, epoch, global_step = load_module_weights(
                weights_path, module_name=module_name, map_location="cpu"
            )
            self.load_state_dict(state_dict)
            self.do_update_step(
                epoch, global_step, on_load_weights=True
            )  # restore states
        # dummy tensor to indicate model state
        self._dummy: Float[Tensor, "..."]
        self.register_buffer("_dummy", torch.zeros(0).float(), persistent=False)

    def configure(self, *args, **kwargs) -> None:
        # Subclass hook; receives the extra constructor arguments.
        pass
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/saving.py | threestudio/utils/saving.py | import json
import os
import re
import shutil
import cv2
import imageio
import matplotlib.pyplot as plt
import numpy as np
import torch
import trimesh
import wandb
from matplotlib import cm
from matplotlib.colors import LinearSegmentedColormap
from PIL import Image, ImageDraw
from pytorch_lightning.loggers import WandbLogger
from threestudio.models.mesh import Mesh
from threestudio.utils.typing import *
class SaverMixin:
_save_dir: Optional[str] = None
_wandb_logger: Optional[WandbLogger] = None
    def set_save_dir(self, save_dir: str):
        """Remember the root directory that all save_* helpers write into."""
        self._save_dir = save_dir
    def get_save_dir(self):
        """Return the configured save directory, raising if it was never set."""
        if self._save_dir is None:
            raise ValueError("Save dir is not set")
        return self._save_dir
def convert_data(self, data):
if data is None:
return None
elif isinstance(data, np.ndarray):
return data
elif isinstance(data, torch.Tensor):
return data.detach().cpu().numpy()
elif isinstance(data, list):
return [self.convert_data(d) for d in data]
elif isinstance(data, dict):
return {k: self.convert_data(v) for k, v in data.items()}
else:
raise TypeError(
"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting",
type(data),
)
def get_save_path(self, filename):
save_path = os.path.join(self.get_save_dir(), filename)
os.makedirs(os.path.dirname(save_path), exist_ok=True)
return save_path
    def create_loggers(self, cfg_loggers: DictConfig) -> None:
        """Instantiate the wandb logger when it is enabled in the config."""
        if "wandb" in cfg_loggers.keys() and cfg_loggers.wandb.enable:
            self._wandb_logger = WandbLogger(
                project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name
            )
def get_loggers(self) -> List:
if self._wandb_logger:
return [self._wandb_logger]
else:
return []
DEFAULT_RGB_KWARGS = {"data_format": "HWC", "data_range": (0, 1)}
DEFAULT_UV_KWARGS = {
"data_format": "HWC",
"data_range": (0, 1),
"cmap": "checkerboard",
}
DEFAULT_GRAYSCALE_KWARGS = {"data_range": None, "cmap": "jet"}
DEFAULT_GRID_KWARGS = {"align": "max"}
    def get_rgb_image_(self, img, data_format, data_range, rgba=False):
        """Convert an RGB(A) array/tensor to a uint8 BGR(A) image for OpenCV.

        Inputs with more than 3 (or 4 when ``rgba``) channels are split into
        consecutive channel groups concatenated horizontally; a short last
        group is zero-padded to full width.
        """
        img = self.convert_data(img)
        assert data_format in ["CHW", "HWC"]
        if data_format == "CHW":
            img = img.transpose(1, 2, 0)
        if img.dtype != np.uint8:
            # Clip to the given range, then rescale to [0, 255].
            img = img.clip(min=data_range[0], max=data_range[1])
            img = (
                (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0
            ).astype(np.uint8)
        nc = 4 if rgba else 3
        imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]
        imgs = [
            img_
            if img_.shape[-1] == nc
            else np.concatenate(
                [
                    img_,
                    np.zeros(
                        (img_.shape[0], img_.shape[1], nc - img_.shape[2]),
                        dtype=img_.dtype,
                    ),
                ],
                axis=-1,
            )
            for img_ in imgs
        ]
        img = np.concatenate(imgs, axis=1)
        if rgba:
            img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)
        else:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        return img
def _save_rgb_image(
self,
filename,
img,
data_format,
data_range,
name: Optional[str] = None,
step: Optional[int] = None,
):
img = self.get_rgb_image_(img, data_format, data_range)
cv2.imwrite(filename, img)
if name and self._wandb_logger:
wandb.log(
{
name: wandb.Image(self.get_save_path(filename)),
"trainer/global_step": step,
}
)
def save_rgb_image(
self,
filename,
img,
data_format=DEFAULT_RGB_KWARGS["data_format"],
data_range=DEFAULT_RGB_KWARGS["data_range"],
name: Optional[str] = None,
step: Optional[int] = None,
) -> str:
save_path = self.get_save_path(filename)
self._save_rgb_image(save_path, img, data_format, data_range, name, step)
return save_path
    def get_uv_image_(self, img, data_format, data_range, cmap):
        """Visualize a UV map as a checkerboard or as an RG color ramp (BGR out)."""
        img = self.convert_data(img)
        assert data_format in ["CHW", "HWC"]
        if data_format == "CHW":
            img = img.transpose(1, 2, 0)
        img = img.clip(min=data_range[0], max=data_range[1])
        img = (img - data_range[0]) / (data_range[1] - data_range[0])
        assert cmap in ["checkerboard", "color"]
        if cmap == "checkerboard":
            # 64x64 checker: cells whose (u + v) cell index is even are magenta.
            n_grid = 64
            mask = (img * n_grid).astype(int)
            mask = (mask[..., 0] + mask[..., 1]) % 2 == 0
            img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255
            img[mask] = np.array([255, 0, 255], dtype=np.uint8)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        elif cmap == "color":
            # U in the red channel, V in the green channel.
            img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
            img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)
            img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)
            img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)
            img = img_
        return img
def save_uv_image(
self,
filename,
img,
data_format=DEFAULT_UV_KWARGS["data_format"],
data_range=DEFAULT_UV_KWARGS["data_range"],
cmap=DEFAULT_UV_KWARGS["cmap"],
) -> str:
save_path = self.get_save_path(filename)
img = self.get_uv_image_(img, data_format, data_range, cmap)
cv2.imwrite(save_path, img)
return save_path
    def get_grayscale_image_(self, img, data_range, cmap):
        """Map a scalar image to a uint8 BGR visualization.

        With ``data_range`` None the input is min-max normalized; otherwise
        it is clipped and rescaled to [0, 1] before colormapping.
        """
        img = self.convert_data(img)
        img = np.nan_to_num(img)
        if data_range is None:
            img = (img - img.min()) / (img.max() - img.min())
        else:
            img = img.clip(data_range[0], data_range[1])
            img = (img - data_range[0]) / (data_range[1] - data_range[0])
        assert cmap in [None, "jet", "magma", "spectral"]
        if cmap == None:
            # Plain grayscale, replicated to 3 channels.
            img = (img * 255.0).astype(np.uint8)
            img = np.repeat(img[..., None], 3, axis=2)
        elif cmap == "jet":
            img = (img * 255.0).astype(np.uint8)
            img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
        elif cmap == "magma":
            img = 1.0 - img
            base = cm.get_cmap("magma")
            num_bins = 256
            colormap = LinearSegmentedColormap.from_list(
                f"{base.name}{num_bins}", base(np.linspace(0, 1, num_bins)), num_bins
            )(np.linspace(0, 1, num_bins))[:, :3]
            # Manual linear interpolation between the two nearest colormap bins.
            a = np.floor(img * 255.0)
            b = (a + 1).clip(max=255.0)
            f = img * 255.0 - a
            a = a.astype(np.uint16).clip(0, 255)
            b = b.astype(np.uint16).clip(0, 255)
            img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]
            img = (img * 255.0).astype(np.uint8)
        elif cmap == "spectral":
            colormap = plt.get_cmap("Spectral")

            def blend_rgba(image):
                image = image[..., :3] * image[..., -1:] + (
                    1.0 - image[..., -1:]
                )  # blend A to RGB
                return image

            img = colormap(img)
            img = blend_rgba(img)
            img = (img * 255).astype(np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        return img
def _save_grayscale_image(
self,
filename,
img,
data_range,
cmap,
name: Optional[str] = None,
step: Optional[int] = None,
):
img = self.get_grayscale_image_(img, data_range, cmap)
cv2.imwrite(filename, img)
if name and self._wandb_logger:
wandb.log(
{
name: wandb.Image(self.get_save_path(filename)),
"trainer/global_step": step,
}
)
def save_grayscale_image(
self,
filename,
img,
data_range=DEFAULT_GRAYSCALE_KWARGS["data_range"],
cmap=DEFAULT_GRAYSCALE_KWARGS["cmap"],
name: Optional[str] = None,
step: Optional[int] = None,
) -> str:
save_path = self.get_save_path(filename)
self._save_grayscale_image(save_path, img, data_range, cmap, name, step)
return save_path
    def get_image_grid_(self, imgs, align):
        """Compose image specs into one array: nested lists become rows,
        entries within a row are resized to a common (h, w) per ``align``
        ("max", "min", an int, or an (h, w) tuple) and stacked horizontally.
        """
        if isinstance(imgs[0], list):
            return np.concatenate(
                [self.get_image_grid_(row, align) for row in imgs], axis=0
            )
        cols = []
        for col in imgs:
            assert col["type"] in ["rgb", "uv", "grayscale"]
            if col["type"] == "rgb":
                rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()
                rgb_kwargs.update(col["kwargs"])
                cols.append(self.get_rgb_image_(col["img"], **rgb_kwargs))
            elif col["type"] == "uv":
                uv_kwargs = self.DEFAULT_UV_KWARGS.copy()
                uv_kwargs.update(col["kwargs"])
                cols.append(self.get_uv_image_(col["img"], **uv_kwargs))
            elif col["type"] == "grayscale":
                grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()
                grayscale_kwargs.update(col["kwargs"])
                cols.append(self.get_grayscale_image_(col["img"], **grayscale_kwargs))
        if align == "max":
            h = max([col.shape[0] for col in cols])
            w = max([col.shape[1] for col in cols])
        elif align == "min":
            h = min([col.shape[0] for col in cols])
            w = min([col.shape[1] for col in cols])
        elif isinstance(align, int):
            h = align
            w = align
        elif (
            isinstance(align, tuple)
            and isinstance(align[0], int)
            and isinstance(align[1], int)
        ):
            h, w = align
        else:
            raise ValueError(
                f"Unsupported image grid align: {align}, should be min, max, int or (int, int)"
            )
        for i in range(len(cols)):
            if cols[i].shape[0] != h or cols[i].shape[1] != w:
                cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)
        return np.concatenate(cols, axis=1)
    def save_image_grid(
        self,
        filename,
        imgs,
        align=DEFAULT_GRID_KWARGS["align"],
        name: Optional[str] = None,
        step: Optional[int] = None,
        texts: Optional[List[float]] = None,
    ):
        """Compose a grid from image specs, optionally overlay per-row texts,
        save the result, and log it to wandb when a name is given."""
        save_path = self.get_save_path(filename)
        img = self.get_image_grid_(imgs, align=align)

        if texts is not None:
            img = Image.fromarray(img)
            draw = ImageDraw.Draw(img)
            black, white = (0, 0, 0), (255, 255, 255)
            for i, text in enumerate(texts):
                # White strokes around a black center to fake an outline.
                draw.text((2, (img.size[1] // len(texts)) * i + 1), f"{text}", white)
                draw.text((0, (img.size[1] // len(texts)) * i + 1), f"{text}", white)
                draw.text((2, (img.size[1] // len(texts)) * i - 1), f"{text}", white)
                draw.text((0, (img.size[1] // len(texts)) * i - 1), f"{text}", white)
                draw.text((1, (img.size[1] // len(texts)) * i), f"{text}", black)
            img = np.asarray(img)

        cv2.imwrite(save_path, img)
        if name and self._wandb_logger:
            wandb.log({name: wandb.Image(save_path), "trainer/global_step": step})
        return save_path
def save_image(self, filename, img) -> str:
save_path = self.get_save_path(filename)
img = self.convert_data(img)
assert img.dtype == np.uint8 or img.dtype == np.uint16
if img.ndim == 3 and img.shape[-1] == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
elif img.ndim == 3 and img.shape[-1] == 4:
img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)
cv2.imwrite(save_path, img)
return save_path
    def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:
        """Save a (6, H, H, C) cubemap as horizontal cross layouts, one per
        3-channel group, concatenated side by side.

        NOTE(review): face order is assumed to match the cross layout below
        (faces 2/3 above/below the 1-4-0-5 middle row) — confirm with callers.
        """
        save_path = self.get_save_path(filename)
        img = self.convert_data(img)
        assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]

        imgs_full = []
        for start in range(0, img.shape[-1], 3):
            img_ = img[..., start : start + 3]
            img_ = np.stack(
                [
                    self.get_rgb_image_(img_[i], "HWC", data_range, rgba=rgba)
                    for i in range(img_.shape[0])
                ],
                axis=0,
            )
            size = img_.shape[1]
            placeholder = np.zeros((size, size, 3), dtype=np.float32)
            img_full = np.concatenate(
                [
                    np.concatenate(
                        [placeholder, img_[2], placeholder, placeholder], axis=1
                    ),
                    np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),
                    np.concatenate(
                        [placeholder, img_[3], placeholder, placeholder], axis=1
                    ),
                ],
                axis=0,
            )
            imgs_full.append(img_full)

        imgs_full = np.concatenate(imgs_full, axis=1)
        cv2.imwrite(save_path, imgs_full)
        return save_path
def save_data(self, filename, data) -> str:
data = self.convert_data(data)
if isinstance(data, dict):
if not filename.endswith(".npz"):
filename += ".npz"
save_path = self.get_save_path(filename)
np.savez(save_path, **data)
else:
if not filename.endswith(".npy"):
filename += ".npy"
save_path = self.get_save_path(filename)
np.save(save_path, data)
return save_path
def save_state_dict(self, filename, data) -> str:
save_path = self.get_save_path(filename)
torch.save(data, save_path)
return save_path
    def save_img_sequence(
        self,
        filename,
        img_dir,
        matcher,
        save_format="mp4",
        fps=30,
        name: Optional[str] = None,
        step: Optional[int] = None,
    ) -> str:
        """Assemble images under ``img_dir`` (relative to the save dir) into a
        gif/mp4. ``matcher`` is a regex whose first capture group must be the
        integer frame index used for ordering.
        """
        assert save_format in ["gif", "mp4"]
        if not filename.endswith(save_format):
            filename += f".{save_format}"
        save_path = self.get_save_path(filename)
        matcher = re.compile(matcher)
        img_dir = os.path.join(self.get_save_dir(), img_dir)
        imgs = []
        for f in os.listdir(img_dir):
            if matcher.search(f):
                imgs.append(f)
        imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))
        # cv2 reads BGR; convert back to RGB for imageio.
        imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]

        if save_format == "gif":
            imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]
            imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)
        elif save_format == "mp4":
            imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]
            imageio.mimsave(save_path, imgs, fps=fps)
        if name and self._wandb_logger:
            wandb.log(
                {
                    name: wandb.Video(save_path, format="mp4"),
                    "trainer/global_step": step,
                }
            )
        return save_path
def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:
save_path = self.get_save_path(filename)
v_pos = self.convert_data(v_pos)
t_pos_idx = self.convert_data(t_pos_idx)
mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)
mesh.export(save_path)
return save_path
    def save_obj(
        self,
        filename: str,
        mesh: Mesh,
        save_mat: bool = False,
        save_normal: bool = False,
        save_uv: bool = False,
        save_vertex_color: bool = False,
        map_Kd: Optional[Float[Tensor, "H W 3"]] = None,
        map_Ks: Optional[Float[Tensor, "H W 3"]] = None,
        map_Bump: Optional[Float[Tensor, "H W 3"]] = None,
        map_Pm: Optional[Float[Tensor, "H W 1"]] = None,
        map_Pr: Optional[Float[Tensor, "H W 1"]] = None,
        map_format: str = "jpg",
    ) -> List[str]:
        """Export ``mesh`` as an OBJ file, optionally with an MTL material and
        texture maps. Returns the list of all files written (MTL/textures
        first, OBJ last).
        """
        save_paths: List[str] = []
        if not filename.endswith(".obj"):
            filename += ".obj"
        v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(
            mesh.t_pos_idx
        )
        v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None
        if save_normal:
            v_nrm = self.convert_data(mesh.v_nrm)
        if save_uv:
            v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(
                mesh.t_tex_idx
            )
        if save_vertex_color:
            v_rgb = self.convert_data(mesh.v_rgb)
        matname, mtllib = None, None
        if save_mat:
            matname = "default"
            mtl_filename = filename.replace(".obj", ".mtl")
            mtllib = os.path.basename(mtl_filename)
            mtl_save_paths = self._save_mtl(
                mtl_filename,
                matname,
                map_Kd=self.convert_data(map_Kd),
                map_Ks=self.convert_data(map_Ks),
                map_Bump=self.convert_data(map_Bump),
                map_Pm=self.convert_data(map_Pm),
                map_Pr=self.convert_data(map_Pr),
                map_format=map_format,
            )
            save_paths += mtl_save_paths
        obj_save_path = self._save_obj(
            filename,
            v_pos,
            t_pos_idx,
            v_nrm=v_nrm,
            v_tex=v_tex,
            t_tex_idx=t_tex_idx,
            v_rgb=v_rgb,
            matname=matname,
            mtllib=mtllib,
        )
        save_paths.append(obj_save_path)
        return save_paths
def _save_obj(
self,
filename,
v_pos,
t_pos_idx,
v_nrm=None,
v_tex=None,
t_tex_idx=None,
v_rgb=None,
matname=None,
mtllib=None,
) -> str:
obj_str = ""
if matname is not None:
obj_str += f"mtllib {mtllib}\n"
obj_str += f"g object\n"
obj_str += f"usemtl {matname}\n"
for i in range(len(v_pos)):
obj_str += f"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}"
if v_rgb is not None:
obj_str += f" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}"
obj_str += "\n"
if v_nrm is not None:
for v in v_nrm:
obj_str += f"vn {v[0]} {v[1]} {v[2]}\n"
if v_tex is not None:
for v in v_tex:
obj_str += f"vt {v[0]} {1.0 - v[1]}\n"
for i in range(len(t_pos_idx)):
obj_str += "f"
for j in range(3):
obj_str += f" {t_pos_idx[i][j] + 1}/"
if v_tex is not None:
obj_str += f"{t_tex_idx[i][j] + 1}"
obj_str += "/"
if v_nrm is not None:
obj_str += f"{t_pos_idx[i][j] + 1}"
obj_str += "\n"
save_path = self.get_save_path(filename)
with open(save_path, "w") as f:
f.write(obj_str)
return save_path
def _save_mtl(
self,
filename,
matname,
Ka=(0.0, 0.0, 0.0),
Kd=(1.0, 1.0, 1.0),
Ks=(0.0, 0.0, 0.0),
map_Kd=None,
map_Ks=None,
map_Bump=None,
map_Pm=None,
map_Pr=None,
map_format="jpg",
step: Optional[int] = None,
) -> List[str]:
mtl_save_path = self.get_save_path(filename)
save_paths = [mtl_save_path]
mtl_str = f"newmtl {matname}\n"
mtl_str += f"Ka {Ka[0]} {Ka[1]} {Ka[2]}\n"
if map_Kd is not None:
map_Kd_save_path = os.path.join(
os.path.dirname(mtl_save_path), f"texture_kd.{map_format}"
)
mtl_str += f"map_Kd texture_kd.{map_format}\n"
self._save_rgb_image(
map_Kd_save_path,
map_Kd,
data_format="HWC",
data_range=(0, 1),
name=f"{matname}_Kd",
step=step,
)
save_paths.append(map_Kd_save_path)
else:
mtl_str += f"Kd {Kd[0]} {Kd[1]} {Kd[2]}\n"
if map_Ks is not None:
map_Ks_save_path = os.path.join(
os.path.dirname(mtl_save_path), f"texture_ks.{map_format}"
)
mtl_str += f"map_Ks texture_ks.{map_format}\n"
self._save_rgb_image(
map_Ks_save_path,
map_Ks,
data_format="HWC",
data_range=(0, 1),
name=f"{matname}_Ks",
step=step,
)
save_paths.append(map_Ks_save_path)
else:
mtl_str += f"Ks {Ks[0]} {Ks[1]} {Ks[2]}\n"
if map_Bump is not None:
map_Bump_save_path = os.path.join(
os.path.dirname(mtl_save_path), f"texture_nrm.{map_format}"
)
mtl_str += f"map_Bump texture_nrm.{map_format}\n"
self._save_rgb_image(
map_Bump_save_path,
map_Bump,
data_format="HWC",
data_range=(0, 1),
name=f"{matname}_Bump",
step=step,
)
save_paths.append(map_Bump_save_path)
if map_Pm is not None:
map_Pm_save_path = os.path.join(
os.path.dirname(mtl_save_path), f"texture_metallic.{map_format}"
)
mtl_str += f"map_Pm texture_metallic.{map_format}\n"
self._save_grayscale_image(
map_Pm_save_path,
map_Pm,
data_range=(0, 1),
cmap=None,
name=f"{matname}_refl",
step=step,
)
save_paths.append(map_Pm_save_path)
if map_Pr is not None:
map_Pr_save_path = os.path.join(
os.path.dirname(mtl_save_path), f"texture_roughness.{map_format}"
)
mtl_str += f"map_Pr texture_roughness.{map_format}\n"
self._save_grayscale_image(
map_Pr_save_path,
map_Pr,
data_range=(0, 1),
cmap=None,
name=f"{matname}_Ns",
step=step,
)
save_paths.append(map_Pr_save_path)
with open(self.get_save_path(filename), "w") as f:
f.write(mtl_str)
return save_paths
def save_file(self, filename, src_path) -> str:
save_path = self.get_save_path(filename)
shutil.copyfile(src_path, save_path)
return save_path
def save_json(self, filename, payload) -> str:
save_path = self.get_save_path(filename)
with open(save_path, "w") as f:
f.write(json.dumps(payload))
return save_path
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/perceptual/utils.py | threestudio/utils/perceptual/utils.py | import hashlib
import os
import requests
from tqdm import tqdm
URL_MAP = {"vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1"}
CKPT_MAP = {"vgg_lpips": "vgg.pth"}
MD5_MAP = {"vgg_lpips": "d507d7349b931f0638a25a48a722f98a"}
def download(url, local_path, chunk_size=1024):
    """Stream *url* to *local_path* with a byte-accurate progress bar.

    Parameters
    ----------
    url : str
        Remote resource to fetch.
    local_path : str
        Destination file path; the parent directory is created if missing.
    chunk_size : int
        Bytes requested per read from the response stream.
    """
    os.makedirs(os.path.split(local_path)[0], exist_ok=True)
    with requests.get(url, stream=True) as r:
        total_size = int(r.headers.get("content-length", 0))
        with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
            with open(local_path, "wb") as f:
                for data in r.iter_content(chunk_size=chunk_size):
                    if data:
                        f.write(data)
                        # Advance by the bytes actually received: the final
                        # chunk is usually shorter than chunk_size, and the
                        # previous fixed-size update overshot the total.
                        pbar.update(len(data))
def md5_hash(path):
    """Return the hexadecimal MD5 digest of the file at *path*."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        digest.update(f.read())
    return digest.hexdigest()
def get_ckpt_path(name, root, check=False):
    """Return the local checkpoint path for *name*, downloading if needed.

    A (re-)download happens when the file is missing, or when *check* is
    true and the on-disk MD5 no longer matches the expected value.
    """
    assert name in URL_MAP
    path = os.path.join(root, CKPT_MAP[name])
    needs_fetch = not os.path.exists(path) or (
        check and md5_hash(path) != MD5_MAP[name]
    )
    if needs_fetch:
        print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path))
        download(URL_MAP[name], path)
        # verify the downloaded payload before handing the path back
        md5 = md5_hash(path)
        assert md5 == MD5_MAP[name], md5
    return path
class KeyNotFoundError(Exception):
    """Raised by :func:`retrieve` when a key path cannot be resolved.

    Stores the underlying exception (*cause*), the full requested key list,
    and the portion of the path successfully visited before the failure.
    """

    def __init__(self, cause, keys=None, visited=None):
        self.cause = cause
        self.keys = keys
        self.visited = visited
        parts = list()
        if keys is not None:
            parts.append("Key not found: {}".format(keys))
        if visited is not None:
            parts.append("Visited: {}".format(visited))
        parts.append("Cause:\n{}".format(cause))
        super().__init__("\n".join(parts))
def retrieve(
    list_or_dict, key, splitval="/", default=None, expand=True, pass_success=False
):
    """Given a nested list or dict return the desired value at key expanding
    callable nodes if necessary and :attr:`expand` is ``True``. The expansion
    is done in-place.

    Parameters
    ----------
        list_or_dict : list or dict
            Possibly nested list or dictionary.
        key : str
            key/to/value, path like string describing all keys necessary to
            consider to get to the desired value. List indices can also be
            passed here.
        splitval : str
            String that defines the delimiter between keys of the
            different depth levels in `key`.
        default : obj
            Value returned if :attr:`key` is not found. Note that ``None``
            means "no default": a missing key then raises.
        expand : bool
            Whether to expand callable nodes on the path or not.
        pass_success : bool
            If true, return a ``(value, success)`` tuple instead of the
            bare value.

    Returns
    -------
        The desired value or if :attr:`default` is not ``None`` and the
        :attr:`key` is not found returns ``default``.

    Raises
    ------
        Exception if ``key`` not in ``list_or_dict`` and :attr:`default` is
        ``None``.
    """

    keys = key.split(splitval)

    success = True
    try:
        visited = []
        # parent/last_key track where the current node is attached so that
        # expanded (called) nodes can be written back in place.
        parent = None
        last_key = None
        for key in keys:
            if callable(list_or_dict):
                if not expand:
                    raise KeyNotFoundError(
                        ValueError(
                            "Trying to get past callable node with expand=False."
                        ),
                        keys=keys,
                        visited=visited,
                    )
                # expand the callable node and persist the result in-place
                list_or_dict = list_or_dict()
                parent[last_key] = list_or_dict

            last_key = key
            parent = list_or_dict

            try:
                if isinstance(list_or_dict, dict):
                    list_or_dict = list_or_dict[key]
                else:
                    # non-dict containers are indexed numerically
                    list_or_dict = list_or_dict[int(key)]
            except (KeyError, IndexError, ValueError) as e:
                raise KeyNotFoundError(e, keys=keys, visited=visited)

            visited += [key]
        # final expansion of retrieved value
        if expand and callable(list_or_dict):
            list_or_dict = list_or_dict()
            parent[last_key] = list_or_dict
    except KeyNotFoundError as e:
        if default is None:
            raise e
        else:
            list_or_dict = default
            success = False

    if not pass_success:
        return list_or_dict
    else:
        return list_or_dict, success
if __name__ == "__main__":
    from omegaconf import OmegaConf

    # Smoke test: build a small nested config and resolve a key with retrieve().
    config = OmegaConf.create(
        {"keya": "a", "keyb": "b", "keyc": {"cc1": 1, "cc2": 2}}
    )
    print(config)
    retrieve(config, "keya")
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/perceptual/perceptual.py | threestudio/utils/perceptual/perceptual.py | """Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
from collections import namedtuple
import torch
import torch.nn as nn
from torchvision import models
from threestudio.utils.perceptual.utils import get_ckpt_path
class PerceptualLoss(nn.Module):
    """LPIPS-style learned perceptual metric on frozen VGG16 features.

    Feature differences at five VGG stages are squared, reweighted by
    learned 1x1 convs (``lin0``..``lin4``) and spatially averaged.
    All parameters are frozen after loading the pretrained weights.
    """

    # root directory where the pretrained LPIPS weights are cached
    _CKPT_ROOT = "threestudio/utils/lpips"

    def __init__(self, use_dropout=True):
        super().__init__()
        self.scaling_layer = ScalingLayer()
        self.chns = [64, 128, 256, 512, 512]  # vgg16 feature widths
        self.net = vgg16(pretrained=True, requires_grad=False)
        self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
        self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
        self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
        self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
        self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
        self.load_from_pretrained()
        for param in self.parameters():
            param.requires_grad = False

    def load_from_pretrained(self, name="vgg_lpips"):
        """Load the pretrained LPIPS lin-layer weights into this instance."""
        ckpt = get_ckpt_path(name, self._CKPT_ROOT)
        self.load_state_dict(
            torch.load(ckpt, map_location=torch.device("cpu")), strict=False
        )
        print("loaded pretrained LPIPS loss from {}".format(ckpt))

    @classmethod
    def from_pretrained(cls, name="vgg_lpips"):
        """Construct a model and load pretrained weights; only 'vgg_lpips'."""
        if name != "vgg_lpips":
            raise NotImplementedError
        model = cls()
        # Fix: get_ckpt_path requires a root directory; the previous
        # single-argument call raised TypeError. Use the same cache root
        # as load_from_pretrained.
        ckpt = get_ckpt_path(name, cls._CKPT_ROOT)
        model.load_state_dict(
            torch.load(ckpt, map_location=torch.device("cpu")), strict=False
        )
        return model

    def forward(self, input, target):
        """Return the perceptual distance between *input* and *target*.

        Both are expected as image batches; the result keeps batch and
        singleton spatial dims (shape ``(B, 1, 1, 1)``).
        """
        outs0 = self.net(self.scaling_layer(input))
        outs1 = self.net(self.scaling_layer(target))
        lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
        total = None
        for lin, f0, f1 in zip(lins, outs0, outs1):
            # squared difference of unit-normalized features, reweighted
            diff = (normalize_tensor(f0) - normalize_tensor(f1)) ** 2
            term = spatial_average(lin.model(diff), keepdim=True)
            total = term if total is None else total + term
        return total
class ScalingLayer(nn.Module):
    """Normalize RGB input with the fixed per-channel LPIPS shift/scale."""

    def __init__(self):
        super(ScalingLayer, self).__init__()
        shift = torch.Tensor([-0.030, -0.088, -0.188])[None, :, None, None]
        scale = torch.Tensor([0.458, 0.448, 0.450])[None, :, None, None]
        self.register_buffer("shift", shift)
        self.register_buffer("scale", scale)

    def forward(self, inp):
        """Apply (inp - shift) / scale channel-wise."""
        return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
    """A 1x1 convolution acting as a linear map, optionally preceded by dropout."""

    def __init__(self, chn_in, chn_out=1, use_dropout=False):
        super(NetLinLayer, self).__init__()
        layers = []
        if use_dropout:
            layers.append(nn.Dropout())
        layers.append(
            nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False)
        )
        self.model = nn.Sequential(*layers)
class vgg16(torch.nn.Module):
    """VGG16 feature extractor returning activations after five relu stages.

    The torchvision VGG16 ``features`` stack is partitioned into five
    sequential slices ending at relu1_2, relu2_2, relu3_3, relu4_3, relu5_3.
    """

    def __init__(self, requires_grad=False, pretrained=True):
        super(vgg16, self).__init__()
        vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        self.N_slices = 5
        # layer-index boundaries delimiting relu1_2 .. relu5_3
        bounds = [(0, 4), (4, 9), (9, 16), (16, 23), (23, 30)]
        slices = [self.slice1, self.slice2, self.slice3, self.slice4, self.slice5]
        for seq, (lo, hi) in zip(slices, bounds):
            for idx in range(lo, hi):
                seq.add_module(str(idx), vgg_pretrained_features[idx])
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        """Run X through all slices, collecting each stage's output."""
        stage_outputs = []
        h = X
        for seq in (self.slice1, self.slice2, self.slice3, self.slice4, self.slice5):
            h = seq(h)
            stage_outputs.append(h)
        vgg_outputs = namedtuple(
            "VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3", "relu5_3"]
        )
        return vgg_outputs(*stage_outputs)
def normalize_tensor(x, eps=1e-10):
    """L2-normalize *x* along the channel dimension (dim 1)."""
    channel_norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()
    return x / (channel_norm + eps)
def spatial_average(x, keepdim=True):
    """Average *x* over its spatial dimensions (H, W)."""
    return torch.mean(x, dim=[2, 3], keepdim=keepdim)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/perceptual/__init__.py | threestudio/utils/perceptual/__init__.py | from .perceptual import PerceptualLoss
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/GAN/vae.py | threestudio/utils/GAN/vae.py | # pytorch_diffusion + derived encoder decoder
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from threestudio.utils.GAN.attention import LinearAttention
from threestudio.utils.GAN.util import instantiate_from_config
def get_timestep_embedding(timesteps, embedding_dim):
    """
    This matches the implementation in Denoising Diffusion Probabilistic Models:
    From Fairseq.
    Build sinusoidal embeddings.
    This matches the implementation in tensor2tensor, but differs slightly
    from the description in Section 3.5 of "Attention Is All You Need".
    """
    assert len(timesteps.shape) == 1

    half_dim = embedding_dim // 2
    # geometric frequency schedule from 1 down to 1/10000
    freqs = torch.exp(
        torch.arange(half_dim, dtype=torch.float32)
        * (-math.log(10000) / (half_dim - 1))
    ).to(device=timesteps.device)
    args = timesteps.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(args), torch.cos(args)], dim=1)
    if embedding_dim % 2 == 1:
        # zero-pad one column so the width matches embedding_dim
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
def nonlinearity(x):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def Normalize(in_channels, num_groups=32):
    """Return a BatchNorm2d normalization layer.

    *num_groups* is accepted for GroupNorm API compatibility but unused by
    this BatchNorm-based variant.
    """
    return torch.nn.BatchNorm2d(in_channels)
class Upsample(nn.Module):
    """2x nearest-neighbor upsampling, optionally followed by a 3x3 conv."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if with_conv:
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=3, stride=1, padding=1
            )

    def forward(self, x):
        out = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        return self.conv(out) if self.with_conv else out
class Downsample(nn.Module):
    """2x spatial downsampling via strided 3x3 conv or average pooling."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if with_conv:
            # torch convs only support symmetric padding, so the required
            # asymmetric (right/bottom) padding is applied in forward()
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=3, stride=2, padding=0
            )

    def forward(self, x):
        if not self.with_conv:
            return torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        padded = torch.nn.functional.pad(x, (0, 1, 0, 1), mode="constant", value=0)
        return self.conv(padded)
class ResnetBlock(nn.Module):
    """Pre-activation residual block with optional timestep conditioning.

    When input and output channel counts differ, the skip connection is
    projected by a 3x3 conv (``conv_shortcut=True``) or a 1x1 conv.
    """

    def __init__(
        self,
        *,
        in_channels,
        out_channels=None,
        conv_shortcut=False,
        dropout,
        temb_channels=512,
    ):
        super().__init__()
        if out_channels is None:
            out_channels = in_channels
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(
            in_channels, out_channels, kernel_size=3, stride=1, padding=1
        )
        if temb_channels > 0:
            # projects the timestep embedding into this block's channel space
            self.temb_proj = torch.nn.Linear(temb_channels, out_channels)
        self.norm2 = Normalize(out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1
        )
        if in_channels != out_channels:
            if conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(
                    in_channels, out_channels, kernel_size=3, stride=1, padding=1
                )
            else:
                self.nin_shortcut = torch.nn.Conv2d(
                    in_channels, out_channels, kernel_size=1, stride=1, padding=0
                )

    def forward(self, x, temb):
        """Apply the residual block; *temb* may be None when unconditioned."""
        h = self.conv1(nonlinearity(self.norm1(x)))
        if temb is not None:
            h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
        h = self.conv2(self.dropout(nonlinearity(self.norm2(h))))

        skip = x
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                skip = self.conv_shortcut(x)
            else:
                skip = self.nin_shortcut(x)
        return skip + h
class LinAttnBlock(LinearAttention):
    """Single-head linear attention configured to match AttnBlock usage."""

    def __init__(self, in_channels):
        # one head with full channel width mirrors AttnBlock's interface
        super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
class AttnBlock(nn.Module):
    """Single-head self-attention over the spatial positions of a feature map."""

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )
        self.k = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )
        self.v = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )
        self.proj_out = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )

    def forward(self, x):
        """Attend over all h*w positions; returns x plus the attention output."""
        normed = self.norm(x)
        q = self.q(normed)
        k = self.k(normed)
        v = self.v(normed)

        b, c, h, w = q.shape
        n = h * w
        # scaled dot-product scores: (b, n_query, n_key)
        scores = torch.bmm(q.reshape(b, c, n).permute(0, 2, 1), k.reshape(b, c, n))
        scores = scores * (int(c) ** (-0.5))
        weights = torch.nn.functional.softmax(scores, dim=2)

        # weighted sum of values, back to spatial layout
        out = torch.bmm(v.reshape(b, c, n), weights.permute(0, 2, 1))
        out = out.reshape(b, c, h, w)
        return x + self.proj_out(out)
def make_attn(in_channels, attn_type="vanilla"):
    """Factory for an attention layer: 'vanilla', 'linear', or 'none'."""
    assert attn_type in ["vanilla", "linear", "none"], f"attn_type {attn_type} unknown"
    print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
    if attn_type == "vanilla":
        return AttnBlock(in_channels)
    if attn_type == "none":
        # nn.Identity ignores its constructor argument
        return nn.Identity(in_channels)
    return LinAttnBlock(in_channels)
class Model(nn.Module):
    """Timestep-conditioned U-Net style network (DDPM backbone).

    Downsamples the input through ``len(ch_mult)`` resolution levels with
    ``num_res_blocks`` resnet blocks per level, runs a middle
    resnet/attention/resnet stack, then upsamples back while concatenating
    skip activations saved on the way down.
    """

    def __init__(
        self,
        *,
        ch,
        out_ch,
        ch_mult=(1, 2, 4, 8),
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        in_channels,
        resolution,
        use_timestep=True,
        use_linear_attn=False,
        attn_type="vanilla",
    ):
        super().__init__()
        if use_linear_attn:
            attn_type = "linear"
        self.ch = ch
        self.temb_ch = self.ch * 4  # width of the timestep-embedding MLP
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        self.use_timestep = use_timestep
        if self.use_timestep:
            # timestep embedding: two-layer MLP applied to sinusoidal features
            self.temb = nn.Module()
            self.temb.dense = nn.ModuleList(
                [
                    torch.nn.Linear(self.ch, self.temb_ch),
                    torch.nn.Linear(self.temb_ch, self.temb_ch),
                ]
            )

        # downsampling
        self.conv_in = torch.nn.Conv2d(
            in_channels, self.ch, kernel_size=3, stride=1, padding=1
        )

        curr_res = resolution
        in_ch_mult = (1,) + tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                # attach attention only at the requested spatial resolutions
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            skip_in = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                # the last block of each level consumes the skip from one
                # resolution level lower, hence the different skip width
                if i_block == self.num_res_blocks:
                    skip_in = ch * in_ch_mult[i_level]
                block.append(
                    ResnetBlock(
                        in_channels=block_in + skip_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(
            block_in, out_ch, kernel_size=3, stride=1, padding=1
        )

    def forward(self, x, t=None, context=None):
        """Run the U-Net on *x*.

        *t* is required when ``use_timestep`` is set; *context* (if given)
        is concatenated to *x* along the channel axis.
        """
        # assert x.shape[2] == x.shape[3] == self.resolution
        if context is not None:
            # assume aligned context, cat along channel axis
            x = torch.cat((x, context), dim=1)
        if self.use_timestep:
            # timestep embedding
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)
            temb = self.temb.dense[1](temb)
        else:
            temb = None

        # downsampling; hs collects skip activations for the up path
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                # concatenate the matching skip activation from the down path
                h = self.up[i_level].block[i_block](
                    torch.cat([h, hs.pop()], dim=1), temb
                )
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h

    def get_last_layer(self):
        """Return the weight tensor of the final output convolution."""
        return self.conv_out.weight
class Encoder(nn.Module):
    """Convolutional encoder producing a latent feature map.

    Mirrors :class:`Model`'s down path (no timestep conditioning): resnet
    blocks with optional attention at selected resolutions, strided
    downsampling between levels, a middle stack, and a final conv emitting
    ``z_channels`` channels (doubled when ``double_z`` is set, e.g. for
    mean/logvar of a Gaussian posterior).
    """

    def __init__(
        self,
        *,
        ch,
        out_ch,
        ch_mult=(1, 2, 4, 8),
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        in_channels,
        resolution,
        z_channels,
        double_z=True,
        use_linear_attn=False,
        attn_type="vanilla",
        **ignore_kwargs,
    ):
        super().__init__()
        if use_linear_attn:
            attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0  # encoder is not timestep-conditioned
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.attn_resolutions = attn_resolutions

        # downsampling
        self.conv_in = torch.nn.Conv2d(
            in_channels, self.ch, kernel_size=3, stride=1, padding=1
        )

        curr_res = resolution
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                # attention only at the requested spatial resolutions
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )
        # middle attention is only built when any attention resolution exists
        if len(attn_resolutions) > 0:
            self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(
            block_in,
            2 * z_channels if double_z else z_channels,
            kernel_size=3,
            stride=1,
            padding=1,
        )

    def forward(self, x):
        """Encode image batch *x* into the latent feature map."""
        # timestep embedding
        temb = None

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        if len(self.attn_resolutions) > 0:
            h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class Decoder(nn.Module):
    """Decoder with an extra low-resolution RGB skip path.

    ``forward`` expects a latent *z* whose first three channels are used as
    a low-resolution RGB image; that image is upsampled 4x and blended with
    the decoded output in logit space before the final sigmoid.
    """

    def __init__(
        self,
        *,
        ch,
        out_ch,
        ch_mult=(1, 2, 4, 8),
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        in_channels,
        resolution,
        z_channels,
        give_pre_end=False,
        tanh_out=False,
        use_linear_attn=False,
        attn_type="vanilla",
        **ignorekwargs,
    ):
        super().__init__()
        if use_linear_attn:
            attn_type = "linear"
        self.ch = ch
        # self.temb_ch = 3
        self.temb_ch = 64
        # self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out
        self.attn_resolutions = attn_resolutions

        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,) + tuple(ch_mult)
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)
        print(
            "Working with z of shape {} = {} dimensions.".format(
                self.z_shape, np.prod(self.z_shape)
            )
        )

        # z to block_in; conv_in3 handles z with 3 extra (RGB) channels
        self.conv_in = torch.nn.Conv2d(
            z_channels, block_in, kernel_size=3, stride=1, padding=1
        )
        self.conv_in3 = torch.nn.Conv2d(
            z_channels + 3, block_in, kernel_size=3, stride=1, padding=1
        )

        # middle
        # NOTE(review): these mid blocks are built but their calls in
        # forward() are commented out, so their parameters are unused.
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(
            in_channels=block_in,
            out_channels=block_in,
            temb_channels=self.temb_ch,
            dropout=dropout,
        )

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                # NOTE(review): rgb_conv / rgb_cat_conv are registered here but
                # never used in forward() below — presumably leftovers from an
                # intermediate-RGB variant; verify before removing.
                up.rgb_conv = torch.nn.Conv2d(
                    block_in + 3, 3, kernel_size=3, stride=1, padding=1
                )
                up.rgb_cat_conv = torch.nn.Conv2d(
                    block_in + 3, block_in, kernel_size=3, stride=1, padding=1
                )
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(
            block_in, out_ch, kernel_size=3, stride=1, padding=1
        )

    def forward(self, z, temb=None):
        """Decode latent *z*; *temb* optionally conditions the resnet blocks."""
        # assert z.shape[1:] == self.z_shape[1:]
        self.last_z_shape = z.shape

        # timestep embedding
        # temb = None

        # z to block_in
        # first three channels are taken as a low-res RGB skip image
        # NOTE(review): assumes z[:, :3] holds values in (0, 1) suitable for
        # the logit blend at the end — confirm against callers.
        rgb = z[:, :3]
        if z.shape[1] == self.z_shape[1] + 3:
            h = self.conv_in3(z)
        else:
            h = self.conv_in(z)

        # middle (intentionally skipped — see __init__ note)
        # h = self.mid.block_1(h, temb)
        # h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        if self.give_pre_end:
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        # blend the 4x-upsampled RGB skip with the decoder output in logit
        # space; eps clamps rgb away from {0, 1} to keep logit finite
        rgb = torch.nn.functional.interpolate(rgb, scale_factor=4.0, mode="bilinear")
        rgb = torch.sigmoid(torch.logit(rgb, eps=1e-3) + h)

        return rgb
class SimpleDecoder(nn.Module):
    """Minimal decoder: 1x1 conv, three resnet blocks, 1x1 conv, 2x upsample,
    then norm / swish / 3x3 output conv."""

    def __init__(self, in_channels, out_channels, *args, **kwargs):
        super().__init__()
        self.model = nn.ModuleList(
            [
                nn.Conv2d(in_channels, in_channels, 1),
                ResnetBlock(
                    in_channels=in_channels,
                    out_channels=2 * in_channels,
                    temb_channels=0,
                    dropout=0.0,
                ),
                ResnetBlock(
                    in_channels=2 * in_channels,
                    out_channels=4 * in_channels,
                    temb_channels=0,
                    dropout=0.0,
                ),
                ResnetBlock(
                    in_channels=4 * in_channels,
                    out_channels=2 * in_channels,
                    temb_channels=0,
                    dropout=0.0,
                ),
                nn.Conv2d(2 * in_channels, in_channels, 1),
                Upsample(in_channels, with_conv=True),
            ]
        )
        # end
        self.norm_out = Normalize(in_channels)
        self.conv_out = torch.nn.Conv2d(
            in_channels, out_channels, kernel_size=3, stride=1, padding=1
        )

    def forward(self, x):
        # indices 1-3 are ResnetBlocks, which take an (unused) temb argument
        for idx, layer in enumerate(self.model):
            x = layer(x, None) if idx in (1, 2, 3) else layer(x)
        return self.conv_out(nonlinearity(self.norm_out(x)))
class UpsampleDecoder(nn.Module):
    """Decoder that alternates resnet-block stacks with 2x upsampling stages.

    Each resolution level applies ``num_res_blocks + 1`` resnet blocks, then
    (except for the last level) a conv upsample.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        ch,
        num_res_blocks,
        resolution,
        ch_mult=(2, 2),
        dropout=0.0,
    ):
        super().__init__()
        # upsampling
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        block_in = in_channels
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        # Fix: res_blocks was never initialized, so the append below raised
        # AttributeError as soon as the module was constructed.
        self.res_blocks = nn.ModuleList()
        self.upsample_blocks = nn.ModuleList()
        self.rgb_blocks = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            res_block = []
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                res_block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        temb_channels=self.temb_ch,
                        dropout=dropout,
                    )
                )
                block_in = block_out
            self.res_blocks.append(nn.ModuleList(res_block))
            if i_level != self.num_resolutions - 1:
                self.upsample_blocks.append(Upsample(block_in, True))
                curr_res = curr_res * 2
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(
            block_in, out_channels, kernel_size=3, stride=1, padding=1
        )

    def forward(self, x):
        """Decode *x* by running the level-wise resnet/upsample stages."""
        h = x
        for k, i_level in enumerate(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.res_blocks[i_level][i_block](h, None)
            if i_level != self.num_resolutions - 1:
                h = self.upsample_blocks[k](h)
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class LatentRescaler(nn.Module):
    """Rescale a latent by *factor*: resnet stack, interpolation, attention,
    resnet stack, then a 1x1 channel projection."""

    def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
        super().__init__()
        self.factor = factor
        self.conv_in = nn.Conv2d(
            in_channels, mid_channels, kernel_size=3, stride=1, padding=1
        )

        def _res_stack():
            # `depth` identical mid-channel resnet blocks
            return nn.ModuleList(
                ResnetBlock(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    temb_channels=0,
                    dropout=0.0,
                )
                for _ in range(depth)
            )

        self.res_block1 = _res_stack()
        self.attn = AttnBlock(mid_channels)
        self.res_block2 = _res_stack()
        self.conv_out = nn.Conv2d(
            mid_channels,
            out_channels,
            kernel_size=1,
        )

    def forward(self, x):
        h = self.conv_in(x)
        for blk in self.res_block1:
            h = blk(h, None)
        target_hw = (
            int(round(h.shape[2] * self.factor)),
            int(round(h.shape[3] * self.factor)),
        )
        h = torch.nn.functional.interpolate(h, size=target_hw)
        h = self.attn(h)
        for blk in self.res_block2:
            h = blk(h, None)
        return self.conv_out(h)
class MergedRescaleEncoder(nn.Module):
    """Encoder followed by a LatentRescaler, fused into a single module."""

    def __init__(
        self,
        in_channels,
        ch,
        resolution,
        out_ch,
        num_res_blocks,
        attn_resolutions,
        dropout=0.0,
        resamp_with_conv=True,
        ch_mult=(1, 2, 4, 8),
        rescale_factor=1.0,
        rescale_module_depth=1,
    ):
        super().__init__()
        # encoder emits its widest feature map; the rescaler maps it to out_ch
        intermediate_chn = ch * ch_mult[-1]
        self.encoder = Encoder(
            in_channels=in_channels,
            num_res_blocks=num_res_blocks,
            ch=ch,
            ch_mult=ch_mult,
            z_channels=intermediate_chn,
            double_z=False,
            resolution=resolution,
            attn_resolutions=attn_resolutions,
            dropout=dropout,
            resamp_with_conv=resamp_with_conv,
            out_ch=None,
        )
        self.rescaler = LatentRescaler(
            factor=rescale_factor,
            in_channels=intermediate_chn,
            mid_channels=intermediate_chn,
            out_channels=out_ch,
            depth=rescale_module_depth,
        )

    def forward(self, x):
        return self.rescaler(self.encoder(x))
class MergedRescaleDecoder(nn.Module):
    """LatentRescaler followed by a Decoder, fused into a single module."""

    def __init__(
        self,
        z_channels,
        out_ch,
        resolution,
        num_res_blocks,
        attn_resolutions,
        ch,
        ch_mult=(1, 2, 4, 8),
        dropout=0.0,
        resamp_with_conv=True,
        rescale_factor=1.0,
        rescale_module_depth=1,
    ):
        super().__init__()
        # rescaler widens z to the decoder's expected latent width
        tmp_chn = z_channels * ch_mult[-1]
        self.decoder = Decoder(
            out_ch=out_ch,
            z_channels=tmp_chn,
            attn_resolutions=attn_resolutions,
            dropout=dropout,
            resamp_with_conv=resamp_with_conv,
            in_channels=None,
            num_res_blocks=num_res_blocks,
            ch_mult=ch_mult,
            resolution=resolution,
            ch=ch,
        )
        self.rescaler = LatentRescaler(
            factor=rescale_factor,
            in_channels=z_channels,
            mid_channels=tmp_chn,
            out_channels=tmp_chn,
            depth=rescale_module_depth,
        )

    def forward(self, x):
        return self.decoder(self.rescaler(x))
class Upsampler(nn.Module):
    """Upscale from *in_size* to *out_size* via a LatentRescaler + Decoder."""

    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
        super().__init__()
        assert out_size >= in_size
        # each decoder level doubles resolution; the rescaler absorbs the
        # non-power-of-two remainder
        num_blocks = int(np.log2(out_size // in_size)) + 1
        factor_up = 1.0 + (out_size % in_size)
        print(
            f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}"
        )
        self.rescaler = LatentRescaler(
            factor=factor_up,
            in_channels=in_channels,
            mid_channels=2 * in_channels,
            out_channels=in_channels,
        )
        self.decoder = Decoder(
            out_ch=out_channels,
            resolution=out_size,
            z_channels=in_channels,
            num_res_blocks=2,
            attn_resolutions=[],
            in_channels=None,
            ch=in_channels,
            ch_mult=[ch_mult for _ in range(num_blocks)],
        )

    def forward(self, x):
        return self.decoder(self.rescaler(x))
class Resize(nn.Module):
    """Resize a feature map by interpolation.

    The learned (conv) variant is not implemented; requesting it raises
    NotImplementedError.
    """

    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
        super().__init__()
        self.with_conv = learned
        self.mode = mode
        if self.with_conv:
            # Fix: `self.__class__.__name` (missing trailing underscores)
            # raised AttributeError before the intended NotImplementedError.
            print(
                f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode"
            )
            raise NotImplementedError()
            # unreachable sketch of the learned path, kept from the original
            assert in_channels is not None
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=4, stride=2, padding=1
            )

    def forward(self, x, scale_factor=1.0):
        """Return *x* unchanged at scale 1.0, else interpolate by the factor."""
        if scale_factor == 1.0:
            return x
        return torch.nn.functional.interpolate(
            x, mode=self.mode, align_corners=False, scale_factor=scale_factor
        )
class FirstStagePostProcessor(nn.Module):
    """Encode inputs with a frozen pretrained first-stage model, then refine
    the latent with a projection conv and resnet/downsample stages."""

    def __init__(
        self,
        ch_mult: list,
        in_channels,
        pretrained_model: nn.Module = None,
        reshape=False,
        n_channels=None,
        dropout=0.0,
        pretrained_config=None,
    ):
        super().__init__()
        if pretrained_config is None:
            assert (
                pretrained_model is not None
            ), 'Either "pretrained_model" or "pretrained_config" must not be None'
            self.pretrained_model = pretrained_model
        else:
            assert (
                pretrained_config is not None
            ), 'Either "pretrained_model" or "pretrained_config" must not be None'
            self.instantiate_pretrained(pretrained_config)

        self.do_reshape = reshape

        if n_channels is None:
            n_channels = self.pretrained_model.encoder.ch

        # NOTE(review): this module's Normalize ignores num_groups (BatchNorm
        # variant) — the num_groups argument here has no effect; confirm.
        self.proj_norm = Normalize(in_channels, num_groups=in_channels // 2)
        self.proj = nn.Conv2d(
            in_channels, n_channels, kernel_size=3, stride=1, padding=1
        )

        blocks = []
        downs = []
        ch_in = n_channels
        for m in ch_mult:
            blocks.append(
                ResnetBlock(
                    in_channels=ch_in, out_channels=m * n_channels, dropout=dropout
                )
            )
            ch_in = m * n_channels
            downs.append(Downsample(ch_in, with_conv=False))

        self.model = nn.ModuleList(blocks)
        self.downsampler = nn.ModuleList(downs)

    def instantiate_pretrained(self, config):
        """Build the first-stage model from *config* and freeze it."""
        model = instantiate_from_config(config)
        self.pretrained_model = model.eval()
        # self.pretrained_model.train = False
        for param in self.pretrained_model.parameters():
            param.requires_grad = False

    @torch.no_grad()
    def encode_with_pretrained(self, x):
        """Encode *x* with the frozen first-stage model (gradient-free)."""
        c = self.pretrained_model.encode(x)
        # NOTE(review): DiagonalGaussianDistribution is not imported in this
        # module — this isinstance check raises NameError if reached;
        # presumably it should come from ldm's distributions module. Verify.
        if isinstance(c, DiagonalGaussianDistribution):
            c = c.mode()
        return c

    def forward(self, x):
        """Encode, project, then run the resnet/downsample refinement stages."""
        z_fs = self.encode_with_pretrained(x)
        z = self.proj_norm(z_fs)
        z = self.proj(z)
        z = nonlinearity(z)

        for submodel, downmodel in zip(self.model, self.downsampler):
            z = submodel(z, temb=None)
            z = downmodel(z)

        if self.do_reshape:
            # flatten spatial dims into a token sequence
            z = rearrange(z, "b c h w -> b (h w) c")
        return z
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/GAN/discriminator.py | threestudio/utils/GAN/discriminator.py | import functools
import torch
import torch.nn as nn
def count_params(model):
    """Return the total number of scalar parameters in *model*."""
    return sum(param.numel() for param in model.parameters())
class ActNorm(nn.Module):
    """Activation normalization (Glow-style): per-channel affine
    ``y = scale * (x + loc)``, with ``loc``/``scale`` data-initialized from
    the first training batch so the output is zero-mean / unit-variance.
    """

    def __init__(
        self, num_features, logdet=False, affine=True, allow_reverse_init=False
    ):
        # Only the affine variant is supported.
        assert affine
        super().__init__()
        self.logdet = logdet
        self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.allow_reverse_init = allow_reverse_init
        # 0 until the data-dependent init has run; buffer so it is saved in
        # checkpoints.
        self.register_buffer("initialized", torch.tensor(0, dtype=torch.uint8))

    def initialize(self, input):
        # Data-dependent init: set loc/scale from per-channel statistics of
        # the given batch.
        with torch.no_grad():
            flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = (
                flatten.mean(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            std = (
                flatten.std(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            self.loc.data.copy_(-mean)
            # Epsilon guards against zero std.
            self.scale.data.copy_(1 / (std + 1e-6))

    def forward(self, input, reverse=False):
        if reverse:
            return self.reverse(input)
        # Accept 2-D (B, C) input by padding singleton spatial dims.
        if len(input.shape) == 2:
            input = input[:, :, None, None]
            squeeze = True
        else:
            squeeze = False
        _, _, height, width = input.shape
        if self.training and self.initialized.item() == 0:
            self.initialize(input)
            self.initialized.fill_(1)
        h = self.scale * (input + self.loc)
        if squeeze:
            h = h.squeeze(-1).squeeze(-1)
        if self.logdet:
            # Log-determinant of the per-pixel affine map, replicated per
            # batch element.
            log_abs = torch.log(torch.abs(self.scale))
            logdet = height * width * torch.sum(log_abs)
            logdet = logdet * torch.ones(input.shape[0]).to(input)
            return h, logdet
        return h

    def reverse(self, output):
        # Inverse map: x = y / scale - loc.
        if self.training and self.initialized.item() == 0:
            if not self.allow_reverse_init:
                raise RuntimeError(
                    "Initializing ActNorm in reverse direction is "
                    "disabled by default. Use allow_reverse_init=True to enable."
                )
            else:
                self.initialize(output)
                self.initialized.fill_(1)
        if len(output.shape) == 2:
            output = output[:, :, None, None]
            squeeze = True
        else:
            squeeze = False
        h = output / self.scale - self.loc
        if squeeze:
            h = h.squeeze(-1).squeeze(-1)
        return h
class AbstractEncoder(nn.Module):
    """Interface for conditioning encoders; subclasses implement ``encode``."""

    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        # Subclasses must provide the actual encoding.
        raise NotImplementedError
class Labelator(AbstractEncoder):
    """Net2Net interface for class-conditional models: wraps integer labels
    in the quantizer-style ``(quant, diff, info)`` return signature."""

    def __init__(self, n_classes, quantize_interface=True):
        super().__init__()
        self.n_classes = n_classes
        self.quantize_interface = quantize_interface

    def encode(self, c):
        # Add a trailing singleton dim so labels look like token sequences.
        c = c[:, None]
        if not self.quantize_interface:
            return c
        # Mimic a VQ encode() return: (codes, loss, (_, _, indices)).
        return c, None, [None, None, c.long()]
class SOSProvider(AbstractEncoder):
    """Supplies a constant start-of-sequence token, used for unconditional
    training."""

    def __init__(self, sos_token, quantize_interface=True):
        super().__init__()
        self.sos_token = sos_token
        self.quantize_interface = quantize_interface

    def encode(self, x):
        # One SOS token per batch element, placed on the data's device.
        c = (torch.ones(x.shape[0], 1) * self.sos_token).long().to(x.device)
        if self.quantize_interface:
            return c, None, [None, None, c]
        return c
def weights_init(m):
    """DCGAN-style init: N(0, 0.02) for conv weights; N(1, 0.02) weight and
    zero bias for batch-norm layers. Other modules are left untouched."""
    name = m.__class__.__name__
    if "Conv" in name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator as in Pix2Pix
    --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
    """

    def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int) -- the number of channels in input images
            ndf (int) -- the number of filters in the last conv layer
            n_layers (int) -- the number of conv layers in the discriminator
            norm_layer -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        # Normalization: BatchNorm by default, ActNorm for VQGAN-style setups.
        if not use_actnorm:
            norm_layer = nn.BatchNorm2d
        else:
            norm_layer = ActNorm
        if (
            type(norm_layer) == functools.partial
        ):  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d
        kw = 4
        padw = 1
        # First layer: no normalization.
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True),
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(
                    ndf * nf_mult_prev,
                    ndf * nf_mult,
                    kernel_size=kw,
                    stride=2,
                    padding=padw,
                    bias=use_bias,
                ),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True),
            ]
        # Penultimate layer keeps resolution (stride 1).
        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2d(
                ndf * nf_mult_prev,
                ndf * nf_mult,
                kernel_size=kw,
                stride=1,
                padding=padw,
                bias=use_bias,
            ),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True),
        ]
        sequence += [
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]  # output 1 channel prediction map
        self.main = nn.Sequential(*sequence)

    def forward(self, input):
        """Standard forward."""
        return self.main(input)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/GAN/util.py | threestudio/utils/GAN/util.py | import importlib
import multiprocessing as mp
from collections import abc
from functools import partial
from inspect import isfunction
from queue import Queue
from threading import Thread
import numpy as np
import torch
from einops import rearrange
from PIL import Image, ImageDraw, ImageFont
def log_txt_as_img(wh, xc, size=10):
    """Render a batch of captions as images for logging.

    Args:
        wh: (width, height) of each rendered image.
        xc: list of caption strings, one per batch element.
        size: font size in points.

    Returns:
        Tensor of shape (B, 3, H, W) with values in [-1, 1].
    """
    # wh a tuple of (width, height)
    # xc a list of captions to plot
    b = len(xc)
    txts = list()
    for bi in range(b):
        txt = Image.new("RGB", wh, color="white")
        draw = ImageDraw.Draw(txt)
        # NOTE(review): assumes the font file exists relative to the working
        # directory — confirm it is shipped alongside the code.
        font = ImageFont.truetype("data/DejaVuSans.ttf", size=size)
        # Hard-wrap the caption: ~40 characters per line at 256px width.
        nc = int(40 * (wh[0] / 256))
        lines = "\n".join(
            xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)
        )
        try:
            draw.text((0, 0), lines, fill="black", font=font)
        except UnicodeEncodeError:
            print("Cant encode string for logging. Skipping.")
        # HWC uint8 -> CHW float in [-1, 1].
        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts
def ismap(x):
    """True for 4-D tensors with more than 3 channels (map-like batches)."""
    return isinstance(x, torch.Tensor) and x.ndim == 4 and x.shape[1] > 3
def isimage(x):
    """True for 4-D tensors with 1 or 3 channels (image batches)."""
    return isinstance(x, torch.Tensor) and x.ndim == 4 and x.shape[1] in (1, 3)
def exists(x):
    """Return True when *x* is not None."""
    return x is not None
def default(val, d):
    """Return *val* unless it is None; then return *d*, calling it first when
    it is a plain function."""
    if val is not None:
        return val
    return d() if isfunction(d) else d
def mean_flat(tensor):
    """
    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    Take the mean over all non-batch dimensions.
    """
    non_batch_dims = tuple(range(1, tensor.ndim))
    return tensor.mean(dim=non_batch_dims)
def count_params(model, verbose=False):
    """Count a model's scalar parameters; optionally print the total in
    millions."""
    total = sum(p.numel() for p in model.parameters())
    if verbose:
        print(f"{model.__class__.__name__} has {total * 1.e-6:.2f} M params.")
    return total
def instantiate_from_config(config):
    """Instantiate the object described by *config*: a dict with a dotted
    ``target`` path plus optional ``params`` kwargs. The two sentinel strings
    used by latent-diffusion configs map to None."""
    if "target" not in config:
        if config in ("__is_first_stage__", "__is_unconditional__"):
            return None
        raise KeyError("Expected key `target` to instantiate.")
    cls = get_obj_from_str(config["target"])
    return cls(**config.get("params", dict()))
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``pkg.mod.Attr`` to the named object,
    optionally reloading the module first."""
    module_name, attr_name = string.rsplit(".", 1)
    if reload:
        importlib.reload(importlib.import_module(module_name))
    module = importlib.import_module(module_name, package=None)
    return getattr(module, attr_name)
def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False):
# create dummy dataset instance
# run prefetching
if idx_to_fn:
res = func(data, worker_id=idx)
else:
res = func(data)
Q.put([idx, res])
Q.put("Done")
def parallel_data_prefetch(
    func: callable,
    data,
    n_proc,
    target_data_type="ndarray",
    cpu_intensive=True,
    use_worker_id=False,
):
    """Apply *func* to *data* split across *n_proc* workers and gather results.

    Args:
        func: callable applied to each chunk (receives ``worker_id=`` when
            ``use_worker_id`` is True).
        data: an ndarray or any iterable; dicts contribute only their values.
        n_proc: number of worker processes/threads.
        target_data_type: "ndarray" or "list" — controls splitting and the
            type of the gathered result.
        cpu_intensive: True -> multiprocessing processes; False -> threads.
        use_worker_id: pass each worker's index to *func*.

    Returns:
        Concatenated ndarray, flattened list, or the raw per-worker results,
        depending on ``target_data_type``.
    """
    # if target_data_type not in ["ndarray", "list"]:
    #     raise ValueError(
    #         "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray."
    #     )
    if isinstance(data, np.ndarray) and target_data_type == "list":
        raise ValueError("list expected but function got ndarray.")
    elif isinstance(data, abc.Iterable):
        if isinstance(data, dict):
            print(
                f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.'
            )
            data = list(data.values())
        if target_data_type == "ndarray":
            data = np.asarray(data)
        else:
            data = list(data)
    else:
        raise TypeError(
            f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}."
        )
    # Processes avoid the GIL for CPU-bound work; threads suffice for I/O.
    if cpu_intensive:
        Q = mp.Queue(1000)
        proc = mp.Process
    else:
        Q = Queue(1000)
        proc = Thread
    # spawn processes
    if target_data_type == "ndarray":
        arguments = [
            [func, Q, part, i, use_worker_id]
            for i, part in enumerate(np.array_split(data, n_proc))
        ]
    else:
        # Even-sized list chunks (last one may be shorter).
        step = (
            int(len(data) / n_proc + 1)
            if len(data) % n_proc != 0
            else int(len(data) / n_proc)
        )
        arguments = [
            [func, Q, part, i, use_worker_id]
            for i, part in enumerate(
                [data[i : i + step] for i in range(0, len(data), step)]
            )
        ]
    processes = []
    for i in range(n_proc):
        p = proc(target=_do_parallel_data_prefetch, args=arguments[i])
        processes += [p]
    # start processes
    print(f"Start prefetching...")
    import time

    start = time.time()
    gather_res = [[] for _ in range(n_proc)]
    try:
        for p in processes:
            p.start()
        # Each worker pushes its result then a "Done" sentinel; stop once
        # every worker has reported.
        k = 0
        while k < n_proc:
            # get result
            res = Q.get()
            if res == "Done":
                k += 1
            else:
                gather_res[res[0]] = res[1]
    except Exception as e:
        print("Exception: ", e)
        for p in processes:
            p.terminate()
        raise e
    finally:
        for p in processes:
            p.join()
        print(f"Prefetching complete. [{time.time() - start} sec.]")
    if target_data_type == "ndarray":
        if not isinstance(gather_res[0], np.ndarray):
            return np.concatenate([np.asarray(r) for r in gather_res], axis=0)
        # order outputs
        return np.concatenate(gather_res, axis=0)
    elif target_data_type == "list":
        out = []
        for r in gather_res:
            out.extend(r)
        return out
    else:
        return gather_res
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/GAN/network_util.py | threestudio/utils/GAN/network_util.py | # adopted from
# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
# and
# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
# and
# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
#
# thanks!
import math
import os
import numpy as np
import torch
import torch.nn as nn
from einops import repeat
from threestudio.utils.GAN.util import instantiate_from_config
def make_beta_schedule(
    schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3
):
    """Return *n_timestep* diffusion betas as a float64 numpy array.

    Supported schedules: "linear" (linear in sqrt-space), "cosine"
    (Nichol & Dhariwal style), "sqrt_linear", and "sqrt".

    Raises:
        ValueError: for an unknown schedule name.
    """
    if schedule == "linear":
        betas = (
            torch.linspace(
                linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64
            )
            ** 2
        )
    elif schedule == "cosine":
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        )
        alphas = timesteps / (1 + cosine_s) * np.pi / 2
        alphas = torch.cos(alphas).pow(2)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        # Bug fix: np.clip() converted the torch tensor to an ndarray, so the
        # final betas.numpy() raised AttributeError. Clamp in torch instead.
        betas = torch.clamp(betas, min=0, max=0.999)
    elif schedule == "sqrt_linear":
        betas = torch.linspace(
            linear_start, linear_end, n_timestep, dtype=torch.float64
        )
    elif schedule == "sqrt":
        betas = (
            torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
            ** 0.5
        )
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    return betas.numpy()
def make_ddim_timesteps(
    ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True
):
    """Pick the subset of DDPM timesteps a DDIM sampler will visit,
    using either uniform or quadratic spacing."""
    if ddim_discr_method == "uniform":
        stride = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.arange(0, num_ddpm_timesteps, stride)
    elif ddim_discr_method == "quad":
        ddim_timesteps = (
            np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps) ** 2
        ).astype(int)
    else:
        raise NotImplementedError(
            f'There is no ddim discretization method called "{ddim_discr_method}"'
        )
    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
    # add one to get the final alpha values right (the ones from first scale to data during sampling)
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f"Selected timesteps for ddim sampler: {steps_out}")
    return steps_out
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    """Compute the (sigmas, alphas, alphas_prev) arrays for a DDIM sampler
    from cumulative alphas at the selected timesteps."""
    # Cumulative alphas at each selected step, plus each step's predecessor.
    a_t = alphacums[ddim_timesteps]
    a_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
    # Sigma from Eq. (16) of https://arxiv.org/abs/2010.02502.
    sigma_t = eta * np.sqrt((1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev))
    if verbose:
        print(f"Selected alphas for ddim sampler: a_t: {a_t}; a_(t-1): {a_prev}")
        print(
            f"For the chosen value of eta, which is {eta}, "
            f"this results in the following sigma_t schedule for ddim sampler {sigma_t}"
        )
    return sigma_t, a_t, a_prev
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].
    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    n = num_diffusion_timesteps
    betas = [
        min(1 - alpha_bar((i + 1) / n) / alpha_bar(i / n), max_beta)
        for i in range(n)
    ]
    return np.array(betas)
def extract_into_tensor(a, t, x_shape):
    """Gather ``a[t]`` per batch element and reshape the result so it
    broadcasts against a tensor of shape *x_shape*."""
    batch = t.shape[0]
    gathered = a.gather(-1, t)
    trailing_ones = (1,) * (len(x_shape) - 1)
    return gathered.reshape(batch, *trailing_ones)
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        return func(*inputs)
    args = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *args)
class CheckpointFunction(torch.autograd.Function):
    """Gradient checkpointing: forward runs without saving activations, and
    backward re-runs the wrapped function under grad to recompute them."""

    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        # First `length` args are the tensor inputs of run_function; the rest
        # are parameters it depends on implicitly (needed for autograd.grad).
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        # Recompute the forward pass with gradients enabled.
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with torch.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # The first two Nones are the grads for (run_function, length).
        return (None, None) + input_grads
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if repeat_only:
        return repeat(timesteps, "b -> b d", d=dim)
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period)
        * torch.arange(start=0, end=half, dtype=torch.float32)
        / half
    ).to(device=timesteps.device)
    args = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        # Odd dim: pad with one zero column.
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
def zero_module(module):
    """Zero all parameters of *module* in place and return it."""
    for param in module.parameters():
        param.detach().zero_()
    return module
def scale_module(module, scale):
    """Multiply every parameter of *module* by *scale* in place; return the
    module."""
    for param in module.parameters():
        param.detach().mul_(scale)
    return module
def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    reduce_dims = tuple(range(1, tensor.ndim))
    return tensor.mean(dim=reduce_dims)
def normalization(channels):
    """
    Make a standard normalization layer (GroupNorm with 32 groups, computed
    in float32).
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    return GroupNorm32(32, channels)
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
    """SiLU activation ``x * sigmoid(x)``; kept for PyTorch versions that
    predate ``nn.SiLU``."""

    def forward(self, x):
        return torch.sigmoid(x) * x
class GroupNorm32(nn.GroupNorm):
    """GroupNorm computed in float32, then cast back to the input dtype."""

    def forward(self, x):
        normed = super().forward(x.float())
        return normed.type(x.dtype)
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    conv_classes = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dims not in conv_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_classes[dims](*args, **kwargs)
def linear(*args, **kwargs):
    """
    Create a linear module (thin alias over ``nn.Linear``).
    """
    return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    pool_classes = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    if dims not in pool_classes:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_classes[dims](*args, **kwargs)
class HybridConditioner(nn.Module):
    """Holds two conditioning encoders: one for channel-concat conditioning
    and one for cross-attention conditioning, both built from configs."""

    def __init__(self, c_concat_config, c_crossattn_config):
        super().__init__()
        self.concat_conditioner = instantiate_from_config(c_concat_config)
        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)

    def forward(self, c_concat, c_crossattn):
        # Each conditioning is wrapped in a list, matching the UNet's
        # expected {"c_concat": [...], "c_crossattn": [...]} format.
        return {
            "c_concat": [self.concat_conditioner(c_concat)],
            "c_crossattn": [self.crossattn_conditioner(c_crossattn)],
        }
def noise_like(shape, device, repeat=False):
    """Gaussian noise of the given *shape*; when *repeat* is set, a single
    sample is drawn and tiled across the batch dimension."""
    if repeat:
        single = torch.randn((1, *shape[1:]), device=device)
        return single.repeat(shape[0], *((1,) * (len(shape) - 1)))
    return torch.randn(shape, device=device)
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/GAN/mobilenet.py | threestudio/utils/GAN/mobilenet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ["MobileNetV3", "mobilenetv3"]
def conv_bn(
    inp,
    oup,
    stride,
    conv_layer=nn.Conv2d,
    norm_layer=nn.BatchNorm2d,
    nlin_layer=nn.ReLU,
):
    """3x3 conv (given stride, padding 1, no bias) -> norm -> nonlinearity."""
    layers = [
        conv_layer(inp, oup, 3, stride, 1, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(
    inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU
):
    """Pointwise (1x1, stride 1, no bias) conv -> norm -> nonlinearity."""
    layers = [
        conv_layer(inp, oup, 1, 1, 0, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True),
    ]
    return nn.Sequential(*layers)
class Hswish(nn.Module):
    """Hard-swish activation: ``x * ReLU6(x + 3) / 6``."""

    def __init__(self, inplace=True):
        super(Hswish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3.0, inplace=self.inplace) / 6.0
        return x * gate
class Hsigmoid(nn.Module):
    """Hard-sigmoid activation: ``ReLU6(x + 3) / 6``."""

    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class SEModule(nn.Module):
    """Squeeze-and-excitation: global average pool -> bottleneck MLP ->
    per-channel gates multiplied back onto the input."""

    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            Hsigmoid()
            # nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        gates = self.avg_pool(x).view(batch, channels)
        gates = self.fc(gates).view(batch, channels, 1, 1)
        return x * gates.expand_as(x)
class Identity(nn.Module):
    """Pass-through module; *channel* is accepted for interface parity with
    SEModule and ignored."""

    def __init__(self, channel):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
def make_divisible(x, divisible_by=8):
    """Round *x* up to the nearest multiple of *divisible_by*."""
    import numpy as np

    return int(np.ceil(float(x) / divisible_by) * divisible_by)
class MobileBottleneck(nn.Module):
    """MobileNetV3 inverted-residual block:
    pointwise expand -> depthwise -> optional SE -> pointwise project,
    with an identity shortcut when stride is 1 and widths match."""

    def __init__(self, inp, oup, kernel, stride, exp, se=False, nl="RE"):
        super(MobileBottleneck, self).__init__()
        assert stride in [1, 2]
        assert kernel in [3, 5]
        padding = (kernel - 1) // 2
        # Shortcut only when the block preserves resolution and width.
        self.use_res_connect = stride == 1 and inp == oup
        conv_layer = nn.Conv2d
        norm_layer = nn.BatchNorm2d
        if nl == "RE":
            nlin_layer = nn.ReLU  # or ReLU6
        elif nl == "HS":
            nlin_layer = Hswish
        else:
            raise NotImplementedError
        SELayer = SEModule if se else Identity
        self.conv = nn.Sequential(
            # pointwise expansion
            conv_layer(inp, exp, 1, 1, 0, bias=False),
            norm_layer(exp),
            nlin_layer(inplace=True),
            # depthwise
            conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
            norm_layer(exp),
            SELayer(exp),
            nlin_layer(inplace=True),
            # pointwise projection (linear, no activation)
            conv_layer(exp, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        )

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class MobileNetV3(nn.Module):
    """MobileNetV3 image classifier (Howard et al., 2019).

    Args:
        n_class: number of output classes.
        input_size: input resolution; must be a multiple of 32.
        dropout: dropout probability before the final linear classifier.
        mode: "large" or "small" architecture variant.
        width_mult: channel width multiplier.
    """

    def __init__(
        self, n_class=1000, input_size=224, dropout=0.0, mode="small", width_mult=1.0
    ):
        super(MobileNetV3, self).__init__()
        input_channel = 16
        last_channel = 1280
        if mode == "large":
            # refer to Table 1 in paper
            mobile_setting = [
                # k, exp, c, se, nl, s,
                [3, 16, 16, False, "RE", 1],
                [3, 64, 24, False, "RE", 2],
                [3, 72, 24, False, "RE", 1],
                [5, 72, 40, True, "RE", 2],
                [5, 120, 40, True, "RE", 1],
                [5, 120, 40, True, "RE", 1],
                [3, 240, 80, False, "HS", 2],
                [3, 200, 80, False, "HS", 1],
                [3, 184, 80, False, "HS", 1],
                [3, 184, 80, False, "HS", 1],
                [3, 480, 112, True, "HS", 1],
                [3, 672, 112, True, "HS", 1],
                [5, 672, 160, True, "HS", 2],
                [5, 960, 160, True, "HS", 1],
                [5, 960, 160, True, "HS", 1],
            ]
        elif mode == "small":
            # refer to Table 2 in paper
            mobile_setting = [
                # k, exp, c, se, nl, s,
                [3, 16, 16, True, "RE", 2],
                [3, 72, 24, False, "RE", 2],
                [3, 88, 24, False, "RE", 1],
                [5, 96, 40, True, "HS", 2],
                [5, 240, 40, True, "HS", 1],
                [5, 240, 40, True, "HS", 1],
                [5, 120, 48, True, "HS", 1],
                [5, 144, 48, True, "HS", 1],
                [5, 288, 96, True, "HS", 2],
                [5, 576, 96, True, "HS", 1],
                [5, 576, 96, True, "HS", 1],
            ]
        else:
            raise NotImplementedError
        # building first layer
        assert input_size % 32 == 0
        last_channel = (
            make_divisible(last_channel * width_mult)
            if width_mult > 1.0
            else last_channel
        )
        self.features = [conv_bn(3, input_channel, 2, nlin_layer=Hswish)]
        self.classifier = []
        # building mobile blocks
        for k, exp, c, se, nl, s in mobile_setting:
            output_channel = make_divisible(c * width_mult)
            exp_channel = make_divisible(exp * width_mult)
            self.features.append(
                MobileBottleneck(
                    input_channel, output_channel, k, s, exp_channel, se, nl
                )
            )
            input_channel = output_channel
        # building last several layers
        if mode == "large":
            last_conv = make_divisible(960 * width_mult)
            self.features.append(
                conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish)
            )
            self.features.append(nn.AdaptiveAvgPool2d(1))
            self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
            self.features.append(Hswish(inplace=True))
        elif mode == "small":
            last_conv = make_divisible(576 * width_mult)
            self.features.append(
                conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish)
            )
            # self.features.append(SEModule(last_conv))  # refer to paper Table2, but I think this is a mistake
            self.features.append(nn.AdaptiveAvgPool2d(1))
            self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
            self.features.append(Hswish(inplace=True))
        else:
            raise NotImplementedError
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),  # refer to paper section 6
            nn.Linear(last_channel, n_class),
        )
        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        # Features end with AdaptiveAvgPool2d(1) + 1x1 conv, so the spatial
        # dims are 1x1 here; mean(3).mean(2) flattens to (B, C).
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
def mobilenetv3(pretrained=False, **kwargs):
    """Build a MobileNetV3; optionally load the reference small-model
    checkpoint from the working directory."""
    model = MobileNetV3(**kwargs)
    if pretrained:
        # NOTE(review): checkpoint path is relative to the CWD — confirm the
        # file is available before enabling `pretrained`.
        state_dict = torch.load("mobilenetv3_small_67.4.pth.tar")
        model.load_state_dict(state_dict, strict=True)
    return model
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/GAN/loss.py | threestudio/utils/GAN/loss.py | import torch
import torch.nn.functional as F
def generator_loss(discriminator, inputs, reconstructions, cond=None):
    """Non-saturating generator loss: ``-mean D(recon)``, optionally with a
    conditioning tensor concatenated along channels. *inputs* is unused but
    kept for signature symmetry with discriminator_loss."""
    recon = reconstructions.contiguous()
    if cond is not None:
        logits_fake = discriminator(torch.cat((recon, cond), dim=1))
    else:
        logits_fake = discriminator(recon)
    return -torch.mean(logits_fake)
def hinge_d_loss(logits_real, logits_fake):
    """Hinge discriminator loss:
    ``0.5 * (mean relu(1 - real) + mean relu(1 + fake))``."""
    real_term = torch.mean(F.relu(1.0 - logits_real))
    fake_term = torch.mean(F.relu(1.0 + logits_fake))
    return 0.5 * (real_term + fake_term)
def discriminator_loss(discriminator, inputs, reconstructions, cond=None):
    """Hinge loss for the discriminator on real inputs vs detached
    reconstructions, optionally conditioned via channel concatenation."""
    real = inputs.contiguous().detach()
    fake = reconstructions.contiguous().detach()
    if cond is not None:
        real = torch.cat((real, cond), dim=1)
        fake = torch.cat((fake, cond), dim=1)
    logits_real = discriminator(real)
    logits_fake = discriminator(fake)
    return hinge_d_loss(logits_real, logits_fake).mean()
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/GAN/attention.py | threestudio/utils/GAN/attention.py | import math
from inspect import isfunction
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from torch import einsum, nn
from threestudio.utils.GAN.network_util import checkpoint
def exists(val):
    """True iff *val* is not None."""
    return val is not None
def uniq(arr):
    """Order-preserving unique elements of *arr*, as a dict key view."""
    return dict.fromkeys(arr, True).keys()
def default(val, d):
    """Return *val* unless it is None; then return *d*, calling it first if
    it is a plain function."""
    if val is not None:
        return val
    return d() if isfunction(d) else d
def max_neg_value(t):
    """Most negative finite value representable in *t*'s dtype."""
    return -torch.finfo(t.dtype).max
def init_(tensor):
    """In-place uniform init in ``[-1/sqrt(d), 1/sqrt(d)]`` where *d* is the
    size of the last dimension; returns the same tensor."""
    bound = tensor.shape[-1] ** -0.5
    tensor.uniform_(-bound, bound)
    return tensor
# feedforward
class GEGLU(nn.Module):
    """Gated GELU projection: project to ``2 * dim_out``, then gate one half
    with the GELU of the other."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        value, gate = self.proj(x).chunk(2, dim=-1)
        return value * F.gelu(gate)
class FeedForward(nn.Module):
    """Transformer MLP block: (Linear+GELU or GEGLU) -> Dropout -> Linear."""

    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
        super().__init__()
        inner_dim = int(dim * mult)
        if dim_out is None:
            dim_out = dim
        if glu:
            project_in = GEGLU(dim, inner_dim)
        else:
            project_in = nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
        self.net = nn.Sequential(
            project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
        )

    def forward(self, x):
        return self.net(x)
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for param in module.parameters():
        param.detach().zero_()
    return module
def Normalize(in_channels):
    """GroupNorm with 32 groups, eps 1e-6 and affine parameters over
    *in_channels* features."""
    return torch.nn.GroupNorm(
        num_groups=32, num_channels=in_channels, eps=1e-6, affine=True
    )
class LinearAttention(nn.Module):
    """Linear-complexity attention over 2-D feature maps: keys are
    softmax-normalized over the spatial axis and contracted with values
    first, avoiding the full N x N attention matrix."""

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.heads = heads
        hidden_dim = dim_head * heads
        # Single conv emits q, k and v in one pass.
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(hidden_dim, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x)
        # Split into q/k/v and flatten spatial dims: (3, b, heads, c, h*w).
        q, k, v = rearrange(
            qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3
        )
        # Normalize keys over positions, aggregate values once, then query.
        k = k.softmax(dim=-1)
        context = torch.einsum("bhdn,bhen->bhde", k, v)
        out = torch.einsum("bhde,bhdn->bhen", context, q)
        out = rearrange(
            out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w
        )
        return self.to_out(out)
class SpatialSelfAttention(nn.Module):
    """Full (quadratic) self-attention over the spatial positions of a 2-D
    feature map, with 1x1-conv q/k/v projections and a residual output."""

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels
        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )
        self.k = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )
        self.v = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )
        self.proj_out = torch.nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0
        )

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)
        # compute attention
        b, c, h, w = q.shape
        q = rearrange(q, "b c h w -> b (h w) c")
        k = rearrange(k, "b c h w -> b c (h w)")
        # (h*w) x (h*w) similarity, scaled by 1/sqrt(c).
        w_ = torch.einsum("bij,bjk->bik", q, k)
        w_ = w_ * (int(c) ** (-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)
        # attend to values
        v = rearrange(v, "b c h w -> b c (h w)")
        w_ = rearrange(w_, "b i j -> b j i")
        h_ = torch.einsum("bij,bjk->bik", v, w_)
        h_ = rearrange(h_, "b c (h w) -> b c h w", h=h)
        h_ = self.proj_out(h_)
        # Residual connection around the attention.
        return x + h_
class CrossAttention(nn.Module):
    """Multi-head (cross-)attention: queries from *x*, keys/values from
    *context* (or from *x* itself when no context is given)."""

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = dim_head * heads
        # Self-attention when context_dim is None.
        context_dim = default(context_dim, query_dim)
        self.scale = dim_head**-0.5
        self.heads = heads
        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
        )

    def forward(self, x, context=None, mask=None):
        h = self.heads
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)
        # Fold the head dimension into the batch dimension.
        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v))
        sim = einsum("b i d, b j d -> b i j", q, k) * self.scale
        if exists(mask):
            # Mask disallowed key positions with the most negative finite value
            # so they vanish after softmax.
            mask = rearrange(mask, "b ... -> b (...)")
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, "b j -> (b h) () j", h=h)
            sim.masked_fill_(~mask, max_neg_value)
        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)
        out = einsum("b i j, b j d -> b i d", attn, v)
        out = rearrange(out, "(b h) n d -> b n (h d)", h=h)
        return self.to_out(out)
class BasicTransformerBlock(nn.Module):
    """Standard transformer block: self-attention -> cross-attention ->
    feed-forward, each with pre-LayerNorm and a residual connection."""

    def __init__(
        self,
        dim,
        n_heads,
        d_head,
        dropout=0.0,
        context_dim=None,
        gated_ff=True,
        checkpoint=True,
    ):
        super().__init__()
        self.attn1 = CrossAttention(
            query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout
        )  # is a self-attention
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.attn2 = CrossAttention(
            query_dim=dim,
            context_dim=context_dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
        )  # is self-attn if context is none
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint

    def forward(self, x, context=None):
        # Route through the checkpoint helper so activations are recomputed
        # in backward when self.checkpoint is set.
        return checkpoint(
            self._forward, (x, context), self.parameters(), self.checkpoint
        )

    def _forward(self, x, context=None):
        x = self.attn1(self.norm1(x)) + x
        x = self.attn2(self.norm2(x), context=context) + x
        x = self.ff(self.norm3(x)) + x
        return x
class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    """

    def __init__(
        self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None
    ):
        super().__init__()
        inner_dim = n_heads * d_head
        self.in_channels = in_channels
        self.norm = Normalize(in_channels)
        # 1x1 convolutions act as per-pixel linear projections in/out of the
        # transformer's working width.
        self.proj_in = nn.Conv2d(
            in_channels, inner_dim, kernel_size=1, stride=1, padding=0
        )
        blocks = [
            BasicTransformerBlock(
                inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim
            )
            for _ in range(depth)
        ]
        self.transformer_blocks = nn.ModuleList(blocks)
        # Zero-initialized output projection so the block starts as identity.
        self.proj_out = zero_module(
            nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
        )

    def forward(self, x, context=None):
        # note: if no context is given, cross-attention defaults to self-attention
        batch, channels, height, width = x.shape
        residual = x
        hidden = self.proj_in(self.norm(x))
        hidden = rearrange(hidden, "b c h w -> b (h w) c")
        for block in self.transformer_blocks:
            hidden = block(hidden, context=context)
        hidden = rearrange(hidden, "b (h w) c -> b c h w", h=height, w=width)
        return self.proj_out(hidden) + residual
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/utils/GAN/distribution.py | threestudio/utils/GAN/distribution.py | import numpy as np
import torch
class AbstractDistribution:
    """Minimal distribution interface: subclasses provide sampling and mode."""

    def sample(self):
        # Draw one sample from the distribution; must be overridden.
        raise NotImplementedError()

    def mode(self):
        # Return the most likely value; must be overridden.
        raise NotImplementedError()
class DiracDistribution(AbstractDistribution):
    """Degenerate (Dirac delta) distribution with all mass on ``value``."""

    def __init__(self, value):
        self.value = value

    def sample(self):
        # Sampling a delta always yields the defining value.
        return self.value

    def mode(self):
        return self.value
class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian parameterized by (mean, logvar) concatenated on dim 1.

    ``parameters`` has shape (B, 2C, ...); the first half along dim 1 is the
    mean, the second half the log-variance. With ``deterministic=True`` the
    variance is forced to zero so sampling returns the mean.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        mean, logvar = torch.chunk(parameters, 2, dim=1)
        self.mean = mean
        # Clamp log-variance for numerical stability of exp().
        self.logvar = torch.clamp(logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if deterministic:
            zeros = torch.zeros_like(self.mean).to(device=self.parameters.device)
            self.var = zeros
            self.std = zeros

    def sample(self):
        """Draw one reparameterized sample: mean + std * eps."""
        noise = torch.randn(self.mean.shape).to(device=self.parameters.device)
        return self.mean + self.std * noise

    def kl(self, other=None):
        """KL divergence to a standard normal (``other=None``) or to another
        diagonal Gaussian, summed over dims [1, 2, 3]."""
        if self.deterministic:
            return torch.Tensor([0.0])
        if other is None:
            return 0.5 * torch.sum(
                torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                dim=[1, 2, 3],
            )
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var
            - 1.0
            - self.logvar
            + other.logvar,
            dim=[1, 2, 3],
        )

    # NOTE: mutable default kept for interface compatibility with callers.
    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of ``sample`` under this Gaussian, summed
        over ``dims``."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims,
        )

    def mode(self):
        return self.mean
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    Compute the KL divergence between two gaussians.
    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    # Find any Tensor argument to serve as the dtype/device reference.
    tensor = next(
        (t for t in (mean1, logvar1, mean2, logvar2) if isinstance(t, torch.Tensor)),
        None,
    )
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().
    def _as_tensor(value):
        if isinstance(value, torch.Tensor):
            return value
        return torch.tensor(value).to(tensor)

    logvar1 = _as_tensor(logvar1)
    logvar2 = _as_tensor(logvar2)

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/latentnerf.py | threestudio/systems/latentnerf.py | from dataclasses import dataclass, field
import torch
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.ops import ShapeLoss, binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("latentnerf-system")
class LatentNeRF(BaseLift3DSystem):
    """Latent-NeRF lifting system.

    Optimizes a NeRF whose rendered ``comp_rgb`` lives in the guidance
    model's latent space; the optional refinement stage (``cfg.refinement``)
    renders RGB directly and skips latent decoding.
    """

    @dataclass
    class Config(BaseLift3DSystem.Config):
        # Optional guide shape handed to ShapeLoss to softly constrain geometry.
        guide_shape: Optional[str] = None
        # True for the RGB refinement stage (comp_rgb is RGB, not latents).
        refinement: bool = False

    cfg: Config

    def configure(self):
        """Create submodules; guidance is skipped only when testing in refinement mode."""
        # create geometry, material, background, renderer
        super().configure()
        if self.training or not self.cfg.refinement:
            self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
        if self.cfg.guide_shape is not None:
            self.shape_loss = ShapeLoss(self.cfg.guide_shape)

    def forward(self, batch: Dict[str, Any], decode: bool = False) -> Dict[str, Any]:
        """Render the batch; with ``decode=True`` also produce ``decoded_rgb``
        (decoded from latents unless in refinement mode)."""
        render_out = self.renderer(**batch)
        out = {
            **render_out,
        }
        if decode:
            if self.cfg.refinement:
                out["decoded_rgb"] = out["comp_rgb"]
            else:
                # decode_latents expects channels-first; renderer output is HWC.
                out["decoded_rgb"] = self.guidance.decode_latents(
                    out["comp_rgb"].permute(0, 3, 1, 2)
                ).permute(0, 2, 3, 1)
        return out

    def on_fit_start(self) -> None:
        super().on_fit_start()
        # only used in training
        self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
            self.cfg.prompt_processor
        )

    def training_step(self, batch, batch_idx):
        """One score-distillation step: guidance losses plus orientation,
        sparsity, opaqueness and optional shape regularizers."""
        out = self(batch)
        prompt_utils = self.prompt_processor()
        guidance_out = self.guidance(
            out["comp_rgb"],
            prompt_utils,
            **batch,
            rgb_as_latents=not self.cfg.refinement,
        )
        loss = 0.0
        # Every guidance term named "loss_*" is weighted by the matching
        # "lambda_*" entry of cfg.loss.
        for name, value in guidance_out.items():
            self.log(f"train/{name}", value)
            if name.startswith("loss_"):
                loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
        if self.C(self.cfg.loss.lambda_orient) > 0:
            if "normal" not in out:
                raise ValueError(
                    "Normal is required for orientation loss, no normal is found in the output."
                )
            # Penalize visible normals that face away from the ray direction.
            loss_orient = (
                out["weights"].detach()
                * dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
            ).sum() / (out["opacity"] > 0).sum()
            self.log("train/loss_orient", loss_orient)
            loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
        # Smooth sparsity proxy on rendered opacity.
        loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
        self.log("train/loss_sparsity", loss_sparsity)
        loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
        # Binary entropy of opacity with itself pushes values toward 0 or 1.
        opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
        loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
        self.log("train/loss_opaque", loss_opaque)
        loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
        if (
            self.cfg.guide_shape is not None
            and self.C(self.cfg.loss.lambda_shape) > 0
            and out["points"].shape[0] > 0
        ):
            loss_shape = self.shape_loss(out["points"], out["density"])
            self.log("train/loss_shape", loss_shape)
            loss += loss_shape * self.C(self.cfg.loss.lambda_shape)
        # Log the current (possibly scheduled) loss weights.
        for name, value in self.cfg.loss.items():
            self.log(f"train_params/{name}", self.C(value))
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        """Save a decoded-RGB / (optional) normal / opacity grid for this view."""
        out = self(batch, decode=True)
        self.save_image_grid(
            f"it{self.true_global_step}-{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["decoded_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="validation_step",
            step=self.true_global_step,
        )

    def on_validation_epoch_end(self):
        pass

    def test_step(self, batch, batch_idx):
        """Same visualization as validation, written into the test directory."""
        out = self(batch, decode=True)
        self.save_image_grid(
            f"it{self.true_global_step}-test/{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["decoded_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="test_step",
            step=self.true_global_step,
        )

    def on_test_epoch_end(self):
        """Assemble the per-view test images into an mp4 turnaround video."""
        # NOTE(review): the pattern should be a raw string (r"(\d+)\.png") to
        # avoid an invalid-escape-sequence warning; kept byte-identical here.
        self.save_img_sequence(
            f"it{self.true_global_step}-test",
            f"it{self.true_global_step}-test",
            "(\d+)\.png",
            save_format="mp4",
            fps=30,
            name="test",
            step=self.true_global_step,
        )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/mvdream.py | threestudio/systems/mvdream.py | import os
from dataclasses import dataclass, field
import torch
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.misc import cleanup, get_device
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("mvdream-system")
class MVDreamSystem(BaseLift3DSystem):
    """MVDream lifting system: multi-view diffusion guidance with a frozen
    guidance network whose weights are stripped from / restored to checkpoints."""

    @dataclass
    class Config(BaseLift3DSystem.Config):
        # presumably toggles extra sample visualization — not read in this
        # class; TODO confirm against the config consumers.
        visualize_samples: bool = False

    cfg: Config

    def configure(self) -> None:
        """Create submodules; guidance is frozen and prompt embeddings are
        computed once up front."""
        # set up geometry, material, background, renderer
        super().configure()
        self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
        self.guidance.requires_grad_(False)
        self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
            self.cfg.prompt_processor
        )
        self.prompt_utils = self.prompt_processor()

    def on_load_checkpoint(self, checkpoint):
        """Re-inject the frozen guidance weights into checkpoints that were
        saved without them so state_dict loading succeeds."""
        for k in list(checkpoint["state_dict"].keys()):
            if k.startswith("guidance."):
                # Checkpoint already contains guidance weights; nothing to do.
                return
        guidance_state_dict = {
            "guidance." + k: v for (k, v) in self.guidance.state_dict().items()
        }
        checkpoint["state_dict"] = {**checkpoint["state_dict"], **guidance_state_dict}
        return

    def on_save_checkpoint(self, checkpoint):
        """Drop the frozen guidance weights to keep checkpoints small."""
        for k in list(checkpoint["state_dict"].keys()):
            if k.startswith("guidance."):
                checkpoint["state_dict"].pop(k)
        return

    def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        # Thin pass-through to the renderer.
        return self.renderer(**batch)

    def training_step(self, batch, batch_idx):
        """One score-distillation step with orientation, sparsity, opaqueness,
        z-variance and (for SDF geometry) eikonal regularizers."""
        out = self(batch)
        guidance_out = self.guidance(out["comp_rgb"], self.prompt_utils, **batch)
        loss = 0.0
        # Every guidance term named "loss_*" is weighted by the matching
        # "lambda_*" entry of cfg.loss.
        for name, value in guidance_out.items():
            self.log(f"train/{name}", value)
            if name.startswith("loss_"):
                loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
        if self.C(self.cfg.loss.lambda_orient) > 0:
            if "normal" not in out:
                raise ValueError(
                    "Normal is required for orientation loss, no normal is found in the output."
                )
            # Penalize visible normals that face away from the ray direction.
            loss_orient = (
                out["weights"].detach()
                * dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
            ).sum() / (out["opacity"] > 0).sum()
            self.log("train/loss_orient", loss_orient)
            loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
        if self.C(self.cfg.loss.lambda_sparsity) > 0:
            # Smooth sparsity proxy on rendered opacity.
            loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
            self.log("train/loss_sparsity", loss_sparsity)
            loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
        if self.C(self.cfg.loss.lambda_opaque) > 0:
            # Binary entropy of opacity with itself pushes values toward 0 or 1.
            opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
            loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
            self.log("train/loss_opaque", loss_opaque)
            loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
        # z variance loss proposed in HiFA: http://arxiv.org/abs/2305.18766
        # helps reduce floaters and produce solid geometry
        if self.C(self.cfg.loss.lambda_z_variance) > 0:
            loss_z_variance = out["z_variance"][out["opacity"] > 0.5].mean()
            self.log("train/loss_z_variance", loss_z_variance)
            loss += loss_z_variance * self.C(self.cfg.loss.lambda_z_variance)
        # Eikonal term only exists for SDF-based geometry configs.
        if (
            hasattr(self.cfg.loss, "lambda_eikonal")
            and self.C(self.cfg.loss.lambda_eikonal) > 0
        ):
            loss_eikonal = (
                (torch.linalg.norm(out["sdf_grad"], ord=2, dim=-1) - 1.0) ** 2
            ).mean()
            self.log("train/loss_eikonal", loss_eikonal)
            loss += loss_eikonal * self.C(self.cfg.loss.lambda_eikonal)
        # Log the current (possibly scheduled) loss weights.
        for name, value in self.cfg.loss.items():
            self.log(f"train_params/{name}", self.C(value))
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        """Save an RGB / normal / opacity image grid for this validation view."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-{batch['index'][0]}.png",
            (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_rgb"][0],
                        "kwargs": {"data_format": "HWC"},
                    },
                ]
                if "comp_rgb" in out
                else []
            )
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="validation_step",
            step=self.true_global_step,
        )

    def on_validation_epoch_end(self):
        pass

    def test_step(self, batch, batch_idx):
        """Same visualization as validation, written into the test directory."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-test/{batch['index'][0]}.png",
            (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_rgb"][0],
                        "kwargs": {"data_format": "HWC"},
                    },
                ]
                if "comp_rgb" in out
                else []
            )
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="test_step",
            step=self.true_global_step,
        )

    def on_test_epoch_end(self):
        """Assemble the per-view test images into an mp4 turnaround video."""
        # NOTE(review): the pattern should be a raw string (r"(\d+)\.png") to
        # avoid an invalid-escape-sequence warning; kept byte-identical here.
        self.save_img_sequence(
            f"it{self.true_global_step}-test",
            f"it{self.true_global_step}-test",
            "(\d+)\.png",
            save_format="mp4",
            fps=30,
            name="test",
            step=self.true_global_step,
        )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/textmesh.py | threestudio/systems/textmesh.py | from dataclasses import dataclass, field
import torch
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("textmesh-system")
class TextMesh(BaseLift3DSystem):
    """TextMesh lifting system: text-guided optimization of an SDF-based
    geometry, with eikonal regularization and SDF shape initialization."""

    @dataclass
    class Config(BaseLift3DSystem.Config):
        pass

    cfg: Config

    def configure(self):
        # create geometry, material, background, renderer
        super().configure()

    def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        """Render the batch and return the raw renderer outputs."""
        render_out = self.renderer(**batch)
        return {
            **render_out,
        }

    def on_fit_start(self) -> None:
        """Instantiate training-only modules and initialize the SDF geometry."""
        super().on_fit_start()
        # only used in training
        self.prompt_processor = threestudio.find(self.cfg.prompt_processor_type)(
            self.cfg.prompt_processor
        )
        self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
        # initialize SDF
        self.geometry.initialize_shape()

    def training_step(self, batch, batch_idx):
        """One score-distillation step: guidance losses plus orientation,
        sparsity, opaqueness and eikonal regularizers."""
        out = self(batch)
        prompt_utils = self.prompt_processor()
        guidance_out = self.guidance(
            out["comp_rgb"], prompt_utils, **batch, rgb_as_latents=False
        )
        loss = 0.0
        # Every guidance term named "loss_*" is weighted by the matching
        # "lambda_*" entry of cfg.loss.
        for name, value in guidance_out.items():
            self.log(f"train/{name}", value)
            if name.startswith("loss_"):
                loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
        if self.C(self.cfg.loss.lambda_orient) > 0:
            if "normal" not in out:
                raise ValueError(
                    "Normal is required for orientation loss, no normal is found in the output."
                )
            # Penalize visible normals that face away from the ray direction.
            loss_orient = (
                out["weights"].detach()
                * dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
            ).sum() / (out["opacity"] > 0).sum()
            self.log("train/loss_orient", loss_orient)
            loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
        # Smooth sparsity proxy on rendered opacity.
        loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
        self.log("train/loss_sparsity", loss_sparsity)
        loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
        # Binary entropy of opacity with itself pushes values toward 0 or 1.
        opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
        loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
        self.log("train/loss_opaque", loss_opaque)
        loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
        # Eikonal loss keeps the SDF gradient close to unit norm.
        loss_eikonal = (
            (torch.linalg.norm(out["sdf_grad"], ord=2, dim=-1) - 1.0) ** 2
        ).mean()
        self.log("train/loss_eikonal", loss_eikonal)
        loss += loss_eikonal * self.C(self.cfg.loss.lambda_eikonal)
        self.log("train/inv_std", out["inv_std"], prog_bar=True)
        # Log the current (possibly scheduled) loss weights.
        for name, value in self.cfg.loss.items():
            self.log(f"train_params/{name}", self.C(value))
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        """Save an RGB / (optional) normal / opacity grid for this view."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["comp_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
        )

    def on_validation_epoch_end(self):
        pass

    def test_step(self, batch, batch_idx):
        """Same visualization as validation, written into the test directory."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-test/{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["comp_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
        )

    def on_test_epoch_end(self):
        """Assemble the per-view test images into an mp4 turnaround video."""
        # NOTE(review): the pattern should be a raw string (r"(\d+)\.png") to
        # avoid an invalid-escape-sequence warning; kept byte-identical here.
        self.save_img_sequence(
            f"it{self.true_global_step}-test",
            f"it{self.true_global_step}-test",
            "(\d+)\.png",
            save_format="mp4",
            fps=30,
        )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
liuff19/DreamReward | https://github.com/liuff19/DreamReward/blob/eeb5c648e6c2a25c8f6f8038edfe75d73c811614/threestudio/systems/zero123_simple.py | threestudio/systems/zero123_simple.py | from dataclasses import dataclass, field
import torch
import threestudio
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import *
@threestudio.register("zero123-simple-system")
class Zero123Simple(BaseLift3DSystem):
    """Simplified Zero-1-to-3 lifting system: image-conditioned guidance on
    randomly sampled novel-view renders (no text prompt processor).

    Fixes relative to the original implementation:
    - corrected the misspelled metric key ``trian/loss_normal_smoothness_2d``
      to ``train/loss_normal_smoothness_2d``;
    - the test-video filename pattern is a raw string, silencing the
      invalid-escape-sequence warning.
    """

    @dataclass
    class Config(BaseLift3DSystem.Config):
        pass

    cfg: Config

    def configure(self):
        """Create geometry/material/background/renderer and the guidance module."""
        # create geometry, material, background, renderer
        super().configure()
        self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)

    def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        """Render the batch and return the raw renderer outputs."""
        render_out = self.renderer(**batch)
        return {
            **render_out,
        }

    def on_fit_start(self) -> None:
        super().on_fit_start()

    def training_step(self, batch, batch_idx):
        """One guidance step on a random novel camera plus orientation,
        2D-normal-smoothness, sparsity and opaqueness regularizers; every 50
        steps a diagnostic image grid from the guidance output is saved."""
        out = self(batch["random_camera"])
        guidance_out = self.guidance(
            out["comp_rgb"],
            **batch["random_camera"],
            rgb_as_latents=False,
        )
        loss = 0.0
        for name, value in guidance_out.items():
            # Only log scalar outputs; image-like tensors are saved below.
            if not (isinstance(value, torch.Tensor) and value.ndim > 0):
                self.log(f"train/{name}", value)
            if name.startswith("loss_"):
                loss += value * self.C(self.cfg.loss[name.replace("loss_", "lambda_")])
        if self.C(self.cfg.loss.lambda_orient) > 0:
            if "normal" not in out:
                raise ValueError(
                    "Normal is required for orientation loss, no normal is found in the output."
                )
            # Penalize visible normals that face away from the ray direction.
            loss_orient = (
                out["weights"].detach()
                * dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2
            ).sum() / (out["opacity"] > 0).sum()
            self.log("train/loss_orient", loss_orient)
            loss += loss_orient * self.C(self.cfg.loss.lambda_orient)
        if self.C(self.cfg.loss.lambda_normal_smoothness_2d) > 0:
            if "comp_normal" not in out:
                raise ValueError(
                    "comp_normal is required for 2D normal smoothness loss, no comp_normal is found in the output."
                )
            normal = out["comp_normal"]
            # Total-variation-style smoothness over image rows and columns.
            loss_normal_smoothness_2d = (
                normal[:, 1:, :, :] - normal[:, :-1, :, :]
            ).square().mean() + (
                normal[:, :, 1:, :] - normal[:, :, :-1, :]
            ).square().mean()
            # Metric key typo fixed: was "trian/loss_normal_smoothness_2d".
            self.log("train/loss_normal_smoothness_2d", loss_normal_smoothness_2d)
            loss += loss_normal_smoothness_2d * self.C(
                self.cfg.loss.lambda_normal_smoothness_2d
            )
        # Smooth sparsity proxy on rendered opacity.
        loss_sparsity = (out["opacity"] ** 2 + 0.01).sqrt().mean()
        self.log("train/loss_sparsity", loss_sparsity)
        loss += loss_sparsity * self.C(self.cfg.loss.lambda_sparsity)
        # Binary entropy of opacity with itself pushes values toward 0 or 1.
        opacity_clamped = out["opacity"].clamp(1.0e-3, 1.0 - 1.0e-3)
        loss_opaque = binary_cross_entropy(opacity_clamped, opacity_clamped)
        self.log("train/loss_opaque", loss_opaque)
        loss += loss_opaque * self.C(self.cfg.loss.lambda_opaque)
        # Log the current (possibly scheduled) loss weights.
        for name, value in self.cfg.loss.items():
            self.log(f"train_params/{name}", self.C(value))
        # Periodic diagnostics of what the guidance model "sees" / denoises.
        if self.true_global_step % 50 == 0:
            self.save_image_grid(
                f"it{self.true_global_step}-train-t{int(guidance_out['timesteps'][0])}.png",
                (
                    [
                        {
                            "type": "rgb",
                            "img": guidance_out["rgb"][0],
                            "kwargs": {"data_format": "HWC"},
                        },
                    ]
                    if "rgb" in guidance_out
                    else []
                )
                + (
                    [
                        {
                            "type": "rgb",
                            "img": guidance_out["rgb_1step_orig"][0],
                            "kwargs": {"data_format": "HWC"},
                        }
                    ]
                    if "rgb_1step_orig" in guidance_out
                    else []
                )
                + (
                    [
                        {
                            "type": "rgb",
                            "img": guidance_out["rgb_multistep_orig"][0],
                            "kwargs": {"data_format": "HWC"},
                        }
                    ]
                    if "rgb_multistep_orig" in guidance_out
                    else []
                ),
            )
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        """Save an RGB / (optional) normal / opacity grid for this view."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["comp_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="validation_step",
            step=self.true_global_step,
        )

    def on_validation_epoch_end(self):
        pass

    def test_step(self, batch, batch_idx):
        """Same visualization as validation, written into the test directory."""
        out = self(batch)
        self.save_image_grid(
            f"it{self.true_global_step}-test/{batch['index'][0]}.png",
            [
                {
                    "type": "rgb",
                    "img": out["comp_rgb"][0],
                    "kwargs": {"data_format": "HWC"},
                },
            ]
            + (
                [
                    {
                        "type": "rgb",
                        "img": out["comp_normal"][0],
                        "kwargs": {"data_format": "HWC", "data_range": (0, 1)},
                    }
                ]
                if "comp_normal" in out
                else []
            )
            + [
                {
                    "type": "grayscale",
                    "img": out["opacity"][0, :, :, 0],
                    "kwargs": {"cmap": None, "data_range": (0, 1)},
                },
            ],
            name="test_step",
            step=self.true_global_step,
        )

    def on_test_epoch_end(self):
        """Assemble the per-view test images into an mp4 turnaround video."""
        self.save_img_sequence(
            f"it{self.true_global_step}-test",
            f"it{self.true_global_step}-test",
            r"(\d+)\.png",  # raw string: avoids invalid escape sequence
            save_format="mp4",
            fps=30,
            name="test",
            step=self.true_global_step,
        )
| python | MIT | eeb5c648e6c2a25c8f6f8038edfe75d73c811614 | 2026-01-05T07:14:33.752935Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.