repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9
values |
|---|---|---|---|---|---|---|---|---|---|---|
ingra14m/Specular-Gaussians | scene/dataset_readers.py | [
{
"identifier": "read_extrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T... | import os
import sys
import numpy as np
import json
import imageio
import cv2 as cv
from PIL import Image
from typing import NamedTuple, Optional
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from glob import glob
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from utils.camera_utils import camera_nerfies_from_JSON | 5,461 | else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
reading_dir = "images" if images == None else images
cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics,
images_folder=os.path.join(path, reading_dir))
cam_infos = sorted(cam_infos_unsorted.copy(), key=lambda x: x.image_name)
if eval:
train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
else:
train_cam_infos = cam_infos
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "sparse/0/points3D.ply")
bin_path = os.path.join(path, "sparse/0/points3D.bin")
txt_path = os.path.join(path, "sparse/0/points3D.txt")
if not os.path.exists(ply_path):
print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
try:
xyz, rgb, _ = read_points3D_binary(bin_path)
except:
xyz, rgb, _ = read_points3D_text(txt_path)
storePly(ply_path, xyz, rgb)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
for idx, frame in enumerate(frames):
cam_name = os.path.join(path, frame["file_path"] + extension)
matrix = np.linalg.inv(np.array(frame["transform_matrix"]))
R = -np.transpose(matrix[:3, :3])
R[:, 0] = -R[:, 0]
T = -matrix[:3, 3]
image_path = os.path.join(path, cam_name)
image_name = Path(cam_name).stem
image = Image.open(image_path)
# depth = imageio.imread(depth_name)
im_data = np.array(image.convert("RGBA"))
bg = np.array([1, 1, 1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:, :, :3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr * 255.0, dtype=np.byte), "RGB")
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
depth: Optional[np.array] = None
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def load_K_Rt_from_P(filename, P=None):
if P is None:
lines = open(filename).read().splitlines()
if len(lines) == 4:
lines = lines[1:]
lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)]
P = np.asarray(lines).astype(np.float32).squeeze()
out = cv.decomposeProjectionMatrix(P)
K = out[0]
R = out[1]
t = out[2]
K = K / K[2, 2]
pose = np.eye(4, dtype=np.float32)
pose[:3, :3] = R.transpose()
pose[:3, 3] = (t[:3] / t[3])[:, 0]
return K, pose
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
num_frames = len(cam_extrinsics)
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Reading camera {}/{}".format(idx + 1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model == "SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model == "PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
reading_dir = "images" if images == None else images
cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics,
images_folder=os.path.join(path, reading_dir))
cam_infos = sorted(cam_infos_unsorted.copy(), key=lambda x: x.image_name)
if eval:
train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
else:
train_cam_infos = cam_infos
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "sparse/0/points3D.ply")
bin_path = os.path.join(path, "sparse/0/points3D.bin")
txt_path = os.path.join(path, "sparse/0/points3D.txt")
if not os.path.exists(ply_path):
print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
try:
xyz, rgb, _ = read_points3D_binary(bin_path)
except:
xyz, rgb, _ = read_points3D_text(txt_path)
storePly(ply_path, xyz, rgb)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
for idx, frame in enumerate(frames):
cam_name = os.path.join(path, frame["file_path"] + extension)
matrix = np.linalg.inv(np.array(frame["transform_matrix"]))
R = -np.transpose(matrix[:3, :3])
R[:, 0] = -R[:, 0]
T = -matrix[:3, 3]
image_path = os.path.join(path, cam_name)
image_name = Path(cam_name).stem
image = Image.open(image_path)
# depth = imageio.imread(depth_name)
im_data = np.array(image.convert("RGBA"))
bg = np.array([1, 1, 1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:, :, :3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr * 255.0, dtype=np.byte), "RGB")
| fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1]) | 9 | 2023-12-12 14:59:01+00:00 | 8k |
Artiprocher/DiffSynth-Studio | diffsynth/extensions/FastBlend/api.py | [
{
"identifier": "AccurateModeRunner",
"path": "diffsynth/extensions/FastBlend/runners/accurate.py",
"snippet": "class AccurateModeRunner:\n def __init__(self):\n pass\n\n def run(self, frames_guide, frames_style, batch_size, window_size, ebsynth_config, desc=\"Accurate Mode\", save_path=Non... | from .runners import AccurateModeRunner, FastModeRunner, BalancedModeRunner, InterpolationModeRunner, InterpolationModeSingleFrameRunner
from .data import VideoData, get_video_fps, save_video, search_for_images
import os
import gradio as gr | 4,073 |
def check_input_for_blending(video_guide, video_guide_folder, video_style, video_style_folder):
frames_guide = VideoData(video_guide, video_guide_folder)
frames_style = VideoData(video_style, video_style_folder)
message = ""
if len(frames_guide) < len(frames_style):
message += f"The number of frames mismatches. Only the first {len(frames_guide)} frames of style video will be used.\n"
frames_style.set_length(len(frames_guide))
elif len(frames_guide) > len(frames_style):
message += f"The number of frames mismatches. Only the first {len(frames_style)} frames of guide video will be used.\n"
frames_guide.set_length(len(frames_style))
height_guide, width_guide = frames_guide.shape()
height_style, width_style = frames_style.shape()
if height_guide != height_style or width_guide != width_style:
message += f"The shape of frames mismatches. The frames in style video will be resized to (height: {height_guide}, width: {width_guide})\n"
frames_style.set_shape(height_guide, width_guide)
return frames_guide, frames_style, message
def smooth_video(
video_guide,
video_guide_folder,
video_style,
video_style_folder,
mode,
window_size,
batch_size,
tracking_window_size,
output_path,
fps,
minimum_patch_size,
num_iter,
guide_weight,
initialize,
progress = None,
):
# input
frames_guide, frames_style, message = check_input_for_blending(video_guide, video_guide_folder, video_style, video_style_folder)
if len(message) > 0:
print(message)
# output
if output_path == "":
if video_style is None:
output_path = os.path.join(video_style_folder, "output")
else:
output_path = os.path.join(os.path.split(video_style)[0], "output")
os.makedirs(output_path, exist_ok=True)
print("No valid output_path. Your video will be saved here:", output_path)
elif not os.path.exists(output_path):
os.makedirs(output_path, exist_ok=True)
print("Your video will be saved here:", output_path)
frames_path = os.path.join(output_path, "frames")
video_path = os.path.join(output_path, "video.mp4")
os.makedirs(frames_path, exist_ok=True)
# process
if mode == "Fast" or mode == "Balanced":
tracking_window_size = 0
ebsynth_config = {
"minimum_patch_size": minimum_patch_size,
"threads_per_block": 8,
"num_iter": num_iter,
"gpu_id": 0,
"guide_weight": guide_weight,
"initialize": initialize,
"tracking_window_size": tracking_window_size,
}
if mode == "Fast":
|
def check_input_for_blending(video_guide, video_guide_folder, video_style, video_style_folder):
frames_guide = VideoData(video_guide, video_guide_folder)
frames_style = VideoData(video_style, video_style_folder)
message = ""
if len(frames_guide) < len(frames_style):
message += f"The number of frames mismatches. Only the first {len(frames_guide)} frames of style video will be used.\n"
frames_style.set_length(len(frames_guide))
elif len(frames_guide) > len(frames_style):
message += f"The number of frames mismatches. Only the first {len(frames_style)} frames of guide video will be used.\n"
frames_guide.set_length(len(frames_style))
height_guide, width_guide = frames_guide.shape()
height_style, width_style = frames_style.shape()
if height_guide != height_style or width_guide != width_style:
message += f"The shape of frames mismatches. The frames in style video will be resized to (height: {height_guide}, width: {width_guide})\n"
frames_style.set_shape(height_guide, width_guide)
return frames_guide, frames_style, message
def smooth_video(
video_guide,
video_guide_folder,
video_style,
video_style_folder,
mode,
window_size,
batch_size,
tracking_window_size,
output_path,
fps,
minimum_patch_size,
num_iter,
guide_weight,
initialize,
progress = None,
):
# input
frames_guide, frames_style, message = check_input_for_blending(video_guide, video_guide_folder, video_style, video_style_folder)
if len(message) > 0:
print(message)
# output
if output_path == "":
if video_style is None:
output_path = os.path.join(video_style_folder, "output")
else:
output_path = os.path.join(os.path.split(video_style)[0], "output")
os.makedirs(output_path, exist_ok=True)
print("No valid output_path. Your video will be saved here:", output_path)
elif not os.path.exists(output_path):
os.makedirs(output_path, exist_ok=True)
print("Your video will be saved here:", output_path)
frames_path = os.path.join(output_path, "frames")
video_path = os.path.join(output_path, "video.mp4")
os.makedirs(frames_path, exist_ok=True)
# process
if mode == "Fast" or mode == "Balanced":
tracking_window_size = 0
ebsynth_config = {
"minimum_patch_size": minimum_patch_size,
"threads_per_block": 8,
"num_iter": num_iter,
"gpu_id": 0,
"guide_weight": guide_weight,
"initialize": initialize,
"tracking_window_size": tracking_window_size,
}
if mode == "Fast": | FastModeRunner().run(frames_guide, frames_style, batch_size=batch_size, window_size=window_size, ebsynth_config=ebsynth_config, save_path=frames_path) | 1 | 2023-12-07 16:52:15+00:00 | 8k |
vikhyat/mixtral-inference | mixtral/model.py | [
{
"identifier": "precompute_freqs_cis",
"path": "mixtral/rope.py",
"snippet": "def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0) -> torch.Tensor:\n freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))\n t = torch.arange(end, device=freqs.device) # type... | import torch
import json
from torch import nn
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
from mixtral.rope import precompute_freqs_cis, apply_rotary_emb
from mixtral.cache import CacheView, RotatingBufferCache
from xformers.ops.fmha import (
memory_efficient_attention,
) | 4,389 |
self.experts = torch.nn.ModuleList(
[FeedForwardExpert(args, device=device, dtype=dtype)
for _ in range(args.moe['num_experts'])]
)
def forward(self, x) -> torch.Tensor:
g = self.gate(x)
g = torch.softmax(g, dim=-1)
weights, expert_indices = torch.topk(g, 2, dim=-1)
weights /= weights.sum(dim=-1, keepdim=True)
result = torch.zeros_like(x)
for batch in range(x.shape[0]):
w_b, ei_b = weights[batch], expert_indices[batch]
for i, w in zip(ei_b, w_b):
result[batch] += w * self.experts[i](x[batch])
return result
class FeedForwardExpert(nn.Module):
def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16):
super().__init__()
self.w1 = nn.Linear(
args.dim,
args.hidden_dim,
bias=False,
device='meta',
dtype=dtype
)
self.w1.to_empty(device=device)
self.w2 = nn.Linear(
args.hidden_dim,
args.dim,
bias=False,
device='meta',
dtype=dtype
)
self.w2.to_empty(device=device)
self.w3 = nn.Linear(
args.dim,
args.hidden_dim,
bias=False,
device='meta',
dtype=dtype
)
self.w3.to_empty(device=device)
def forward(self, x) -> torch.Tensor:
return self.w2(nn.functional.silu(self.w1(x)) * self.w3(x))
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float()).type_as(x)
return output * self.weight
class TransformerBlock(nn.Module):
def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16):
super().__init__()
self.n_heads = args.n_heads
self.dim = args.dim
self.attention = Attention(args, device=device, dtype=dtype)
self.feed_forward = FeedForward(args=args, device=device, dtype=dtype)
self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps).to(device, dtype=dtype)
self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps).to(device, dtype=dtype)
self.args = args
def forward(
self, x: torch.Tensor, freqs_cis: torch.Tensor, cache: Optional[CacheView]
) -> torch.Tensor:
x = x.to(self.attention_norm.weight.device)
freqs_cis = freqs_cis.to(self.attention_norm.weight.device)
r = self.attention.forward(self.attention_norm(x), freqs_cis, cache)
h = x + r
r = self.feed_forward.forward(self.ffn_norm(h))
out = h + r
return out
class Transformer(nn.Module):
def __init__(self, args: ModelArgs, devices: List[str], dtype=torch.float16):
super().__init__()
self.args = args
self.vocab_size = args.vocab_size
self.n_layers = args.n_layers
assert self.vocab_size > 0
self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim, device='meta', dtype=dtype)
self.tok_embeddings.to_empty(device=devices[0])
self.layers = torch.nn.ModuleList(
[
TransformerBlock(args=args, device=devices[(i * len(devices)) // args.n_layers], dtype=dtype)
for i in range(args.n_layers)
]
)
self.norm = RMSNorm(args.dim, eps=args.norm_eps).to(devices[0], dtype=dtype)
self.output = nn.Linear(
args.dim,
args.vocab_size,
bias=False,
device='meta',
dtype=dtype
)
self.output.to_empty(device=devices[0])
|
@dataclass
class MoeArgs:
num_experts_per_tok: int
num_experts: int
@dataclass
class ModelArgs:
dim: int
n_layers: int
head_dim: int
hidden_dim: int
n_heads: int
n_kv_heads: int
norm_eps: float
vocab_size: int
moe: MoeArgs
max_batch_size: int = 0
@dataclass
class SimpleInputMetadata:
# rope absolute positions
positions: torch.Tensor
@staticmethod
def from_seqlens(seqlens: List[int], device: torch.device) -> "SimpleInputMetadata":
return SimpleInputMetadata(
positions = torch.cat(
[torch.arange(0, seqlen) for seqlen in seqlens]
).to(device=device, dtype=torch.long)
)
def repeat_kv(keys: torch.Tensor, values: torch.Tensor, repeats: int, dim: int):
keys = torch.repeat_interleave(keys, repeats=repeats, dim=dim)
values = torch.repeat_interleave(values, repeats=repeats, dim=dim)
return keys, values
class Attention(nn.Module):
def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16):
super().__init__()
self.args = args
self.n_heads: int = args.n_heads
self.n_kv_heads: int = args.n_kv_heads
self.repeats = self.n_heads // self.n_kv_heads
self.scale = self.args.head_dim**-0.5
self.wq = nn.Linear(
args.dim,
args.n_heads * args.head_dim,
bias=False,
device='meta',
dtype=dtype
)
self.wq.to_empty(device=device)
self.wk = nn.Linear(
args.dim,
args.n_kv_heads * args.head_dim,
bias=False,
device='meta',
dtype=dtype
)
self.wk.to_empty(device=device)
self.wv = nn.Linear(
args.dim,
args.n_kv_heads * args.head_dim,
bias=False,
device='meta',
dtype=dtype
)
self.wv.to_empty(device=device)
self.wo = nn.Linear(
args.n_heads * args.head_dim,
args.dim,
bias=False,
device='meta',
dtype=dtype
)
self.wo.to_empty(device=device)
def forward(
self, x: torch.Tensor,
freqs_cis: torch.Tensor,
cache: Optional[CacheView],
) -> torch.Tensor:
seqlen_sum, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(seqlen_sum, self.n_heads, self.args.head_dim)
xk = xk.view(seqlen_sum, self.n_kv_heads, self.args.head_dim)
xv = xv.view(seqlen_sum, self.n_kv_heads, self.args.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
xk = xk.to('cuda:0')
xv = xv.to('cuda:0')
if cache is None:
key, val = xk, xv
elif cache.prefill:
key, val = cache.interleave_kv(xk, xv)
cache.update(xk, xv)
else:
cache.update(xk, xv)
key, val = cache.key, cache.value
key = key.view(seqlen_sum * cache.sliding_window, self.n_kv_heads, self.args.head_dim)
val = val.view(seqlen_sum * cache.sliding_window, self.n_kv_heads, self.args.head_dim)
key, val = key.to(x.device), val.to(x.device)
# Repeat keys and values to match number of query heads
key, val = repeat_kv(key, val, self.repeats, dim=1)
# xformers requires (B=1, S, H, D)
xq, key, val = xq[None, ...], key[None, ...], val[None, ...]
output = memory_efficient_attention(xq, key, val, None if cache is None else cache.mask)
return self.wo(output.view_as(x))
class FeedForward(nn.Module):
def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16):
super().__init__()
self.gate = nn.Linear(args.dim, args.moe['num_experts'],
bias=False, device='meta', dtype=dtype)
self.gate.to_empty(device=device)
self.experts = torch.nn.ModuleList(
[FeedForwardExpert(args, device=device, dtype=dtype)
for _ in range(args.moe['num_experts'])]
)
def forward(self, x) -> torch.Tensor:
g = self.gate(x)
g = torch.softmax(g, dim=-1)
weights, expert_indices = torch.topk(g, 2, dim=-1)
weights /= weights.sum(dim=-1, keepdim=True)
result = torch.zeros_like(x)
for batch in range(x.shape[0]):
w_b, ei_b = weights[batch], expert_indices[batch]
for i, w in zip(ei_b, w_b):
result[batch] += w * self.experts[i](x[batch])
return result
class FeedForwardExpert(nn.Module):
def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16):
super().__init__()
self.w1 = nn.Linear(
args.dim,
args.hidden_dim,
bias=False,
device='meta',
dtype=dtype
)
self.w1.to_empty(device=device)
self.w2 = nn.Linear(
args.hidden_dim,
args.dim,
bias=False,
device='meta',
dtype=dtype
)
self.w2.to_empty(device=device)
self.w3 = nn.Linear(
args.dim,
args.hidden_dim,
bias=False,
device='meta',
dtype=dtype
)
self.w3.to_empty(device=device)
def forward(self, x) -> torch.Tensor:
return self.w2(nn.functional.silu(self.w1(x)) * self.w3(x))
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float()).type_as(x)
return output * self.weight
class TransformerBlock(nn.Module):
def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16):
super().__init__()
self.n_heads = args.n_heads
self.dim = args.dim
self.attention = Attention(args, device=device, dtype=dtype)
self.feed_forward = FeedForward(args=args, device=device, dtype=dtype)
self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps).to(device, dtype=dtype)
self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps).to(device, dtype=dtype)
self.args = args
def forward(
self, x: torch.Tensor, freqs_cis: torch.Tensor, cache: Optional[CacheView]
) -> torch.Tensor:
x = x.to(self.attention_norm.weight.device)
freqs_cis = freqs_cis.to(self.attention_norm.weight.device)
r = self.attention.forward(self.attention_norm(x), freqs_cis, cache)
h = x + r
r = self.feed_forward.forward(self.ffn_norm(h))
out = h + r
return out
class Transformer(nn.Module):
def __init__(self, args: ModelArgs, devices: List[str], dtype=torch.float16):
super().__init__()
self.args = args
self.vocab_size = args.vocab_size
self.n_layers = args.n_layers
assert self.vocab_size > 0
self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim, device='meta', dtype=dtype)
self.tok_embeddings.to_empty(device=devices[0])
self.layers = torch.nn.ModuleList(
[
TransformerBlock(args=args, device=devices[(i * len(devices)) // args.n_layers], dtype=dtype)
for i in range(args.n_layers)
]
)
self.norm = RMSNorm(args.dim, eps=args.norm_eps).to(devices[0], dtype=dtype)
self.output = nn.Linear(
args.dim,
args.vocab_size,
bias=False,
device='meta',
dtype=dtype
)
self.output.to_empty(device=devices[0])
| self.freqs_cis = precompute_freqs_cis(self.args.head_dim, 128_000, 1e6).to(devices[0]) | 0 | 2023-12-08 22:48:32+00:00 | 8k |
u2seg/U2Seg | detectron2/evaluation/sem_seg_evaluation.py | [
{
"identifier": "DatasetCatalog",
"path": "detectron2/data/catalog.py",
"snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove... | import itertools
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
import torch
import cv2 # noqa
from collections import OrderedDict
from typing import Optional, Union
from PIL import Image
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.comm import all_gather, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from .evaluator import DatasetEvaluator | 3,646 | outputs: the outputs of a model. It is either list of semantic segmentation predictions
(Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
segmentation prediction in the same format.
"""
for input, output in zip(inputs, outputs):
output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
pred = np.array(output, dtype=int)
gt_filename = self.input_file_to_gt_file[input["file_name"]]
gt = self.sem_seg_loading_fn(gt_filename, dtype=int)
# here for hungarian_matching
self.do_hangarain_mapping(coco_results=pred, gt=gt) # this is transfer the pred to
# # # transfer to supercategory
# gt[gt == self._ignore_label] = self._num_classes
# mapping_dict = json.load(
# open('/home/niudt/detectron2/tools/hungarain_matching/cocotrain_300/semantic_mapping.json'))
# for cls in mapping_dict:
# # cls = int(_cls)
# if mapping_dict[cls] == -1:
# pred[pred == int(cls)] = 0 # self._num_classes
# else:
# pred[pred == int(cls)] = mapping_dict[cls]
self._conf_matrix += np.bincount(
(self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
if self._compute_boundary_iou:
b_gt = self._mask_to_boundary(gt.astype(np.uint8))
b_pred = self._mask_to_boundary(pred.astype(np.uint8))
self._b_conf_matrix += np.bincount(
(self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a model.
It is a list of dicts. Each dict corresponds to an image and
contains keys like "height", "width", "file_name".
outputs: the outputs of a model. It is either list of semantic segmentation predictions
(Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
segmentation prediction in the same format.
"""
for input, output in zip(inputs, outputs):
output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
pred = np.array(output, dtype=int)
gt_filename = self.input_file_to_gt_file[input["file_name"]]
gt = self.sem_seg_loading_fn(gt_filename, dtype=int)
# transfer to supercategory
# self._num_classes = 28
_gt = self.transfer(gt)
_gt[_gt == self._ignore_label] = 16#self._num_classes
# here for hungarian_matching
self.do_hangarain_mapping(coco_results=pred, gt=_gt, save_path='/home/niudt/u2seg_test/detectron2/tools/seg.json')
# # transfer to supercategory
# # self._num_classes = 28
# gt = self.transfer(gt)
# gt[gt == self._ignore_label] = 16 # self._num_classes
# mapping_dict = json.load(open('/home/niudt/detectron2/tools/3x_800_cocotrain_test/semantic_mapping.json'))
# for cls in mapping_dict:
# # cls = int(_cls)
# if mapping_dict[cls] == -1:
# pred[pred == int(cls)] = self._num_classes
# else:
# pred[pred == int(cls)] = mapping_dict[cls]
self._conf_matrix += np.bincount(
(self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
if self._compute_boundary_iou:
b_gt = self._mask_to_boundary(gt.astype(np.uint8))
b_pred = self._mask_to_boundary(pred.astype(np.uint8))
self._b_conf_matrix += np.bincount(
(self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
    def evaluate(self):
        """
        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
        * Mean intersection-over-union averaged across classes (mIoU)
        * Frequency Weighted IoU (fwIoU)
        * Mean pixel accuracy averaged across classes (mACC)
        * Pixel Accuracy (pACC)
        """
        # Solve the optimal assignment between predicted cluster ids and labeled
        # categories from the (pred, gt) pairs accumulated by process().
        # NOTE(review): self.hungarain_matching is defined elsewhere in this file.
        # num_classes=15 / num_labeled=27 are hard-coded -- presumably tied to a
        # specific dataset's category counts; confirm before reuse.
        mapping_dict = self.hungarain_matching(all_preds=np.array(self.pred_det_cate),
                                               all_targets=np.array(self.pseudo_gt_cate),
                                               num_classes=15, num_labeled=27)
        print(mapping_dict)
        # # # save the mapping dict
        # NOTE(review): hard-coded, machine-specific output directory.
        save_root = '/home/niudt/u2seg_test/detectron2/tools/hungarain_matching/cocotrain_300'
        os.makedirs(save_root,exist_ok=True)
        save_path = os.path.join(save_root, 'semantic_mapping.json')
        with open(save_path, 'w',
                  encoding='utf-8') as f:
            json.dump(mapping_dict, f, ensure_ascii=False)
        # NOTE(review): deliberate debug stop -- this always raises AssertionError,
        # so the metric computation below never runs (and is silently skipped
        # under `python -O`, which strips asserts). Remove to resume evaluation.
        assert 1 == 0
        if self._distributed:
            synchronize()
| # Copyright (c) Facebook, Inc. and its affiliates.
_CV2_IMPORTED = True
try:
except ImportError:
# OpenCV is an optional dependency at the moment
_CV2_IMPORTED = False
def load_image_into_numpy_array(
    filename: str,
    copy: bool = False,
    dtype: Optional[Union[np.dtype, str]] = None,
) -> np.ndarray:
    """Read an image file through PathManager and return it as a numpy array.

    Args:
        filename: path to the image, resolvable by PathManager.
        copy: forwarded to ``np.array``; force a copy of the pixel buffer.
        dtype: optional target dtype for the returned array.

    Returns:
        The decoded image as an ``np.ndarray``.
    """
    with PathManager.open(filename, "rb") as handle:
        pil_image = Image.open(handle)
        # Convert while the file handle is still open: PIL loads lazily.
        return np.array(pil_image, copy=copy, dtype=dtype)
class SemSegEvaluator(DatasetEvaluator):
"""
Evaluate semantic segmentation metrics.
"""
def __init__(
self,
dataset_name,
distributed=True,
output_dir=None,
*,
sem_seg_loading_fn=load_image_into_numpy_array,
num_classes=None,
ignore_label=None,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
distributed (bool): if True, will collect results from all ranks for evaluation.
Otherwise, will evaluate the results in the current process.
output_dir (str): an output directory to dump results.
sem_seg_loading_fn: function to read sem seg file and load into numpy array.
Default provided, but projects can customize.
num_classes, ignore_label: deprecated argument
"""
self._logger = logging.getLogger(__name__)
if num_classes is not None:
self._logger.warn(
"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata."
)
if ignore_label is not None:
self._logger.warn(
"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata."
)
self._dataset_name = dataset_name
self._distributed = distributed
self._output_dir = output_dir
self._cpu_device = torch.device("cpu")
self.input_file_to_gt_file = {
dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
for dataset_record in DatasetCatalog.get(dataset_name)
}
meta = MetadataCatalog.get(dataset_name)
# Dict that maps contiguous training ids to COCO category ids
try:
c2d = meta.stuff_dataset_id_to_contiguous_id
self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}
except AttributeError:
self._contiguous_id_to_dataset_id = None
self._class_names = meta.stuff_classes
self.sem_seg_loading_fn = sem_seg_loading_fn
self._num_classes = len(meta.stuff_classes)
if num_classes is not None:
assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}"
self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label
# This is because cv2.erode did not work for int datatype. Only works for uint8.
self._compute_boundary_iou = True
if not _CV2_IMPORTED:
self._compute_boundary_iou = False
self._logger.warn(
"""Boundary IoU calculation requires OpenCV. B-IoU metrics are
not going to be computed because OpenCV is not available to import."""
)
if self._num_classes >= np.iinfo(np.uint8).max:
self._compute_boundary_iou = False
self._logger.warn(
f"""SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation!
B-IoU metrics are not going to be computed. Max allowed value (exclusive)
for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.
The number of classes of dataset {self._dataset_name} is {self._num_classes}"""
)
def reset(self):
self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
self._b_conf_matrix = np.zeros(
(self._num_classes + 1, self._num_classes + 1), dtype=np.int64
)
self._predictions = []
def do_hangarain_mapping(self, coco_results, gt):
# here do the hungarian matching
# create gt mapping dict
# dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
# gt_cate_mapping = {k: v for k, v in dataset_id_to_contiguous_id.items()}
# do the box matching based computing IOU, create pseudo gt
pred_num_mask = np.unique(coco_results)
gt_num_mask = np.unique(gt)
for pred in pred_num_mask:
if pred == 0:
continue
mask_pred = (coco_results == pred)
for _gt in gt_num_mask:
if _gt == 0 or _gt == 255:
continue
mask_gt = (gt == _gt)
iou = np.sum((mask_pred * mask_gt)) / np.sum((mask_pred + mask_gt))
if iou > 0.45: # TODO: find that thresh
self.pseudo_gt_cate.append(_gt)
self.pred_det_cate.append(pred)
continue
def process_cityscapes(self, inputs, outputs):
"""
Args:
inputs: the inputs to a model.
It is a list of dicts. Each dict corresponds to an image and
contains keys like "height", "width", "file_name".
outputs: the outputs of a model. It is either list of semantic segmentation predictions
(Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
segmentation prediction in the same format.
"""
for input, output in zip(inputs, outputs):
output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
pred = np.array(output, dtype=int)
gt_filename = self.input_file_to_gt_file[input["file_name"]]
gt = self.sem_seg_loading_fn(gt_filename, dtype=int)
# here for hungarian_matching
self.do_hangarain_mapping(coco_results=pred, gt=gt) # this is transfer the pred to
# # # transfer to supercategory
# gt[gt == self._ignore_label] = self._num_classes
# mapping_dict = json.load(
# open('/home/niudt/detectron2/tools/hungarain_matching/cocotrain_300/semantic_mapping.json'))
# for cls in mapping_dict:
# # cls = int(_cls)
# if mapping_dict[cls] == -1:
# pred[pred == int(cls)] = 0 # self._num_classes
# else:
# pred[pred == int(cls)] = mapping_dict[cls]
self._conf_matrix += np.bincount(
(self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
if self._compute_boundary_iou:
b_gt = self._mask_to_boundary(gt.astype(np.uint8))
b_pred = self._mask_to_boundary(pred.astype(np.uint8))
self._b_conf_matrix += np.bincount(
(self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a model.
It is a list of dicts. Each dict corresponds to an image and
contains keys like "height", "width", "file_name".
outputs: the outputs of a model. It is either list of semantic segmentation predictions
(Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
segmentation prediction in the same format.
"""
for input, output in zip(inputs, outputs):
output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
pred = np.array(output, dtype=int)
gt_filename = self.input_file_to_gt_file[input["file_name"]]
gt = self.sem_seg_loading_fn(gt_filename, dtype=int)
# transfer to supercategory
# self._num_classes = 28
_gt = self.transfer(gt)
_gt[_gt == self._ignore_label] = 16#self._num_classes
# here for hungarian_matching
self.do_hangarain_mapping(coco_results=pred, gt=_gt, save_path='/home/niudt/u2seg_test/detectron2/tools/seg.json')
# # transfer to supercategory
# # self._num_classes = 28
# gt = self.transfer(gt)
# gt[gt == self._ignore_label] = 16 # self._num_classes
# mapping_dict = json.load(open('/home/niudt/detectron2/tools/3x_800_cocotrain_test/semantic_mapping.json'))
# for cls in mapping_dict:
# # cls = int(_cls)
# if mapping_dict[cls] == -1:
# pred[pred == int(cls)] = self._num_classes
# else:
# pred[pred == int(cls)] = mapping_dict[cls]
self._conf_matrix += np.bincount(
(self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
if self._compute_boundary_iou:
b_gt = self._mask_to_boundary(gt.astype(np.uint8))
b_pred = self._mask_to_boundary(pred.astype(np.uint8))
self._b_conf_matrix += np.bincount(
(self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
def evaluate(self):
"""
Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
* Mean intersection-over-union averaged across classes (mIoU)
* Frequency Weighted IoU (fwIoU)
* Mean pixel accuracy averaged across classes (mACC)
* Pixel Accuracy (pACC)
"""
mapping_dict = self.hungarain_matching(all_preds=np.array(self.pred_det_cate),
all_targets=np.array(self.pseudo_gt_cate),
num_classes=15, num_labeled=27)
print(mapping_dict)
# # # save the mapping dict
save_root = '/home/niudt/u2seg_test/detectron2/tools/hungarain_matching/cocotrain_300'
os.makedirs(save_root,exist_ok=True)
save_path = os.path.join(save_root, 'semantic_mapping.json')
with open(save_path, 'w',
encoding='utf-8') as f:
json.dump(mapping_dict, f, ensure_ascii=False)
assert 1 == 0
if self._distributed:
synchronize() | conf_matrix_list = all_gather(self._conf_matrix) | 1 | 2023-12-05 01:13:31+00:00 | 8k |
upfusion3d/upfusion | diffusion/pipeline_control_net.py | [
{
"identifier": "create_model",
"path": "control_net/cldm/model.py",
"snippet": "def create_model(config_path):\n config = OmegaConf.load(config_path)\n model = instantiate_from_config(config.model).cpu()\n print(f'Loaded model config from [{config_path}]')\n return model"
},
{
"iden... | import torch
import torch.nn as nn
from control_net.cldm.model import create_model, load_state_dict
from control_net.ldm.models.diffusion.ddim import DDIMSampler | 5,730 |
class DiffusionPipelineCN(nn.Module):
def __init__(self, cfg, srt_model=None, dino_model=None):
super().__init__()
self.cfg = cfg
self.control_net_model_config_path = self.cfg.control_net_model_config_path
self.prompt_color = self.cfg.control_net_prompt_color
self._setup_model()
self.srt_model = srt_model
self.dino_model = dino_model
self.cond_type = self.cfg.cond_type
if self.cond_type == "DF":
self._create_batch_dict_fn = self._create_batch_dict_df
self._maybe_dropout_condition_fn = self._maybe_dropout_condition_df
elif self.cond_type == "SLT":
self._create_batch_dict_fn = self._create_batch_dict_slt
self._maybe_dropout_condition_fn = self._maybe_dropout_condition_slt
elif self.cond_type == "DF+SLT":
self._create_batch_dict_fn = self._create_batch_dict_dfslt
self._maybe_dropout_condition_fn = self._maybe_dropout_condition_dfslt
else:
raise ValueError
def _setup_model(self):
|
class DiffusionPipelineCN(nn.Module):
def __init__(self, cfg, srt_model=None, dino_model=None):
super().__init__()
self.cfg = cfg
self.control_net_model_config_path = self.cfg.control_net_model_config_path
self.prompt_color = self.cfg.control_net_prompt_color
self._setup_model()
self.srt_model = srt_model
self.dino_model = dino_model
self.cond_type = self.cfg.cond_type
if self.cond_type == "DF":
self._create_batch_dict_fn = self._create_batch_dict_df
self._maybe_dropout_condition_fn = self._maybe_dropout_condition_df
elif self.cond_type == "SLT":
self._create_batch_dict_fn = self._create_batch_dict_slt
self._maybe_dropout_condition_fn = self._maybe_dropout_condition_slt
elif self.cond_type == "DF+SLT":
self._create_batch_dict_fn = self._create_batch_dict_dfslt
self._maybe_dropout_condition_fn = self._maybe_dropout_condition_dfslt
else:
raise ValueError
def _setup_model(self):
| model = create_model(self.cfg.control_net_model_config_path).cpu() | 0 | 2023-12-12 00:49:11+00:00 | 8k |
modelscope/normal-depth-diffusion | libs/omnidata_torch/lib/midas_31/api.py | [
{
"identifier": "DPTDepthModel",
"path": "libs/omnidata_torch/lib/midas_31/midas/dpt_depth.py",
"snippet": "class DPTDepthModel(DPT):\n def __init__(self, path=None, non_negative=True, **kwargs):\n features = kwargs[\"features\"] if \"features\" in kwargs else 256\n head_features_1 = kw... | import cv2
import os
import torch
import torch.nn as nn
from torchvision.transforms import Compose
from .midas.dpt_depth import DPTDepthModel
from .midas.midas_net import MidasNet
from .midas.midas_net_custom import MidasNet_small
from .midas.transforms import Resize, NormalizeImage, PrepareForNet
from basicsr.utils.download_util import load_file_from_url | 4,170 | # based on https://github.com/isl-org/MiDaS
annotator_ckpts_path = './libs/omnidata_torch/pretrained_models/'
ISL_PATHS = {
"dpt_beit_large_512": os.path.join(annotator_ckpts_path, "dpt_beit_large_512.pt"),
"dpt_beit_large_384": os.path.join(annotator_ckpts_path, "dpt_beit_large_384.pt"),
"dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"),
"dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
}
remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/dpt_hybrid-midas-501f0c75.pt"
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def load_midas_transform(model_type):
# https://github.com/isl-org/MiDaS/blob/master/run.py
# load transform only
if model_type == "dpt_large": # DPT-Large
net_w, net_h = 384, 384
resize_mode = "minimal"
| # based on https://github.com/isl-org/MiDaS
annotator_ckpts_path = './libs/omnidata_torch/pretrained_models/'
ISL_PATHS = {
"dpt_beit_large_512": os.path.join(annotator_ckpts_path, "dpt_beit_large_512.pt"),
"dpt_beit_large_384": os.path.join(annotator_ckpts_path, "dpt_beit_large_384.pt"),
"dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"),
"dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
}
remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/dpt_hybrid-midas-501f0c75.pt"
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def load_midas_transform(model_type):
# https://github.com/isl-org/MiDaS/blob/master/run.py
# load transform only
if model_type == "dpt_large": # DPT-Large
net_w, net_h = 384, 384
resize_mode = "minimal" | normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) | 4 | 2023-12-06 07:29:34+00:00 | 8k |
facebookresearch/DCI | reproduction/crowdsourcing/annotate/preprocessing/preprocess_assets_segev.py | [
{
"identifier": "get_groups_simple",
"path": "reproduction/crowdsourcing/annotate/preprocessing/mask_creation_utils.py",
"snippet": "TARGET_STEP = 100\nSKIP_LOGGING = True\nclass GroupItem(TypedDict):\nclass FinalGroup(TypedDict):\ndef jitter(size: float) -> float:\ndef bound(v, lo, hi):\ndef _load_fina... | import time
import sys
import numpy as np
import os
import base64
import cv2
import json
from segment_anything import sam_model_registry
from segment_anything.automatic_mask_generator import SamAutomaticMaskGenerator
from .mask_creation_utils import get_groups_simple, refine_groups_simple, FinalGrouping, FinalGroup, get_points_from_canny_greedy
from .efficient_mask import EfficientMask
from PIL import Image
from io import BytesIO
from typing import TypedDict, List | 4,285 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
LOW = 5000 # Low value into the images array to start at
HIGH = 12000 # High value in images array to go to
SETEV_MODEL_ROOT = 'FILL_ME' # TODO fill in
ANNOTATE_ROOT = os.path.dirname(os.path.dirname(__file__))
SOURCE_DIR = os.path.join(ANNOTATE_ROOT, "assets/images")
OUT_DIR = os.path.join(ANNOTATE_ROOT, "assets/masks")
class SAMResult(TypedDict):
segmentation: np.ndarray # the mask itself
bbox: List[float] #XYWH of the mask
area: int # area of the mask
predicted_iou: float # model predicted quality
point_coords: List[List[float]] # coords of this point
stability_score: float # model stability score
crop_box: List[float] # image crop used to generate this mask, XYWH
def fold_group_tree(g: FinalGrouping):
def fold_group(subg: FinalGroup):
outer_mask = subg['outer_mask']
mask_img = Image.fromarray(np.uint8(outer_mask.mask * 255)) # type: ignore
mask_img = mask_img.convert('1')
maskbuf = BytesIO()
mask_img.save(maskbuf, format='png', bits=1, optimize=True)
mask_bytes = maskbuf.getvalue()
as_base64 = base64.b64encode(mask_bytes)
as_str = as_base64.decode('utf-8')
(t, l), (b, r) = subg['outer_mask'].get_tlbr()
return {
'outer_mask': as_str,
'area': int(outer_mask.get_size()),
'bounds': ((int(t), int(l)), (int(b), int(r))),
'subgroups': {
idx: fold_group(subsubg) for (idx, subsubg) in subg['subgroups'].items()
}
}
return {
idx: fold_group(subg) for (idx, subg) in g.items()
}
def group_outputs(outputs: List[SAMResult]) -> FinalGrouping:
as_efficient_masks: List[EfficientMask] = [
EfficientMask(
res['segmentation'],
res['predicted_iou'] * (res['stability_score'] ** 2),
size=res['area'],
) for res in outputs
]
in_order = sorted(as_efficient_masks, key=lambda x: x.get_size(), reverse=True)
return get_groups_simple(in_order)
def main():
all_images = os.listdir(SOURCE_DIR)
target_images = all_images[LOW:HIGH]
sam_checkpoint = SETEV_MODEL_ROOT
model_type = "vit_h"
device = "cuda"
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
generator = SamAutomaticMaskGenerator(
sam,
points_per_side = 50,
points_per_batch = 64,
pred_iou_thresh = 0.8,
stability_score_thresh = 0.94,
stability_score_offset = 1.0,
box_nms_thresh = 0.97,
min_mask_region_area = 1000,
output_mode = "binary_mask",
)
first_start = time.time()
for idx, img in enumerate(target_images):
try:
start_time = time.time()
path = os.path.join(SOURCE_DIR, img)
img_array = cv2.imread(path)
img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
canny_points = get_points_from_canny_greedy(img_array, distance_threshold=12, jitter_amount=35, num_extra=8)
if len(canny_points) == 0:
canny_results = []
print(f"[{time.time() - first_start}] No canny points for image {idx+LOW} : {time.time() - start_time}")
else:
points_for_sam = np.array([
[pt[1]/img_array.shape[1], pt[0]/img_array.shape[0]] for pt in canny_points
])
canny_generator = SamAutomaticMaskGenerator(
sam,
points_per_side=None,
point_grids=points_for_sam,
points_per_batch = 64,
pred_iou_thresh = 0.8,
stability_score_thresh = 0.94,
stability_score_offset = 1.0,
box_nms_thresh = 0.97,
min_mask_region_area = 1000,
output_mode = "binary_mask",
)
canny_results = canny_generator.generate(img_array)
print(f"[{time.time() - first_start}] SA canny compute time for image {idx+LOW} : {time.time() - start_time}")
result = generator.generate(img_array)
print(f"[{time.time() - first_start}] SA compute time for image {idx+LOW} : {time.time() - start_time}")
result += canny_results
grouped = group_outputs(result)
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
LOW = 5000 # Low value into the images array to start at
HIGH = 12000 # High value in images array to go to
SETEV_MODEL_ROOT = 'FILL_ME' # TODO fill in
ANNOTATE_ROOT = os.path.dirname(os.path.dirname(__file__))
SOURCE_DIR = os.path.join(ANNOTATE_ROOT, "assets/images")
OUT_DIR = os.path.join(ANNOTATE_ROOT, "assets/masks")
class SAMResult(TypedDict):
segmentation: np.ndarray # the mask itself
bbox: List[float] #XYWH of the mask
area: int # area of the mask
predicted_iou: float # model predicted quality
point_coords: List[List[float]] # coords of this point
stability_score: float # model stability score
crop_box: List[float] # image crop used to generate this mask, XYWH
def fold_group_tree(g: FinalGrouping):
def fold_group(subg: FinalGroup):
outer_mask = subg['outer_mask']
mask_img = Image.fromarray(np.uint8(outer_mask.mask * 255)) # type: ignore
mask_img = mask_img.convert('1')
maskbuf = BytesIO()
mask_img.save(maskbuf, format='png', bits=1, optimize=True)
mask_bytes = maskbuf.getvalue()
as_base64 = base64.b64encode(mask_bytes)
as_str = as_base64.decode('utf-8')
(t, l), (b, r) = subg['outer_mask'].get_tlbr()
return {
'outer_mask': as_str,
'area': int(outer_mask.get_size()),
'bounds': ((int(t), int(l)), (int(b), int(r))),
'subgroups': {
idx: fold_group(subsubg) for (idx, subsubg) in subg['subgroups'].items()
}
}
return {
idx: fold_group(subg) for (idx, subg) in g.items()
}
def group_outputs(outputs: List[SAMResult]) -> FinalGrouping:
as_efficient_masks: List[EfficientMask] = [
EfficientMask(
res['segmentation'],
res['predicted_iou'] * (res['stability_score'] ** 2),
size=res['area'],
) for res in outputs
]
in_order = sorted(as_efficient_masks, key=lambda x: x.get_size(), reverse=True)
return get_groups_simple(in_order)
def main():
all_images = os.listdir(SOURCE_DIR)
target_images = all_images[LOW:HIGH]
sam_checkpoint = SETEV_MODEL_ROOT
model_type = "vit_h"
device = "cuda"
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
generator = SamAutomaticMaskGenerator(
sam,
points_per_side = 50,
points_per_batch = 64,
pred_iou_thresh = 0.8,
stability_score_thresh = 0.94,
stability_score_offset = 1.0,
box_nms_thresh = 0.97,
min_mask_region_area = 1000,
output_mode = "binary_mask",
)
first_start = time.time()
for idx, img in enumerate(target_images):
try:
start_time = time.time()
path = os.path.join(SOURCE_DIR, img)
img_array = cv2.imread(path)
img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
canny_points = get_points_from_canny_greedy(img_array, distance_threshold=12, jitter_amount=35, num_extra=8)
if len(canny_points) == 0:
canny_results = []
print(f"[{time.time() - first_start}] No canny points for image {idx+LOW} : {time.time() - start_time}")
else:
points_for_sam = np.array([
[pt[1]/img_array.shape[1], pt[0]/img_array.shape[0]] for pt in canny_points
])
canny_generator = SamAutomaticMaskGenerator(
sam,
points_per_side=None,
point_grids=points_for_sam,
points_per_batch = 64,
pred_iou_thresh = 0.8,
stability_score_thresh = 0.94,
stability_score_offset = 1.0,
box_nms_thresh = 0.97,
min_mask_region_area = 1000,
output_mode = "binary_mask",
)
canny_results = canny_generator.generate(img_array)
print(f"[{time.time() - first_start}] SA canny compute time for image {idx+LOW} : {time.time() - start_time}")
result = generator.generate(img_array)
print(f"[{time.time() - first_start}] SA compute time for image {idx+LOW} : {time.time() - start_time}")
result += canny_results
grouped = group_outputs(result) | refined = refine_groups_simple(grouped) | 0 | 2023-12-13 16:16:48+00:00 | 8k |
daswer123/xtts-webui | scripts/resemble_enhance/enhancer/enhancer.py | [
{
"identifier": "Normalizer",
"path": "scripts/resemble_enhance/common.py",
"snippet": "class Normalizer(nn.Module):\n def __init__(self, momentum=0.01, eps=1e-9):\n super().__init__()\n self.momentum = momentum\n self.eps = eps\n self.running_mean_unsafe: Tensor\n ... | import logging
import matplotlib.pyplot as plt
import pandas as pd
import torch
from torch import Tensor, nn
from torch.distributions import Beta
from ..common import Normalizer
from ..denoiser.inference import load_denoiser
from ..melspec import MelSpectrogram
from ..utils.distributed import global_leader_only
from ..utils.train_loop import TrainLoop
from .hparams import HParams
from .lcfm import CFM, IRMAE, LCFM
from .univnet import UnivNet | 7,099 |
logger = logging.getLogger(__name__)
def _maybe(fn):
def _fn(*args):
if args[0] is None:
return None
return fn(*args)
return _fn
def _normalize_wav(x: Tensor):
return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7)
class Enhancer(nn.Module):
def __init__(self, hp: HParams):
super().__init__()
self.hp = hp
n_mels = self.hp.num_mels
vocoder_input_dim = n_mels + self.hp.vocoder_extra_dim
latent_dim = self.hp.lcfm_latent_dim
self.lcfm = LCFM(
IRMAE(
input_dim=n_mels,
output_dim=vocoder_input_dim,
latent_dim=latent_dim,
),
CFM(
cond_dim=n_mels,
output_dim=self.hp.lcfm_latent_dim,
solver_nfe=self.hp.cfm_solver_nfe,
solver_method=self.hp.cfm_solver_method,
time_mapping_divisor=self.hp.cfm_time_mapping_divisor,
),
z_scale=self.hp.lcfm_z_scale,
)
self.lcfm.set_mode_(self.hp.lcfm_training_mode)
self.mel_fn = MelSpectrogram(hp)
self.vocoder = UnivNet(self.hp, vocoder_input_dim)
self.denoiser = load_denoiser(self.hp.denoiser_run_dir, "cpu")
self.normalizer = Normalizer()
self._eval_lambd = 0.0
self.dummy: Tensor
self.register_buffer("dummy", torch.zeros(1))
if self.hp.enhancer_stage1_run_dir is not None:
pretrained_path = self.hp.enhancer_stage1_run_dir / "ds/G/default/mp_rank_00_model_states.pt"
self._load_pretrained(pretrained_path)
logger.info(f"{self.__class__.__name__} summary")
logger.info(f"{self.summarize()}")
def _load_pretrained(self, path):
# Clone is necessary as otherwise it holds a reference to the original model
cfm_state_dict = {k: v.clone() for k, v in self.lcfm.cfm.state_dict().items()}
denoiser_state_dict = {k: v.clone() for k, v in self.denoiser.state_dict().items()}
state_dict = torch.load(path, map_location="cpu")["module"]
self.load_state_dict(state_dict, strict=False)
self.lcfm.cfm.load_state_dict(cfm_state_dict) # Reset cfm
self.denoiser.load_state_dict(denoiser_state_dict) # Reset denoiser
logger.info(f"Loaded pretrained model from {path}")
def summarize(self):
npa_train = lambda m: sum(p.numel() for p in m.parameters() if p.requires_grad)
npa = lambda m: sum(p.numel() for p in m.parameters())
rows = []
for name, module in self.named_children():
rows.append(dict(name=name, trainable=npa_train(module), total=npa(module)))
rows.append(dict(name="total", trainable=npa_train(self), total=npa(self)))
df = pd.DataFrame(rows)
return df.to_markdown(index=False)
def to_mel(self, x: Tensor, drop_last=True):
"""
Args:
x: (b t), wavs
Returns:
o: (b c t), mels
"""
if drop_last:
return self.mel_fn(x)[..., :-1] # (b d t)
return self.mel_fn(x)
@global_leader_only
@torch.no_grad()
def _visualize(self, original_mel, denoised_mel):
|
logger = logging.getLogger(__name__)
def _maybe(fn):
def _fn(*args):
if args[0] is None:
return None
return fn(*args)
return _fn
def _normalize_wav(x: Tensor):
return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7)
class Enhancer(nn.Module):
def __init__(self, hp: HParams):
super().__init__()
self.hp = hp
n_mels = self.hp.num_mels
vocoder_input_dim = n_mels + self.hp.vocoder_extra_dim
latent_dim = self.hp.lcfm_latent_dim
self.lcfm = LCFM(
IRMAE(
input_dim=n_mels,
output_dim=vocoder_input_dim,
latent_dim=latent_dim,
),
CFM(
cond_dim=n_mels,
output_dim=self.hp.lcfm_latent_dim,
solver_nfe=self.hp.cfm_solver_nfe,
solver_method=self.hp.cfm_solver_method,
time_mapping_divisor=self.hp.cfm_time_mapping_divisor,
),
z_scale=self.hp.lcfm_z_scale,
)
self.lcfm.set_mode_(self.hp.lcfm_training_mode)
self.mel_fn = MelSpectrogram(hp)
self.vocoder = UnivNet(self.hp, vocoder_input_dim)
self.denoiser = load_denoiser(self.hp.denoiser_run_dir, "cpu")
self.normalizer = Normalizer()
self._eval_lambd = 0.0
self.dummy: Tensor
self.register_buffer("dummy", torch.zeros(1))
if self.hp.enhancer_stage1_run_dir is not None:
pretrained_path = self.hp.enhancer_stage1_run_dir / "ds/G/default/mp_rank_00_model_states.pt"
self._load_pretrained(pretrained_path)
logger.info(f"{self.__class__.__name__} summary")
logger.info(f"{self.summarize()}")
def _load_pretrained(self, path):
# Clone is necessary as otherwise it holds a reference to the original model
cfm_state_dict = {k: v.clone() for k, v in self.lcfm.cfm.state_dict().items()}
denoiser_state_dict = {k: v.clone() for k, v in self.denoiser.state_dict().items()}
state_dict = torch.load(path, map_location="cpu")["module"]
self.load_state_dict(state_dict, strict=False)
self.lcfm.cfm.load_state_dict(cfm_state_dict) # Reset cfm
self.denoiser.load_state_dict(denoiser_state_dict) # Reset denoiser
logger.info(f"Loaded pretrained model from {path}")
def summarize(self):
npa_train = lambda m: sum(p.numel() for p in m.parameters() if p.requires_grad)
npa = lambda m: sum(p.numel() for p in m.parameters())
rows = []
for name, module in self.named_children():
rows.append(dict(name=name, trainable=npa_train(module), total=npa(module)))
rows.append(dict(name="total", trainable=npa_train(self), total=npa(self)))
df = pd.DataFrame(rows)
return df.to_markdown(index=False)
def to_mel(self, x: Tensor, drop_last=True):
"""
Args:
x: (b t), wavs
Returns:
o: (b c t), mels
"""
if drop_last:
return self.mel_fn(x)[..., :-1] # (b d t)
return self.mel_fn(x)
@global_leader_only
@torch.no_grad()
def _visualize(self, original_mel, denoised_mel): | loop = TrainLoop.get_running_loop() | 4 | 2023-12-14 06:34:12+00:00 | 8k |
FrozenBurning/PrimDiffusion | primdiffusion/model/attcond_smpl_model.py | [
{
"identifier": "make_postex",
"path": "dva/geom.py",
"snippet": "def make_postex(v, idxim, barim):\n return (\n barim[None, :, :, 0, None] * v[:, idxim[:, :, 0]]\n + barim[None, :, :, 1, None] * v[:, idxim[:, :, 1]]\n + barim[None, :, :, 2, None] * v[:, idxim[:, :, 2]]\n ).pe... | import torch as th
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import dva.layers as la
import logging
from dva.geom import (
make_postex,
compute_tbn,
axisangle_to_matrix,
project_points_multi,
GeometryModule,
)
from dva.layers import ConvBlock, tile2d
from primdiffusion.model.transformer import SpatialTransformer
from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
from pytorch3d.structures import Meshes
from easymocap.smplmodel import SMPLlayer | 4,036 |
logger = logging.getLogger(__name__)
def init_primitives(slab_size, n_prims, lbs_fn, geo_fn, ref_frame, scale=15000.0):
stride = slab_size // int(n_prims**0.5)
device = geo_fn.vt.device
_, face_index_imp, bary_index_imp = geo_fn.render_index_images(
slab_size, impaint=True
)
bary_index_imp = th.as_tensor(bary_index_imp, device=device)
prim_bary_img = bary_index_imp[stride // 2 :: stride, stride // 2 :: stride]
prim_vidx_img = geo_fn.vi[
face_index_imp[stride // 2 :: stride, stride // 2 :: stride]
]
prim_vtidx_img = geo_fn.vti[
face_index_imp[stride // 2 :: stride, stride // 2 :: stride]
]
# getting actual geometrical coordinates
ref_frame = {
"poses": th.as_tensor(ref_frame["poses"]),
"shapes": th.as_tensor(ref_frame["shapes"]),
"Rh": th.as_tensor(ref_frame["Rh"]),
"Th": th.as_tensor(ref_frame["Th"]),
}
# convert to mm
geom = lbs_fn(**ref_frame) * 1000.0
prim_pos_mesh = (
make_postex(geom, prim_vidx_img, prim_bary_img)
.permute(0, 2, 3, 1)
.reshape(n_prims, 3)
)
distance = th.cdist(prim_pos_mesh, prim_pos_mesh)
# get a small neigbhourhood around
nbs_dists = th.topk(distance, k=24, largest=False).values[:, 1:].mean(dim=-1)
nbs_dists = nbs_dists.clip(5.0, 50.0)
prim_scale = scale * (1.0 / nbs_dists)
return prim_vidx_img, prim_vtidx_img, prim_bary_img, prim_scale, geom
class BodyDecoder(nn.Module):
def __init__(
self,
assets,
n_prims,
prim_size,
n_pose_dims,
n_pose_enc_channels,
n_embs_channels=64,
prim_motion_enabled=False,
prim_motion_start_train=100,
prim_rt_enabled=True,
n_init_channels=64,
uv_size=512,
smpl_gender="neutral",
image_height=1024,
image_width=1024,
):
super().__init__()
self.uv_size = uv_size
self.lbs_fn = SMPLlayer(
assets.smpl_path,
model_type="smpl",
gender=smpl_gender,
)
# initializing primitives
self.n_prims = n_prims
self.n_prims_x = int(n_prims**0.5)
self.n_prims_y = int(n_prims**0.5)
self.prim_size = prim_size
self.slab_size = int(n_prims**0.5 * prim_size)
logger.info(
f"slab_size={self.slab_size}, prim_size={self.prim_size}, n_prims={self.n_prims}"
)
self.prim_motion_enabled = prim_motion_enabled
self.prim_motion_start_train = prim_motion_start_train
self.prim_rt_enabled = prim_rt_enabled
logger.info("initializing geometry module...")
|
logger = logging.getLogger(__name__)
def init_primitives(slab_size, n_prims, lbs_fn, geo_fn, ref_frame, scale=15000.0):
stride = slab_size // int(n_prims**0.5)
device = geo_fn.vt.device
_, face_index_imp, bary_index_imp = geo_fn.render_index_images(
slab_size, impaint=True
)
bary_index_imp = th.as_tensor(bary_index_imp, device=device)
prim_bary_img = bary_index_imp[stride // 2 :: stride, stride // 2 :: stride]
prim_vidx_img = geo_fn.vi[
face_index_imp[stride // 2 :: stride, stride // 2 :: stride]
]
prim_vtidx_img = geo_fn.vti[
face_index_imp[stride // 2 :: stride, stride // 2 :: stride]
]
# getting actual geometrical coordinates
ref_frame = {
"poses": th.as_tensor(ref_frame["poses"]),
"shapes": th.as_tensor(ref_frame["shapes"]),
"Rh": th.as_tensor(ref_frame["Rh"]),
"Th": th.as_tensor(ref_frame["Th"]),
}
# convert to mm
geom = lbs_fn(**ref_frame) * 1000.0
prim_pos_mesh = (
make_postex(geom, prim_vidx_img, prim_bary_img)
.permute(0, 2, 3, 1)
.reshape(n_prims, 3)
)
distance = th.cdist(prim_pos_mesh, prim_pos_mesh)
# get a small neigbhourhood around
nbs_dists = th.topk(distance, k=24, largest=False).values[:, 1:].mean(dim=-1)
nbs_dists = nbs_dists.clip(5.0, 50.0)
prim_scale = scale * (1.0 / nbs_dists)
return prim_vidx_img, prim_vtidx_img, prim_bary_img, prim_scale, geom
class BodyDecoder(nn.Module):
def __init__(
self,
assets,
n_prims,
prim_size,
n_pose_dims,
n_pose_enc_channels,
n_embs_channels=64,
prim_motion_enabled=False,
prim_motion_start_train=100,
prim_rt_enabled=True,
n_init_channels=64,
uv_size=512,
smpl_gender="neutral",
image_height=1024,
image_width=1024,
):
super().__init__()
self.uv_size = uv_size
self.lbs_fn = SMPLlayer(
assets.smpl_path,
model_type="smpl",
gender=smpl_gender,
)
# initializing primitives
self.n_prims = n_prims
self.n_prims_x = int(n_prims**0.5)
self.n_prims_y = int(n_prims**0.5)
self.prim_size = prim_size
self.slab_size = int(n_prims**0.5 * prim_size)
logger.info(
f"slab_size={self.slab_size}, prim_size={self.prim_size}, n_prims={self.n_prims}"
)
self.prim_motion_enabled = prim_motion_enabled
self.prim_motion_start_train = prim_motion_start_train
self.prim_rt_enabled = prim_rt_enabled
logger.info("initializing geometry module...") | self.geo_fn = GeometryModule( | 4 | 2023-12-06 05:12:55+00:00 | 8k |
LSimon95/megatts2 | prepare_ds.py | [
{
"identifier": "TextTokenizer",
"path": "modules/tokenizer.py",
"snippet": "class TextTokenizer:\n def __init__(self) -> None:\n\n self.separator = Separator(word=\"_\", syllable=\"-\", phone=\"|\")\n self.pinyin2lty = get_pinyin2lty()\n\n def phonemize(self, text: str) -> str:\n ... | import os
import glob
import argparse
import soundfile as sf
import librosa
from modules.tokenizer import TextTokenizer
from multiprocessing import Pool
from tqdm.auto import tqdm
from utils.textgrid import read_textgrid
from lhotse import validate_recordings_and_supervisions, CutSet, NumpyHdf5Writer, load_manifest_lazy
from lhotse.audio import Recording, RecordingSet
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.recipes.utils import read_manifests_if_cached
from lhotse.utils import Seconds, compute_num_frames
from functools import partial
from modules.tokenizer import (
HIFIGAN_SR,
HIFIGAN_HOP_LENGTH,
MelSpecExtractor,
AudioFeatExtraConfig
)
from utils.symbol_table import SymbolTable | 4,618 | '''
wavs dir
├── speaker1
│ ├── s1wav1.wav
│ ├── s1wav1.txt
│ ├── s1wav2.wav
│ ├── s1wav2.txt
│ ├── ...
├── speaker2
│ ├── s2wav1.wav
│ ├── s2wav1.txt
│ ├── ...
cautions: stage 0 will delete all txt files in wavs dir
'''
def make_lab(tt, wav):
id = wav.split('/')[-1].split('.')[0]
folder = '/'.join(wav.split('/')[:-1])
# Create lab files
with open(f'{folder}/{id}.txt', 'r') as f:
txt = f.read()
with open(f'{folder}/{id}.lab', 'w') as f:
f.write(' '.join(tt.tokenize(txt)))
class DatasetMaker:
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('--stage', type=int, default=0,
help='Stage to start from')
parser.add_argument('--wavtxt_path', type=str,
default='data/wavs/', help='Path to wav and txt files')
parser.add_argument('--text_grid_path', type=str,
default='data/textgrids/', help='Path to textgrid files')
parser.add_argument('--ds_path', type=str,
default='data/ds/', help='Path to save dataset')
parser.add_argument('--num_workers', type=int,
default=4, help='Number of workers')
parser.add_argument('--test_set_ratio', type=float,
default=0.03, help='Test set ratio')
parser.add_argument('--trim_wav', type=bool,
default=False, help='Trim wav by textgrid')
self.args = parser.parse_args()
self.test_set_interval = int(1 / self.args.test_set_ratio)
def make_labs(self):
wavs = glob.glob(f'{self.args.wavtxt_path}/**/*.wav', recursive=True)
tt = TextTokenizer()
with Pool(self.args.num_workers) as p:
for _ in tqdm(p.imap(partial(make_lab, tt), wavs), total=len(wavs)):
pass
def make_ds(self):
tgs = glob.glob(
f'{self.args.text_grid_path}/**/*.TextGrid', recursive=True)
recordings = [[], []] # train, test
supervisions = [[], []]
set_name = ['train', 'valid']
max_duration_token = 0
for i, tg in tqdm(enumerate(tgs)):
id = tg.split('/')[-1].split('.')[0]
speaker = tg.split('/')[-2]
intervals = [i for i in read_textgrid(tg) if (i[3] == 'phones')]
y, sr = librosa.load(
f'{self.args.wavtxt_path}/{speaker}/{id}.wav', sr=HIFIGAN_SR)
if intervals[0][2] == '':
intervals = intervals[1:]
if intervals[-1][2] == '':
intervals = intervals[:-1]
if self.args.trim_wav:
start = intervals[0][0]*sr
stop = intervals[-1][1]*sr
y = y[int(start):int(stop)]
y = librosa.util.normalize(y)
sf.write(
f'{self.args.wavtxt_path}/{speaker}/{id}.wav', y, HIFIGAN_SR)
start = intervals[0][0]
stop = intervals[-1][1]
| '''
wavs dir
├── speaker1
│ ├── s1wav1.wav
│ ├── s1wav1.txt
│ ├── s1wav2.wav
│ ├── s1wav2.txt
│ ├── ...
├── speaker2
│ ├── s2wav1.wav
│ ├── s2wav1.txt
│ ├── ...
cautions: stage 0 will delete all txt files in wavs dir
'''
def make_lab(tt, wav):
id = wav.split('/')[-1].split('.')[0]
folder = '/'.join(wav.split('/')[:-1])
# Create lab files
with open(f'{folder}/{id}.txt', 'r') as f:
txt = f.read()
with open(f'{folder}/{id}.lab', 'w') as f:
f.write(' '.join(tt.tokenize(txt)))
class DatasetMaker:
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('--stage', type=int, default=0,
help='Stage to start from')
parser.add_argument('--wavtxt_path', type=str,
default='data/wavs/', help='Path to wav and txt files')
parser.add_argument('--text_grid_path', type=str,
default='data/textgrids/', help='Path to textgrid files')
parser.add_argument('--ds_path', type=str,
default='data/ds/', help='Path to save dataset')
parser.add_argument('--num_workers', type=int,
default=4, help='Number of workers')
parser.add_argument('--test_set_ratio', type=float,
default=0.03, help='Test set ratio')
parser.add_argument('--trim_wav', type=bool,
default=False, help='Trim wav by textgrid')
self.args = parser.parse_args()
self.test_set_interval = int(1 / self.args.test_set_ratio)
def make_labs(self):
wavs = glob.glob(f'{self.args.wavtxt_path}/**/*.wav', recursive=True)
tt = TextTokenizer()
with Pool(self.args.num_workers) as p:
for _ in tqdm(p.imap(partial(make_lab, tt), wavs), total=len(wavs)):
pass
def make_ds(self):
tgs = glob.glob(
f'{self.args.text_grid_path}/**/*.TextGrid', recursive=True)
recordings = [[], []] # train, test
supervisions = [[], []]
set_name = ['train', 'valid']
max_duration_token = 0
for i, tg in tqdm(enumerate(tgs)):
id = tg.split('/')[-1].split('.')[0]
speaker = tg.split('/')[-2]
intervals = [i for i in read_textgrid(tg) if (i[3] == 'phones')]
y, sr = librosa.load(
f'{self.args.wavtxt_path}/{speaker}/{id}.wav', sr=HIFIGAN_SR)
if intervals[0][2] == '':
intervals = intervals[1:]
if intervals[-1][2] == '':
intervals = intervals[:-1]
if self.args.trim_wav:
start = intervals[0][0]*sr
stop = intervals[-1][1]*sr
y = y[int(start):int(stop)]
y = librosa.util.normalize(y)
sf.write(
f'{self.args.wavtxt_path}/{speaker}/{id}.wav', y, HIFIGAN_SR)
start = intervals[0][0]
stop = intervals[-1][1]
| frame_shift=HIFIGAN_HOP_LENGTH / HIFIGAN_SR | 3 | 2023-12-10 15:02:54+00:00 | 8k |
ml-stat-Sustech/TorchCP | examples/imagenet_example.py | [
{
"identifier": "ClassWisePredictor",
"path": "torchcp/classification/predictors/classwise.py",
"snippet": "class ClassWisePredictor(SplitPredictor):\n \"\"\"\n\n Applications of Class-Conditional Conformal Predictor in Multi-Class Classification (Shi et al., 2013)\n paper: https://ieeexplore.i... | import argparse
import os
import torch
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as trn
from tqdm import tqdm
from torchcp.classification.predictors import ClusterPredictor, ClassWisePredictor, SplitPredictor
from torchcp.classification.scores import THR, APS, SAPS, RAPS
from torchcp.classification import Metrics
from torchcp.utils import fix_randomness
from examples.common.dataset import build_dataset | 7,046 | # Copyright (c) 2023-present, SUSTech-ML.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--alpha', default=0.1, type=float)
args = parser.parse_args()
fix_randomness(seed=args.seed)
#######################################
# Loading ImageNet dataset and a pytorch model
#######################################
model_name = 'ResNet101'
model = torchvision.models.resnet101(weights="IMAGENET1K_V1", progress=True)
model_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(model_device)
| # Copyright (c) 2023-present, SUSTech-ML.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--alpha', default=0.1, type=float)
args = parser.parse_args()
fix_randomness(seed=args.seed)
#######################################
# Loading ImageNet dataset and a pytorch model
#######################################
model_name = 'ResNet101'
model = torchvision.models.resnet101(weights="IMAGENET1K_V1", progress=True)
model_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(model_device)
| dataset = build_dataset('imagenet') | 9 | 2023-12-06 09:08:41+00:00 | 8k |
vintagedave/Fontimize | tests.py | [
{
"identifier": "get_used_characters_in_html",
"path": "fontimize.py",
"snippet": "def get_used_characters_in_html(html : str) -> set[chr]:\n soup = BeautifulSoup(html, 'html.parser')\n text = soup.get_text()\n return get_used_characters_in_str(text)"
},
{
"identifier": "charPair",
... | import os
import unittest
import sys
from unittest.mock import patch
from fontimize import get_used_characters_in_html, charPair, _get_char_ranges, optimise_fonts, optimise_fonts_for_files
from fontTools.ttLib import woff2, TTFont | 4,243 |
def test_html_with_links(self):
self.assertEqual(get_used_characters_in_html('<html><body><a href="https://example.com">Hello, World!</a></body></html>'), set('Hello, World!'))
def test_html_with_nested_tags(self):
self.assertEqual(get_used_characters_in_html('<html><body><div><span>Hello, </span><a href="https://example.com">World!</a></span></div></body></html>'), set('Hello, World!'))
class TestCharPairs(unittest.TestCase):
def test_get_range_with_single_char(self):
self.assertEqual(charPair('a', 'a').get_range(), 'U+0061')
# Note that the second of the pair does not have the "U+" -- this caught me out
# with parse errors inside TTF2Web()
def test_get_range_with_two_chars(self):
self.assertEqual(charPair('a', 'b').get_range(), 'U+0061-0062')
def test_get_range_with_multiple_chars(self):
self.assertEqual(charPair('a', 'd').get_range(), 'U+0061-0064')
class TestCharRanges(unittest.TestCase):
def test_empty(self):
self.assertEqual(_get_char_ranges([]), [])
def test_single_char(self):
self.assertEqual(_get_char_ranges(['a']), [charPair('a', 'a')])
def test_two_sequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'b']), [charPair('a', 'b')])
def test_two_nonsequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'c']), [charPair('a', 'a'), charPair('c', 'c')])
def test_multiple_ranges(self):
self.assertEqual(_get_char_ranges(['a', 'b', 'd', 'e', 'f', 'h']), [charPair('a', 'b'), charPair('d', 'f'), charPair('h', 'h')])
# Used to verify the number of glyphs in a font matches the number of (unique!) characters in the test string
def _count_glyphs_in_font(fontpath):
# with open(fontpath, 'rb') as f:
# wfr = woff2.WOFF2Reader(f)
# cmap = font['cmap']
# return len(cmap.getBestCmap())
# font.flavor = None # Decompress the font data
font = TTFont(fontpath)#flavor='woff2')#, sfntReader=wfr)
font.flavor = None # Decompress the font data
num_glyphs = font['maxp'].numGlyphs # Use font.getGlyphOrder() and https://fontdrop.info to examine, if weird
return num_glyphs
# Does a named glyph exist in the font?
def _font_contains(fontpath, charname : str) -> bool:
font = TTFont(fontpath)
font.flavor = None # Decompress the font data
return charname in font.getGlyphOrder()
class TestOptimiseFonts(unittest.TestCase):
# Contains unique characters, none repeated, a couple of capitals, some symbols, and 26 lowercase
test_string = " ,.@QT_abcdefghijklmnopqrstuvwxyz"
def test_optimise_fonts_with_single_font(self):
result = optimise_fonts(self.test_string, ['tests/Spirax-Regular.ttf'], fontpath='tests/output', verbose=False, print_stats=False)
# Basics
self.assertIsInstance(result, dict)
foundfonts = result["fonts"]
self.assertIn('tests/Spirax-Regular.ttf', foundfonts)
# Generated with the right name
self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2')
# If the number of glyphs in the font matches the expected number
# For +1, see test_optimise_fonts_with_empty_text
self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font(foundfonts['tests/Spirax-Regular.ttf']))
def test_optimise_fonts_with_multiple_fonts(self):
result = optimise_fonts(self.test_string,
['tests/Spirax-Regular.ttf', 'tests/EBGaramond-VariableFont_wght.ttf', 'tests/EBGaramond-Italic-VariableFont_wght.ttf'],
fontpath='tests/output', verbose=False, print_stats=False)
self.assertIsInstance(result, dict)
foundfonts = result["fonts"]
self.assertIn('tests/Spirax-Regular.ttf', foundfonts)
self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2')
self.assertIn('tests/EBGaramond-VariableFont_wght.ttf', foundfonts)
self.assertEqual(foundfonts['tests/EBGaramond-VariableFont_wght.ttf'], 'tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2')
self.assertIn('tests/EBGaramond-Italic-VariableFont_wght.ttf', foundfonts)
self.assertEqual(foundfonts['tests/EBGaramond-Italic-VariableFont_wght.ttf'], 'tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2')
# If the number of glyphs in the font matches the expected number
# + 1 for the tests below -- see test_optimise_fonts_with_empty_text
self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2'))
# + 16, + 12: EB Garamond contains multiple f-ligatures (eg fi), plus other variants, so the number of glyphs is higher. Italic has fewer.
self.assertEqual(len(self.test_string) + 1 + 16, _count_glyphs_in_font('tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2'))
self.assertEqual(len(self.test_string) + 1 + 12, _count_glyphs_in_font('tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2'))
def test_optimise_fonts_with_empty_text(self):
result = optimise_fonts("",
['tests/Spirax-Regular.ttf'],
fontpath='tests/output',
verbose=False, print_stats=False)
self.assertIsInstance(result, dict)
foundfonts = result["fonts"]
self.assertIn('tests/Spirax-Regular.ttf', foundfonts)
self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2')
# If the number of glyphs in the font matches the expected number: two, because an empty string is reported as containing space, see get_used_characters_in_str
# and fonts also seem to contain ".notdef":
# > font.getGlyphOrder()
# > ['.notdef', 'space']
self.assertEqual(2, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2'))
class TestOptimiseFontsForFiles(unittest.TestCase):
def setUp(self):
self.files = ['tests/test1-index-css.html', 'tests/test.txt', 'tests/test2.html']
self.font_output_dir = 'tests/output'
self.subsetname = 'TestFilesSubset'
self.verbose = False
self.print_stats = False
# Not used by any HTML/CSS, mimics manually adding a font
self.fonts = ['tests/Whisper-Regular.ttf', 'tests/NotoSans-VariableFont_wdth,wght.ttf', 'tests/NotoSansJP-VariableFont_wght.ttf']
@patch.object(sys, 'stdout') # provides mock_stdout in order to hide and verify console output
def test_optimise_fonts_for_files(self, mock_stdout):
|
class TestGetUsedCharactersInHtml(unittest.TestCase):
def test_empty_html(self):
self.assertEqual(get_used_characters_in_html(''), set(' '))
def test_html_with_no_text(self):
self.assertEqual(get_used_characters_in_html('<html><body></body></html>'), set(' '))
def test_html_with_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_repeated_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World! Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_multiple_spans(self):
self.assertEqual(get_used_characters_in_html('<html><body><span>Hello</span><span>, </span><span>World!</span></body></html>'), set('Hello, World!'))
def test_html_with_multiple_divs(self):
self.assertEqual(get_used_characters_in_html('<html><body><div>Hello</div><div>, </div><div>World!</div></body></html>'), set('Hello, World!'))
def test_html_with_links(self):
self.assertEqual(get_used_characters_in_html('<html><body><a href="https://example.com">Hello, World!</a></body></html>'), set('Hello, World!'))
def test_html_with_nested_tags(self):
self.assertEqual(get_used_characters_in_html('<html><body><div><span>Hello, </span><a href="https://example.com">World!</a></span></div></body></html>'), set('Hello, World!'))
class TestCharPairs(unittest.TestCase):
def test_get_range_with_single_char(self):
self.assertEqual(charPair('a', 'a').get_range(), 'U+0061')
# Note that the second of the pair does not have the "U+" -- this caught me out
# with parse errors inside TTF2Web()
def test_get_range_with_two_chars(self):
self.assertEqual(charPair('a', 'b').get_range(), 'U+0061-0062')
def test_get_range_with_multiple_chars(self):
self.assertEqual(charPair('a', 'd').get_range(), 'U+0061-0064')
class TestCharRanges(unittest.TestCase):
def test_empty(self):
self.assertEqual(_get_char_ranges([]), [])
def test_single_char(self):
self.assertEqual(_get_char_ranges(['a']), [charPair('a', 'a')])
def test_two_sequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'b']), [charPair('a', 'b')])
def test_two_nonsequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'c']), [charPair('a', 'a'), charPair('c', 'c')])
def test_multiple_ranges(self):
self.assertEqual(_get_char_ranges(['a', 'b', 'd', 'e', 'f', 'h']), [charPair('a', 'b'), charPair('d', 'f'), charPair('h', 'h')])
# Used to verify the number of glyphs in a font matches the number of (unique!) characters in the test string
def _count_glyphs_in_font(fontpath):
# with open(fontpath, 'rb') as f:
# wfr = woff2.WOFF2Reader(f)
# cmap = font['cmap']
# return len(cmap.getBestCmap())
# font.flavor = None # Decompress the font data
font = TTFont(fontpath)#flavor='woff2')#, sfntReader=wfr)
font.flavor = None # Decompress the font data
num_glyphs = font['maxp'].numGlyphs # Use font.getGlyphOrder() and https://fontdrop.info to examine, if weird
return num_glyphs
# Does a named glyph exist in the font?
def _font_contains(fontpath, charname : str) -> bool:
font = TTFont(fontpath)
font.flavor = None # Decompress the font data
return charname in font.getGlyphOrder()
class TestOptimiseFonts(unittest.TestCase):
# Contains unique characters, none repeated, a couple of capitals, some symbols, and 26 lowercase
test_string = " ,.@QT_abcdefghijklmnopqrstuvwxyz"
def test_optimise_fonts_with_single_font(self):
result = optimise_fonts(self.test_string, ['tests/Spirax-Regular.ttf'], fontpath='tests/output', verbose=False, print_stats=False)
# Basics
self.assertIsInstance(result, dict)
foundfonts = result["fonts"]
self.assertIn('tests/Spirax-Regular.ttf', foundfonts)
# Generated with the right name
self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2')
# If the number of glyphs in the font matches the expected number
# For +1, see test_optimise_fonts_with_empty_text
self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font(foundfonts['tests/Spirax-Regular.ttf']))
def test_optimise_fonts_with_multiple_fonts(self):
result = optimise_fonts(self.test_string,
['tests/Spirax-Regular.ttf', 'tests/EBGaramond-VariableFont_wght.ttf', 'tests/EBGaramond-Italic-VariableFont_wght.ttf'],
fontpath='tests/output', verbose=False, print_stats=False)
self.assertIsInstance(result, dict)
foundfonts = result["fonts"]
self.assertIn('tests/Spirax-Regular.ttf', foundfonts)
self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2')
self.assertIn('tests/EBGaramond-VariableFont_wght.ttf', foundfonts)
self.assertEqual(foundfonts['tests/EBGaramond-VariableFont_wght.ttf'], 'tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2')
self.assertIn('tests/EBGaramond-Italic-VariableFont_wght.ttf', foundfonts)
self.assertEqual(foundfonts['tests/EBGaramond-Italic-VariableFont_wght.ttf'], 'tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2')
# If the number of glyphs in the font matches the expected number
# + 1 for the tests below -- see test_optimise_fonts_with_empty_text
self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2'))
# + 16, + 12: EB Garamond contains multiple f-ligatures (eg fi), plus other variants, so the number of glyphs is higher. Italic has fewer.
self.assertEqual(len(self.test_string) + 1 + 16, _count_glyphs_in_font('tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2'))
self.assertEqual(len(self.test_string) + 1 + 12, _count_glyphs_in_font('tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2'))
def test_optimise_fonts_with_empty_text(self):
result = optimise_fonts("",
['tests/Spirax-Regular.ttf'],
fontpath='tests/output',
verbose=False, print_stats=False)
self.assertIsInstance(result, dict)
foundfonts = result["fonts"]
self.assertIn('tests/Spirax-Regular.ttf', foundfonts)
self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2')
# If the number of glyphs in the font matches the expected number: two, because an empty string is reported as containing space, see get_used_characters_in_str
# and fonts also seem to contain ".notdef":
# > font.getGlyphOrder()
# > ['.notdef', 'space']
self.assertEqual(2, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2'))
class TestOptimiseFontsForFiles(unittest.TestCase):
def setUp(self):
self.files = ['tests/test1-index-css.html', 'tests/test.txt', 'tests/test2.html']
self.font_output_dir = 'tests/output'
self.subsetname = 'TestFilesSubset'
self.verbose = False
self.print_stats = False
# Not used by any HTML/CSS, mimics manually adding a font
self.fonts = ['tests/Whisper-Regular.ttf', 'tests/NotoSans-VariableFont_wdth,wght.ttf', 'tests/NotoSansJP-VariableFont_wght.ttf']
@patch.object(sys, 'stdout') # provides mock_stdout in order to hide and verify console output
def test_optimise_fonts_for_files(self, mock_stdout): | result = optimise_fonts_for_files(files=self.files, font_output_dir=self.font_output_dir, subsetname=self.subsetname, fonts=self.fonts, | 4 | 2023-12-07 13:23:46+00:00 | 8k |
wanghao-cst/Omni-VideoAssistant | llava/model/omni_arch.py | [
{
"identifier": "build_vision_tower",
"path": "llava/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.p... | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn | 4,777 |
        # Splice each sample's video-frame features into its token-embedding
        # sequence in place of the single IMAGE_TOKEN_INDEX placeholder, then
        # grow labels / attention_mask to the new lengths.
        # The torch.Size(...) comments below record one example run
        # (batch=4, seq=375; 4 key frames x 256 patches x 4096 dims) -- they
        # are illustrative, not guarantees.
        new_input_embeds = []
        new_labels = [] if labels is not None else None
        cur_video_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids): # e.g. input_ids: torch.Size([4, 375])
            if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # sample has no image placeholder
                # multimodal LLM, but the current sample is not multimodal
                # FIXME: this is a hacky fix, for deepspeed zero3 to work
                half_len = cur_input_ids.shape[0] // 2
                cur_frames_features = key_frames_feature[cur_video_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
                cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
                # import pdb;pdb.set_trace()
                # cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0:0], cur_input_embeds_2], dim=0)
                # NOTE(review): concatenating cur_frames_features[0] makes the embeds
                # longer than labels[batch_idx] appended below; presumably only here
                # so the vision branch participates in the zero3 graph -- confirm.
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0], cur_input_embeds_2], dim=0)
                # cur_input_embeds = torch.cat([cur_input_embeds_1, cur_input_embeds_2], dim=0)
                new_input_embeds.append(cur_input_embeds)
                if labels is not None:
                    new_labels.append(labels[batch_idx])
                cur_video_idx += 1
                # import pdb;pdb.set_trace()
                # never enter it
                continue
            # Positions of the image placeholder token(s) in this sample.
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # e.g. (tensor([35], device='cuda:0'),)
            cur_new_input_embeds = []
            if labels is not None: # labels: torch.Size([4, 375])
                cur_labels = labels[batch_idx] # torch.Size([375]): -100...labels...-100
                cur_new_labels = []
                assert cur_labels.shape == cur_input_ids.shape
            # Replace each placeholder, left to right, with the flattened
            # frame features for the current video.
            while image_token_indices.numel() > 0: # numel() = number of placeholders left (1 here)
                # import pdb;pdb.set_trace()
                # if cur_video_idx > len(key_frames_feature)-1:
                #     cur_frames_features = key_frames_feature[-1] # for gradio demo
                # else:
                cur_frames_features = key_frames_feature[cur_video_idx] # e.g. torch.Size([4, 256, 4096])
                # Flatten (frames, patches, dim) -> (frames*patches, dim).
                cur_frames_features = cur_frames_features.reshape(-1,4096) # torch.Size([1024, 4096])
                image_token_start = image_token_indices[0] # e.g. tensor(35, device='cuda:0')
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
                    # im_start/im_end variant: keep the start/end tokens trainable
                    # and detach the surrounding text embeddings.
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
                    cur_new_input_embeds.append(cur_frames_features)
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
                        cur_labels = cur_labels[image_token_start+2:]
                else: # the path taken in this setup
                    # import pdb;pdb.set_trace()
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) # embeds of the instruction prefix: torch.Size([35, 4096])
                    cur_new_input_embeds.append(cur_frames_features) # torch.Size([1024, 4096]); frame features injected into the input
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start]) # torch.Size([35]), all IGNORE_INDEX (-100)
                        # Frame-feature positions never contribute to the loss.
                        cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) # torch.Size([1024])
                        cur_labels = cur_labels[image_token_start+1:] # 339 = 375 - 35 - 1 (img token); appended to cur_new_labels below
                cur_video_idx += 1
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
                    cur_input_ids = cur_input_ids[image_token_start+2:]
                else:
                    cur_input_ids = cur_input_ids[image_token_start+1:] # torch.Size([339])
                image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # empty now
            if cur_input_ids.numel() > 0: # True: embed the remaining text after the last placeholder
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
                else:
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids)) # [template torch.Size([35, 4096]), image features torch.Size([1024, 4096]), QA: torch.Size([339, 4096])]
                if labels is not None:
                    cur_new_labels.append(cur_labels) # [torch.Size([35]), torch.Size([1024]), torch.Size([339])]; everything before the answer is -100
            cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) # torch.Size([1398, 4096]): 35 + 1024 + 339
            new_input_embeds.append(cur_new_input_embeds)
            if labels is not None:
                cur_new_labels = torch.cat(cur_new_labels, dim=0) # torch.Size([1398])
                new_labels.append(cur_new_labels)
        # Batch the per-sample sequences back together. If lengths differ,
        # right-pad embeds with zeros, labels with IGNORE_INDEX, and extend
        # the attention mask accordingly.
        if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): # variable lengths
            max_len = max(x.shape[0] for x in new_input_embeds) # e.g. 1910
            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
            if labels is not None:
                new_labels_align = []
                _new_labels = new_labels  # keep unpadded labels for per-sample true lengths below
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)
            if attention_mask is not None:
                new_attention_mask = []
                for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
                    # True for every inserted feature position, False for padding.
                    new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
                    new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
                    cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert attention_mask.shape == new_labels.shape
        else: # all samples same length (e.g. image mode with a fixed 256-feature insert)
            # import pdb;pdb.set_trace()
            new_input_embeds = torch.stack(new_input_embeds, dim=0) # torch.Size([4, 716, 4096]); 716 = 461 - 1 img token + 256 img features
            if labels is not None: # labels: torch.Size([4, 461])
                new_labels = torch.stack(new_labels, dim=0) # torch.Size([4, 716])
            if attention_mask is not None: # torch.Size([4, 461])
                new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) # torch.Size([4, 255]) of True: 256 img features minus the 1 img token they replace
                attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) # torch.Size([4, 716]); 716 = 461 + 255 newly inserted positions (mask True)
                assert attention_mask.shape == new_input_embeds.shape[:2]
        # input_ids are consumed (None); callers use the embeds directly.
        return None, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token: # False
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class OmniMetaModel:
    """Mixin that adds multimodal components to a language-model base class.

    Intended to be mixed into a HF-style model class, so ``super().__init__``
    forwards ``config`` to the real base. Components added:
      * ``vision_tower``  -- image/frame encoder (lazily built when the config
        already carries multimodal keys, i.e. continue-finetune).
      * ``mm_projector``  -- maps vision features into the LM embedding space.
      * ``frames_conv``   -- temporal conv fusing per-frame video features.
    """

    def __init__(self, config):
        super(OmniMetaModel, self).__init__(config)
        # Only a config from a previous multimodal run has this key:
        # fresh training -> False; v1.5 continue-finetune -> True.
        if hasattr(config, "mm_vision_tower"): # train False, v1.5 continue finetune True
            self.vision_tower = build_vision_tower(config, delay_load=True)
            self.mm_projector = build_vision_projector(config)
        if hasattr(config, "mm_video_fuser"):
            # Temporal fuser: input presumably (b, 576 patches, n_frames, embed);
            # kernel (12,1) / stride (10,1) downsamples the frame axis into key frames.
            # self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
            self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1))
            # self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096 for exp1 test uncomment it

    def get_vision_tower(self):
        """Return the vision tower (unwrapping the FSDP list wrapper), or None."""
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            # Under FSDP the tower is stored wrapped in a single-element list.
            vision_tower = vision_tower[0]
        return vision_tower

    def initialize_vision_modules(self, model_args, fsdp=None): # Train
        """Build/load vision tower, projector and video fuser for training.

        Records the multimodal settings on ``self.config`` and optionally loads
        pretrained ``mm_projector`` weights from ``pretrain_mm_mlp_adapter``.
        With FSDP the tower is wrapped in a list.
        """
        vision_tower = model_args.vision_tower # 'openai/clip-vit-large-patch14'
        mm_vision_select_layer = model_args.mm_vision_select_layer # -2
        mm_vision_select_feature = model_args.mm_vision_select_feature # patch
        pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter # '/home/wanghao/weights/llava/llava-pretrain-vicuna-7b-v1.3/mm_projector.bin'

        self.config.mm_vision_tower = vision_tower

        # vision_tower = build_vision_tower(model_args)
        if self.get_vision_tower() is None: ## First finetune takes this branch (requires_grad=True); on continue-finetune it already exists from the pretrained checkpoint.
            vision_tower = build_vision_tower(model_args)

            if fsdp is not None and len(fsdp) > 0:
                self.vision_tower = [vision_tower]
            else:
                self.vision_tower = vision_tower
        else: ## Implement continue finetuning.
            if fsdp is not None and len(fsdp) > 0:
                vision_tower = self.vision_tower[0]
            else:
                vision_tower = self.vision_tower
            vision_tower.load_model()

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
        self.config.mm_hidden_size = vision_tower.hidden_size # 1024
        self.config.mm_vision_select_layer = mm_vision_select_layer # -2
        self.config.mm_vision_select_feature = mm_vision_select_feature # patch

        # self.mm_projector = build_vision_projector(self.config) # 1024->4096
        if getattr(self, 'mm_projector', None) is None: ## First finetune builds it (requires_grad=True); continue-finetune already has it from the pretrained checkpoint.
            self.mm_projector = build_vision_projector(self.config)
        else:
            # In case it is frozen by LoRA
            for p in self.mm_projector.parameters():
                p.requires_grad = True

        if pretrain_mm_mlp_adapter is not None:
            mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
            def get_w(weights, keyword):
                # Strip the "...<keyword>." prefix from checkpoint keys.
                return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} # weight:torch.Size([4096, 1024]) bias:torch.Size([4096])

            self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
            # v1.5: mm_projector_weights['model.mm_projector.0.weight'].shape: torch.Size([4096, 1024])
            # model.mm_projector.0.bias: torch.Size([4096]); model.mm_projector.2.weight: torch.Size([4096, 4096]); model.mm_projector.2.bias: torch.Size([4096])

        if getattr(self, 'frames_conv', None) is None: ## Implement continue finetuning.
            # self.frames_attn = MultiheadAttention(256*4096, num_heads)
            # self.frames_conv = nn.Conv2d(4096, 4096, kernel_size=(12,1), stride=(10,1)) # b 4096 51 256
            # self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
            self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
            # self.keyframes_attn = MultiheadAttention(256*4096, num_heads)
        self.config.mm_video_fuser = 'frames_conv'
class OmniMetaForCausalLM(ABC):
    @abstractmethod
    def get_model(self):
        """Return the underlying model carrying the multimodal modules (implemented by subclasses)."""
        pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_frames(self, frames):
frames_features = self.get_model().get_vision_tower()(frames) # torch.Size([276, 256, 1024])
frames_features = self.get_model().mm_projector(frames_features) # torch.Size([276, 256, 4096]) torch.float16
return frames_features
    def prepare_inputs_labels_for_multimodal(
        self, input_ids, attention_mask, past_key_values, labels, videos
    ):
        """Splice video key-frame features into the token embedding stream.

        Each IMAGE_TOKEN_INDEX placeholder in ``input_ids`` is replaced by the
        flattened key-frame features of the corresponding video. Labels at
        visual positions are set to IGNORE_INDEX, and the attention mask is
        padded/extended to the new (longer) sequence length.

        Returns ``(input_ids, attention_mask, past_key_values, inputs_embeds,
        labels)``; ``input_ids`` is None once embeddings are precomputed.
        Shapes in the inline comments are one observed example, not a contract.
        """
        vision_tower = self.get_vision_tower()
        # frames_attn = self.get_model().frames_attn
        frames_conv = self.get_model().frames_conv
        # keyframes_attn = self.get_model().keyframes_attn
        # Text-only batch, or single-token decode step during generation:
        if vision_tower is None or videos is None or input_ids.shape[1] == 1: # False
            if past_key_values is not None and vision_tower is not None and videos is not None and input_ids.shape[1] == 1:
                # Decode step with cache: mask must cover cached length + the new token.
                attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
            return input_ids, attention_mask, past_key_values, None, labels

        # videos = [torch.Size([51, 3, 224, 224]), torch.Size([79, 3, 224, 224]), torch.Size([60, 3, 224, 224]), torch.Size([86, 3, 224, 224])]
        assert type(videos) is list or videos.ndim == 5 # True
        # Encode all frames of all videos in one pass, then split per video.
        concat_frames = torch.cat([video for video in videos], dim=0) # torch.Size([79, 3, 336, 336])
        frames_features = self.encode_frames(concat_frames) # torch.Size([276, 256, 4096]) torch.Size([79, 576, 4096])
        split_sizes = [video.shape[0] for video in videos] # [51, 79, 60, 86]
        frames_features = torch.split(frames_features, split_sizes, dim=0) # (torch.Size([51, 256, 4096]), torch.Size([79, 256, 4096]), torch.Size([60, 256, 4096]), torch.Size([86, 256, 4096]))
        # frames_features = [x.flatten(0, 1) for x in frames_features]
        # Reduce each video's frame sequence to at most 6 key frames.
        key_frames_feature = []
        for frame_feature in frames_features:
            frame_feature = frame_feature.unsqueeze(0) # b 51 256 4096
            frame_feature = frame_feature.permute(0,2,1,3) # b 256 51 4096
            # short video
            if frame_feature.shape[2] >= 12:
                # Temporal conv only when there are enough frames for the kernel.
                frame_feature = frames_conv(frame_feature) # torch.Size([1, 256, 4, 4096])
            frame_feature = frame_feature.squeeze(0).permute(1,0,2) # torch.Size([4, 256, 4096])
            # key_frames_feature.append(frame_feature[:6])
            num_frames = frame_feature.shape[0]
            # Evenly subsample to at most 6 key frames.
            key_frames_feature.append(frame_feature[::max(1,num_frames//5)][:6]) # v1.5 576 patch

        new_input_embeds = []
        new_labels = [] if labels is not None else None
        cur_video_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids): # torch.Size([4, 375])
            if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # 1 False
                # multimodal LLM, but the current sample is not multimodal
                # FIXME: this is a hacky fix, for deepspeed zero3 to work
                half_len = cur_input_ids.shape[0] // 2
                cur_frames_features = key_frames_feature[cur_video_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
                cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
                # cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0:0], cur_input_embeds_2], dim=0)
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0], cur_input_embeds_2], dim=0)
                # cur_input_embeds = torch.cat([cur_input_embeds_1, cur_input_embeds_2], dim=0)
                new_input_embeds.append(cur_input_embeds)
                if labels is not None:
                    new_labels.append(labels[batch_idx])
                cur_video_idx += 1
                # never enter it
                continue
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # (tensor([35], device='cuda:0'),)
            cur_new_input_embeds = []
            if labels is not None: # torch.Size([4, 375])
                cur_labels = labels[batch_idx] # torch.Size([375]): -100...labels...-100
                cur_new_labels = []
                assert cur_labels.shape == cur_input_ids.shape
            while image_token_indices.numel() > 0: # number of remaining image placeholders (1 here)
                # if cur_video_idx > len(key_frames_feature)-1:
                #     cur_frames_features = key_frames_feature[-1] # for gradio demo
                # else:
                cur_frames_features = key_frames_feature[cur_video_idx] # torch.Size([4, 256, 4096])
                # Flatten key frames x patches into one token run.
                cur_frames_features = cur_frames_features.reshape(-1,4096) # torch.Size([1024, 4096])
                image_token_start = image_token_indices[0] # tensor(35, device='cuda:0')
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
                    # im_start/im_end variant: keep surrounding special tokens trainable,
                    # detach the rest so only the adapter path gets gradients.
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
                    cur_new_input_embeds.append(cur_frames_features)
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
                        cur_labels = cur_labels[image_token_start+2:]
                else: # True
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) # embeddings of the instruction prefix: torch.Size([35, 4096])
                    cur_new_input_embeds.append(cur_frames_features) # torch.Size([1024, 4096]) splice the frame features into the input
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start]) # torch.Size([35]), all IGNORE_INDEX (-100)
                        cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) # torch.Size([1024])
                        cur_labels = cur_labels[image_token_start+1:] # 339 = 375-35-1 (img token); appended to cur_new_labels after the loop
                cur_video_idx += 1
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
                    cur_input_ids = cur_input_ids[image_token_start+2:]
                else:
                    cur_input_ids = cur_input_ids[image_token_start+1:] # torch.Size([339])
                image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # empty now
            if cur_input_ids.numel() > 0: # True
                # Append embeddings for the remaining text after the last image token.
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
                else:
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids)) # [torch.Size([35, 4096]) fixed template, torch.Size([1024, 4096]) image features, QA: torch.Size([339, 4096])]
                if labels is not None:
                    cur_new_labels.append(cur_labels) # [torch.Size([35]), torch.Size([1024]) all -100 so far, torch.Size([339])]
            cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) # torch.Size([1398, 4096]): 35+1024+339
            new_input_embeds.append(cur_new_input_embeds)
            if labels is not None:
                cur_new_labels = torch.cat(cur_new_labels, dim=0) # torch.Size([1398])
                new_labels.append(cur_new_labels)

        if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): # True
            # Ragged batch: right-pad every sample to the longest sequence.
            max_len = max(x.shape[0] for x in new_input_embeds) # 1910

            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)

            if labels is not None:
                new_labels_align = []
                _new_labels = new_labels
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)

            if attention_mask is not None:
                # Left: True over inserted visual tokens; right: False over padding.
                new_attention_mask = []
                for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
                    new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
                    new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
                    cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert attention_mask.shape == new_labels.shape
        else: # uniform lengths (image mode: every sample grew by the same number of feature tokens)
            new_input_embeds = torch.stack(new_input_embeds, dim=0) # torch.Size([4, 716, 4096]) 716=461-1imgtoken+256imgfeature
            if labels is not None: # torch.Size([4, 461])
                new_labels = torch.stack(new_labels, dim=0) # torch.Size([4, 716])

            if attention_mask is not None: # torch.Size([4, 461])
                new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) # torch.Size([4, 255]) of True: 256 image features minus the 1 replaced image token
                attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) # torch.Size([4, 716]) = 461 + 255 newly inserted feature tokens (mask True)
                assert attention_mask.shape == new_input_embeds.shape[:2]

        return None, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token: # False | tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) | 4 | 2023-12-05 08:02:17+00:00 | 8k |
OpenDriveLab/LaneSegNet | projects/lanesegnet/datasets/openlanev2_subset_A_lanesegnet_dataset.py | [
{
"identifier": "lanesegnet_evaluate",
"path": "projects/lanesegnet/datasets/openlanev2_evaluate_custom.py",
"snippet": "def lanesegnet_evaluate(ground_truth, predictions, verbose=True):\n\n if isinstance(ground_truth, str):\n ground_truth = io.pickle_load(ground_truth)\n\n if predictions i... | import os
import random
import copy
import numpy as np
import torch
import mmcv
import cv2
import shapely
from shapely.geometry import LineString
from pyquaternion import Quaternion
from mmcv.parallel import DataContainer as DC
from mmdet.datasets import DATASETS
from mmdet3d.datasets import Custom3DDataset
from .openlanev2_evaluate_custom import lanesegnet_evaluate
from ..core.lane.util import fix_pts_interpolate | 3,726 | right_boundary = lane['right_laneline']
LineString_right_boundary = LineString(right_boundary)
gt_lanes.append([LineString_lane, LineString_left_boundary, LineString_right_boundary])
gt_lane_labels_3d.append(0)
gt_lane_left_type.append(lane['left_laneline_type'])
gt_lane_right_type.append(lane['right_laneline_type'])
for area in ann_info['area']:
if area['category'] == 1 and 'ped_crossing' in self.LANE_CLASSES:
centerline, left_boundary, right_boundary = self.ped2lane_segment(area['points'])
gt_lanes.append([centerline, left_boundary, right_boundary])
gt_lane_labels_3d.append(1)
gt_lane_left_type.append(0)
gt_lane_right_type.append(0)
elif area['category'] == 2 and 'road_boundary' in self.LANE_CLASSES:
raise NotImplementedError
topology_lsls = np.array(ann_info['topology_lsls'], dtype=np.float32)
te_bboxes = np.array([np.array(sign['points'], dtype=np.float32).flatten() for sign in ann_info['traffic_element']])
te_labels = np.array([sign['attribute'] for sign in ann_info['traffic_element']], dtype=np.int64)
if len(te_bboxes) == 0:
te_bboxes = np.zeros((0, 4), dtype=np.float32)
te_labels = np.zeros((0, ), dtype=np.int64)
topology_lste = np.array(ann_info['topology_lste'], dtype=np.float32)
annos = dict(
gt_lanes_3d = gt_lanes,
gt_lane_labels_3d = gt_lane_labels_3d,
gt_lane_adj = topology_lsls,
bboxes = te_bboxes,
labels = te_labels,
gt_lane_lcte_adj = topology_lste,
gt_lane_left_type = gt_lane_left_type,
gt_lane_right_type = gt_lane_right_type,
)
return annos
def prepare_train_data(self, index):
data_queue = []
# temporal aug
prev_indexs_list = list(range(index-self.queue_length, index))
random.shuffle(prev_indexs_list)
prev_indexs_list = sorted(prev_indexs_list[1:], reverse=True)
input_dict = self.get_data_info(index)
if input_dict is None:
return None
sample_idx = input_dict['sample_idx']
scene_token = input_dict['scene_token']
self.pre_pipeline(input_dict)
example = self.pipeline(input_dict)
if self.filter_empty_gt and \
(example is None or len(example['gt_lane_labels_3d']._data) == 0):
return None
if self.filter_empty_te and \
(example is None or len(example['gt_labels']._data) == 0):
return None
data_queue.insert(0, example)
for i in prev_indexs_list:
i = max(0, i)
input_dict = self.get_data_info(i)
if input_dict is None:
return None
if input_dict['sample_idx'] < sample_idx and input_dict['scene_token'] == scene_token:
self.pre_pipeline(input_dict)
example = self.pipeline(input_dict)
if self.filter_empty_gt and \
(example is None or len(example['gt_lane_labels_3d']._data) == 0):
return None
sample_idx = input_dict['sample_idx']
data_queue.insert(0, copy.deepcopy(example))
return self.union2one(data_queue)
def union2one(self, queue):
"""
convert sample queue into one single sample.
"""
imgs_list = [each['img'].data for each in queue]
metas_map = {}
prev_pos = None
prev_angle = None
for i, each in enumerate(queue):
metas_map[i] = each['img_metas'].data
if i == 0:
metas_map[i]['prev_bev'] = False
prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
metas_map[i]['can_bus'][:3] = 0
metas_map[i]['can_bus'][-1] = 0
else:
metas_map[i]['prev_bev'] = True
tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
metas_map[i]['can_bus'][:3] -= prev_pos
metas_map[i]['can_bus'][-1] -= prev_angle
prev_pos = copy.deepcopy(tmp_pos)
prev_angle = copy.deepcopy(tmp_angle)
queue[-1]['img'] = DC(torch.stack(imgs_list),
cpu_only=False, stack=True)
queue[-1]['img_metas'] = DC(metas_map, cpu_only=True)
queue = queue[-1]
return queue
def format_openlanev2_gt(self):
gt_dict = {}
for idx in range(len(self.data_infos)):
info = copy.deepcopy(self.data_infos[idx])
key = (self.split, info['segment_id'], str(info['timestamp']))
areas = []
for area in info['annotation']['area']:
if area['category'] == 1:
points = area['points']
| #---------------------------------------------------------------------------------------#
# LaneSegNet: Map Learning with Lane Segment Perception for Autonomous Driving #
# Source code: https://github.com/OpenDriveLab/LaneSegNet #
# Copyright (c) OpenDriveLab. All rights reserved. #
#---------------------------------------------------------------------------------------#
@DATASETS.register_module()
class OpenLaneV2_subset_A_LaneSegNet_Dataset(Custom3DDataset):
CAMS = ('ring_front_center', 'ring_front_left', 'ring_front_right',
'ring_rear_left', 'ring_rear_right', 'ring_side_left', 'ring_side_right')
LANE_CLASSES = ('lane_segment', 'ped_crossing', 'road_boundary')
TE_CLASSES = ('traffic_light', 'road_sign')
TE_ATTR_CLASSES = ('unknown', 'red', 'green', 'yellow',
'go_straight', 'turn_left', 'turn_right',
'no_left_turn', 'no_right_turn', 'u_turn', 'no_u_turn',
'slight_left', 'slight_right')
MAP_CHANGE_LOGS = [
'75e8adad-50a6-3245-8726-5e612db3d165',
'54bc6dbc-ebfb-3fba-b5b3-57f88b4b79ca',
'af170aac-8465-3d7b-82c5-64147e94af7d',
'6e106cf8-f6dd-38f6-89c8-9be7a71e7275',
]
def __init__(self,
data_root,
ann_file,
queue_length=1,
filter_empty_te=False,
filter_map_change=False,
points_num=10,
split='train',
**kwargs):
self.filter_map_change = filter_map_change
self.split = split
super().__init__(data_root, ann_file, **kwargs)
self.queue_length = queue_length
self.filter_empty_te = filter_empty_te
self.points_num = points_num
self.LANE_CLASSES = self.CLASSES
def load_annotations(self, ann_file):
"""Load annotation from a olv2 pkl file.
Args:
ann_file (str): Path of the annotation file.
Returns:
list[dict]: Annotation info from the json file.
"""
data_infos = mmcv.load(ann_file, file_format='pkl')
if isinstance(data_infos, dict):
if self.filter_map_change and self.split == 'train':
data_infos = [info for info in data_infos.values() if info['meta_data']['source_id'] not in self.MAP_CHANGE_LOGS]
else:
data_infos = list(data_infos.values())
return data_infos
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Data information that will be passed to the data \
preprocessing pipelines.
"""
info = self.data_infos[index]
input_dict = dict(
sample_idx=info['timestamp'],
scene_token=info['segment_id']
)
if self.modality['use_camera']:
image_paths = []
lidar2img_rts = []
lidar2cam_rts = []
cam_intrinsics = []
for cam_name, cam_info in info['sensor'].items():
image_path = cam_info['image_path']
image_paths.append(os.path.join(self.data_root, image_path))
# obtain lidar to image transformation matrix
lidar2cam_r = np.linalg.inv(cam_info['extrinsic']['rotation'])
lidar2cam_t = cam_info['extrinsic']['translation'] @ lidar2cam_r.T
lidar2cam_rt = np.eye(4)
lidar2cam_rt[:3, :3] = lidar2cam_r.T
lidar2cam_rt[3, :3] = -lidar2cam_t
intrinsic = np.array(cam_info['intrinsic']['K'])
viewpad = np.eye(4)
viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
lidar2img_rt = (viewpad @ lidar2cam_rt.T)
lidar2img_rts.append(lidar2img_rt)
cam_intrinsics.append(viewpad)
lidar2cam_rts.append(lidar2cam_rt.T)
input_dict.update(
dict(
img_filename=image_paths,
lidar2img=lidar2img_rts,
cam_intrinsic=cam_intrinsics,
lidar2cam=lidar2cam_rts,
))
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
if self.filter_empty_gt and len(annos['gt_lane_labels_3d']) == 0:
return None
if self.filter_empty_te and len(annos['labels']) == 0:
return None
can_bus = np.zeros(18)
rotation = Quaternion._from_matrix(np.array(info['pose']['rotation']))
can_bus[:3] = info['pose']['translation']
can_bus[3:7] = rotation
patch_angle = rotation.yaw_pitch_roll[0] / np.pi * 180
if patch_angle < 0:
patch_angle += 360
can_bus[-2] = patch_angle / 180 * np.pi
can_bus[-1] = patch_angle
input_dict['can_bus'] = can_bus
input_dict['lidar2global_rotation'] = np.array(info['pose']['rotation'])
return input_dict
def ped2lane_segment(self, points):
assert points.shape[0] == 5
dir_vector = points[1] - points[0]
dir = np.rad2deg(np.arctan2(dir_vector[1], dir_vector[0]))
if dir < -45 or dir > 135:
left_boundary = points[[2, 3]]
right_boundary = points[[1, 0]]
else:
left_boundary = points[[0, 1]]
right_boundary = points[[3, 2]]
centerline = LineString((left_boundary + right_boundary) / 2)
left_boundary = LineString(left_boundary)
right_boundary = LineString(right_boundary)
return centerline, left_boundary, right_boundary
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information
"""
info = self.data_infos[index]
ann_info = info['annotation']
gt_lanes = []
gt_lane_labels_3d = []
gt_lane_left_type = []
gt_lane_right_type = []
for idx, lane in enumerate(ann_info['lane_segment']):
centerline = lane['centerline']
LineString_lane = LineString(centerline)
left_boundary = lane['left_laneline']
LineString_left_boundary = LineString(left_boundary)
right_boundary = lane['right_laneline']
LineString_right_boundary = LineString(right_boundary)
gt_lanes.append([LineString_lane, LineString_left_boundary, LineString_right_boundary])
gt_lane_labels_3d.append(0)
gt_lane_left_type.append(lane['left_laneline_type'])
gt_lane_right_type.append(lane['right_laneline_type'])
for area in ann_info['area']:
if area['category'] == 1 and 'ped_crossing' in self.LANE_CLASSES:
centerline, left_boundary, right_boundary = self.ped2lane_segment(area['points'])
gt_lanes.append([centerline, left_boundary, right_boundary])
gt_lane_labels_3d.append(1)
gt_lane_left_type.append(0)
gt_lane_right_type.append(0)
elif area['category'] == 2 and 'road_boundary' in self.LANE_CLASSES:
raise NotImplementedError
topology_lsls = np.array(ann_info['topology_lsls'], dtype=np.float32)
te_bboxes = np.array([np.array(sign['points'], dtype=np.float32).flatten() for sign in ann_info['traffic_element']])
te_labels = np.array([sign['attribute'] for sign in ann_info['traffic_element']], dtype=np.int64)
if len(te_bboxes) == 0:
te_bboxes = np.zeros((0, 4), dtype=np.float32)
te_labels = np.zeros((0, ), dtype=np.int64)
topology_lste = np.array(ann_info['topology_lste'], dtype=np.float32)
annos = dict(
gt_lanes_3d = gt_lanes,
gt_lane_labels_3d = gt_lane_labels_3d,
gt_lane_adj = topology_lsls,
bboxes = te_bboxes,
labels = te_labels,
gt_lane_lcte_adj = topology_lste,
gt_lane_left_type = gt_lane_left_type,
gt_lane_right_type = gt_lane_right_type,
)
return annos
def prepare_train_data(self, index):
data_queue = []
# temporal aug
prev_indexs_list = list(range(index-self.queue_length, index))
random.shuffle(prev_indexs_list)
prev_indexs_list = sorted(prev_indexs_list[1:], reverse=True)
input_dict = self.get_data_info(index)
if input_dict is None:
return None
sample_idx = input_dict['sample_idx']
scene_token = input_dict['scene_token']
self.pre_pipeline(input_dict)
example = self.pipeline(input_dict)
if self.filter_empty_gt and \
(example is None or len(example['gt_lane_labels_3d']._data) == 0):
return None
if self.filter_empty_te and \
(example is None or len(example['gt_labels']._data) == 0):
return None
data_queue.insert(0, example)
for i in prev_indexs_list:
i = max(0, i)
input_dict = self.get_data_info(i)
if input_dict is None:
return None
if input_dict['sample_idx'] < sample_idx and input_dict['scene_token'] == scene_token:
self.pre_pipeline(input_dict)
example = self.pipeline(input_dict)
if self.filter_empty_gt and \
(example is None or len(example['gt_lane_labels_3d']._data) == 0):
return None
sample_idx = input_dict['sample_idx']
data_queue.insert(0, copy.deepcopy(example))
return self.union2one(data_queue)
def union2one(self, queue):
"""
convert sample queue into one single sample.
"""
imgs_list = [each['img'].data for each in queue]
metas_map = {}
prev_pos = None
prev_angle = None
for i, each in enumerate(queue):
metas_map[i] = each['img_metas'].data
if i == 0:
metas_map[i]['prev_bev'] = False
prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
metas_map[i]['can_bus'][:3] = 0
metas_map[i]['can_bus'][-1] = 0
else:
metas_map[i]['prev_bev'] = True
tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
metas_map[i]['can_bus'][:3] -= prev_pos
metas_map[i]['can_bus'][-1] -= prev_angle
prev_pos = copy.deepcopy(tmp_pos)
prev_angle = copy.deepcopy(tmp_angle)
queue[-1]['img'] = DC(torch.stack(imgs_list),
cpu_only=False, stack=True)
queue[-1]['img_metas'] = DC(metas_map, cpu_only=True)
queue = queue[-1]
return queue
def format_openlanev2_gt(self):
gt_dict = {}
for idx in range(len(self.data_infos)):
info = copy.deepcopy(self.data_infos[idx])
key = (self.split, info['segment_id'], str(info['timestamp']))
areas = []
for area in info['annotation']['area']:
if area['category'] == 1:
points = area['points'] | left_boundary = fix_pts_interpolate(points[[0, 1]], 10) | 1 | 2023-12-06 07:13:48+00:00 | 8k |
RobertCsordas/moe_attention | dataset/text/chunked_setencepiece_lm_dataset.py | [
{
"identifier": "SentencepieceVocabulary",
"path": "dataset/text/tokenizers/sentencepiece.py",
"snippet": "class SentencepieceVocabulary:\n def __init__(self, path: str, train_data: Union[str, Iterator], vocab_size: int):\n global spm\n import sentencepiece as spm\n\n model_file ... | from .tokenizers.sentencepiece import SentencepieceVocabulary
from framework.utils.download import UrlStream
from framework.utils import LockFile
from typing import List, Optional, Dict, Any
from .lm_dataset import WordLevelLanguageModelTestState
import gzip
import json
import numpy as np
import os
import bisect
import time
import torch.multiprocessing as mp
import math | 3,866 | chunk_sizes = []
for i in range(self._n_chunks):
fn = self._chunk_fname(i)
if os.path.exists(fn):
chunk_sizes.append(os.path.getsize(fn) // self.data_dtype(0).itemsize)
else:
break
return chunk_sizes
def get_ready_tokens(self) -> int:
return sum(self.get_chunk_sizes())
def __init__(self, unroll_len: int, n_extra: int = 1, split: str = 'train',
cache_dir: str = "./cache/", n_tokens: int = 8000, token_limit: Optional[int] = None) -> None:
self.split = split
self.n_tokens = n_tokens
self.unroll_len = unroll_len
self.n_extra = n_extra
self.update_data_type()
self._cache_dir = os.path.join(cache_dir, self.__class__.__name__, self._get_variant_id())
self._chunk_dir = os.path.join(self._cache_dir, "tokenized_chunks", split)
self._n_chunks = self.get_n_shards()
self.chunk_sizes = [0] * self._n_chunks
self.chunk_offsets = [0] * self._n_chunks
self.chunk_mmap = [None] * self._n_chunks
self.last_available_chunk = -1
self.last_accessed_chunk = -1
self.token_limit = int(math.ceil(token_limit)) if token_limit is not None else None
os.makedirs(self._chunk_dir, exist_ok=True)
self._sp_model_name = os.path.join(self._cache_dir, "tokenizer.model")
with LockFile(self._cache_dir + "/lock"):
self.vocabulary = SentencepieceVocabulary(self._sp_model_name, GenToIt(self.get_tokenizer_train_sentences), n_tokens)
print(f"{self.__class__.__name__}: Loaded tokenizer.")
missing = [i for i in range(self._n_chunks) if not os.path.exists(self._chunk_fname(i))]
print(f"{self.__class__.__name__}: {len(missing)} chunks missing")
if missing:
if token_limit is not None:
pool = mp.Pool(min(mp.cpu_count(), len(missing)))
while True:
tokens_ready = self.get_ready_tokens()
if tokens_ready >= token_limit:
print("Token limit reached. No need to tokenize more.")
break
print(f"{self.__class__.__name__}: {tokens_ready/token_limit*100:.2f}% ready.")
chunks_ready = len(self.get_chunk_sizes())
if chunks_ready == 0:
print("Tokenizing first chunk to estimate the number of required chunks...")
pool.map(self.tokenize_chunk, [0])
continue
elif chunks_ready >= self._n_chunks:
print("All chunks ready. No need to tokenize more.")
break
n_estimated = int(math.ceil(chunks_ready * (token_limit / tokens_ready)))
print(f"{self.__class__.__name__}: Tokenizing {n_estimated} estimated chunks...")
pool.map(self.tokenize_chunk, [a for a in range(chunks_ready, n_estimated) if a in missing])
print(f"Limiting to {token_limit} tokens")
missing = missing[:token_limit // self.unroll_len]
del pool
else:
mp.Pool(min(mp.cpu_count(), len(missing))).map(self.tokenize_chunk, missing)
self.chunk_sizes = self.get_chunk_sizes()
self.chunk_offsets = self.chunk_offsets[:len(self.chunk_sizes)]
self.chunk_mmap = self.chunk_mmap[:len(self.chunk_sizes)]
lim_found = False
for i in range(1, len(self.chunk_sizes)):
self.chunk_offsets[i] = self.chunk_offsets[i - 1] + self.chunk_sizes[i]
if self.token_limit is not None and not lim_found and self.chunk_offsets[i] >= self.token_limit:
print(f"{self.__class__.__name__}: Limiting to first {i} chunks because limited to {self.token_limit} tokens")
lim_found = True
def __len__(self):
l = self.linear_len()
if self.token_limit is not None:
l = min(l, self.token_limit)
return l // self.unroll_len
def linear_len(self):
return self.chunk_sizes[-1] + self.chunk_offsets[-1]
def get_linear(self, offset: int, clen: int):
chunk_index = bisect.bisect(self.chunk_offsets, offset) - 1
chunk_offset = offset - self.chunk_offsets[chunk_index]
self.do_mmap(chunk_index)
if chunk_offset + clen > self.chunk_sizes[chunk_index]:
# Wrapping over chunk boundary
next_chunk = (chunk_index + 1) % len(self.chunk_sizes)
self.do_mmap(next_chunk)
d1 = self.chunk_mmap[chunk_index][chunk_offset:]
d2 = self.chunk_mmap[next_chunk][:clen-len(d1)]
r = np.concatenate([d1, d2])
else:
r = self.chunk_mmap[chunk_index][chunk_offset:chunk_offset+clen]
assert r.shape[0] == clen
return r
def __getitem__(self, item: int) -> Dict[str, Any]:
return {
"data": self.get_linear(item * self.unroll_len, self.unroll_len + self.n_extra)
}
| # Based on https://huggingface.co/datasets/c4/blob/main/c4.py
class GenToIt:
def __init__(self, gen, *args, **kwargs):
self.gen = gen
self.args = args
self.kwargs = kwargs
self.gen_inst = None
self.__iter__()
self.initialized = False
def __iter__(self):
assert (self.gen_inst is None) or (self.initialized == False)
self.initialized = True
self.gen_inst = self.gen(*self.args, **self.kwargs)
return self
def __next__(self):
try:
n = next(self.gen_inst)
return n
except StopIteration:
self.gen_inst = None
raise
class ChunkedSentencepieceLMDataset:
TOKENIZER_N_FILES = 10
def _get_variant_id(self) -> str:
return f"{self.__class__.__name__}-{self.n_tokens}"
def gzip_line_iterator(self, url: str):
stream = UrlStream(url)
print(f"Opening shard {url}, size {stream.size()}")
for l in gzip.GzipFile(fileobj=stream):
txt = json.loads(l.decode("utf-8"))["text"]
if txt:
yield txt + "<STORY_SEP>"
def get_url(self, index: int, split: Optional[str] = None) -> str:
raise NotImplementedError()
def get_n_shards(self, split: Optional[str] = None) -> int:
raise NotImplementedError()
def get_tokenizer_train_sentences(self):
n_files = min(self.TOKENIZER_N_FILES, self.get_n_shards("train"))
for i in range(n_files):
url = self.get_url(i, "train")
for txt in self.gzip_line_iterator(url):
yield txt
def _chunk_fname(self, index: int) -> str:
return os.path.join(self._chunk_dir, f"chunk_{index}.bin")
def tokenize_chunk(self, chunk_index):
fname = self._chunk_fname(chunk_index)
if not os.path.exists(fname):
print(f"Tokenizing chunk {chunk_index}...")
url = self.get_url(chunk_index)
with open(fname+".tmp", "wb") as out_f:
for l in self.gzip_line_iterator(url):
np.asarray(self.vocabulary(l), dtype=self.data_dtype).tofile(out_f)
os.rename(fname+".tmp", fname)
print(f"Tokenizing chunk {chunk_index} done.")
def do_mmap(self, index: int):
if self.chunk_mmap[index] is None:
self.chunk_mmap[index] = np.memmap(self._chunk_fname(index), dtype=self.data_dtype, mode='r')
def update_data_type(self):
# Avoid unnecessary copying
if self.n_tokens >= 2**31 - 1:
self.data_dtype = np.int64
elif self.n_tokens >= 2**15 - 1:
self.data_dtype = np.int32
elif self.n_tokens >= 2**8:
self.data_dtype = np.int16
else:
self.data_dtype = np.uint8
def get_chunk_sizes(self) -> List[int]:
chunk_sizes = []
for i in range(self._n_chunks):
fn = self._chunk_fname(i)
if os.path.exists(fn):
chunk_sizes.append(os.path.getsize(fn) // self.data_dtype(0).itemsize)
else:
break
return chunk_sizes
def get_ready_tokens(self) -> int:
return sum(self.get_chunk_sizes())
def __init__(self, unroll_len: int, n_extra: int = 1, split: str = 'train',
cache_dir: str = "./cache/", n_tokens: int = 8000, token_limit: Optional[int] = None) -> None:
self.split = split
self.n_tokens = n_tokens
self.unroll_len = unroll_len
self.n_extra = n_extra
self.update_data_type()
self._cache_dir = os.path.join(cache_dir, self.__class__.__name__, self._get_variant_id())
self._chunk_dir = os.path.join(self._cache_dir, "tokenized_chunks", split)
self._n_chunks = self.get_n_shards()
self.chunk_sizes = [0] * self._n_chunks
self.chunk_offsets = [0] * self._n_chunks
self.chunk_mmap = [None] * self._n_chunks
self.last_available_chunk = -1
self.last_accessed_chunk = -1
self.token_limit = int(math.ceil(token_limit)) if token_limit is not None else None
os.makedirs(self._chunk_dir, exist_ok=True)
self._sp_model_name = os.path.join(self._cache_dir, "tokenizer.model")
with LockFile(self._cache_dir + "/lock"):
self.vocabulary = SentencepieceVocabulary(self._sp_model_name, GenToIt(self.get_tokenizer_train_sentences), n_tokens)
print(f"{self.__class__.__name__}: Loaded tokenizer.")
missing = [i for i in range(self._n_chunks) if not os.path.exists(self._chunk_fname(i))]
print(f"{self.__class__.__name__}: {len(missing)} chunks missing")
if missing:
if token_limit is not None:
pool = mp.Pool(min(mp.cpu_count(), len(missing)))
while True:
tokens_ready = self.get_ready_tokens()
if tokens_ready >= token_limit:
print("Token limit reached. No need to tokenize more.")
break
print(f"{self.__class__.__name__}: {tokens_ready/token_limit*100:.2f}% ready.")
chunks_ready = len(self.get_chunk_sizes())
if chunks_ready == 0:
print("Tokenizing first chunk to estimate the number of required chunks...")
pool.map(self.tokenize_chunk, [0])
continue
elif chunks_ready >= self._n_chunks:
print("All chunks ready. No need to tokenize more.")
break
n_estimated = int(math.ceil(chunks_ready * (token_limit / tokens_ready)))
print(f"{self.__class__.__name__}: Tokenizing {n_estimated} estimated chunks...")
pool.map(self.tokenize_chunk, [a for a in range(chunks_ready, n_estimated) if a in missing])
print(f"Limiting to {token_limit} tokens")
missing = missing[:token_limit // self.unroll_len]
del pool
else:
mp.Pool(min(mp.cpu_count(), len(missing))).map(self.tokenize_chunk, missing)
self.chunk_sizes = self.get_chunk_sizes()
self.chunk_offsets = self.chunk_offsets[:len(self.chunk_sizes)]
self.chunk_mmap = self.chunk_mmap[:len(self.chunk_sizes)]
lim_found = False
for i in range(1, len(self.chunk_sizes)):
self.chunk_offsets[i] = self.chunk_offsets[i - 1] + self.chunk_sizes[i]
if self.token_limit is not None and not lim_found and self.chunk_offsets[i] >= self.token_limit:
print(f"{self.__class__.__name__}: Limiting to first {i} chunks because limited to {self.token_limit} tokens")
lim_found = True
def __len__(self):
l = self.linear_len()
if self.token_limit is not None:
l = min(l, self.token_limit)
return l // self.unroll_len
def linear_len(self):
return self.chunk_sizes[-1] + self.chunk_offsets[-1]
def get_linear(self, offset: int, clen: int):
chunk_index = bisect.bisect(self.chunk_offsets, offset) - 1
chunk_offset = offset - self.chunk_offsets[chunk_index]
self.do_mmap(chunk_index)
if chunk_offset + clen > self.chunk_sizes[chunk_index]:
# Wrapping over chunk boundary
next_chunk = (chunk_index + 1) % len(self.chunk_sizes)
self.do_mmap(next_chunk)
d1 = self.chunk_mmap[chunk_index][chunk_offset:]
d2 = self.chunk_mmap[next_chunk][:clen-len(d1)]
r = np.concatenate([d1, d2])
else:
r = self.chunk_mmap[chunk_index][chunk_offset:chunk_offset+clen]
assert r.shape[0] == clen
return r
def __getitem__(self, item: int) -> Dict[str, Any]:
return {
"data": self.get_linear(item * self.unroll_len, self.unroll_len + self.n_extra)
}
| def start_test(self) -> WordLevelLanguageModelTestState: | 3 | 2023-12-13 08:45:02+00:00 | 8k |
Q-Future/Q-Align | q_align/model/modeling_mplug_owl2.py | [
{
"identifier": "MPLUGOwl2Config",
"path": "q_align/model/configuration_mplug_owl2.py",
"snippet": "class MPLUGOwl2Config(LlamaConfig):\n model_type = \"mplug_owl2\"\n def __init__(self, visual_config=None, **kwargs):\n if visual_config is None:\n self.visual_config = DEFAULT_VIS... | from abc import ABC, abstractmethod
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, CLIPImageProcessor, LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from .configuration_mplug_owl2 import MPLUGOwl2Config, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
from .visual_encoder import MplugOwlVisionModel, MplugOwlVisualAbstractorModel
from .modeling_llama2 import replace_llama_modality_adaptive
from icecream import ic
from PIL import Image
from icecream import ic
import torch
import torch.nn as nn
import copy
import os
import sys | 7,090 | image_features = [x.flatten(0, 1) for x in image_features]
else:
image_features = self.encode_images(images)
new_input_embeds = []
new_modality_indicators = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
# multimodal LLM, but the current sample is not multimodal
# FIXME: this is a hacky fix, for deepspeed zero3 to work
half_len = cur_input_ids.shape[0] // 2
cur_image_features = image_features[cur_image_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
cur_modality_indicators = torch.zeros(len(cur_input_embeds)).long().to(self.device)
new_modality_indicators.append(cur_modality_indicators)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_image_idx += 1
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
cur_new_input_embeds = []
cur_modality_indicators = []
if labels is not None:
cur_labels = labels[batch_idx]
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
while image_token_indices.numel() > 0:
cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
# Add modality indicator
assert image_token_start == len(cur_input_ids[:image_token_start])
cur_modality_indicators.append(torch.zeros(len(cur_input_ids[:image_token_start])).long())
cur_modality_indicators.append(torch.ones(len(cur_image_features)).long())
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[image_token_start+1:]
cur_image_idx += 1
cur_input_ids = cur_input_ids[image_token_start+1:]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
if cur_input_ids.numel() > 0:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
cur_modality_indicators.append(torch.zeros(len(cur_input_ids)).long())
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
# Modality
cur_modality_indicators = [x.to(device=self.device) for x in cur_modality_indicators]
cur_modality_indicators = torch.cat(cur_modality_indicators, dim=0)
new_modality_indicators.append(cur_modality_indicators)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
# Embedding
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
# Modality
new_modality_indicators_align = []
for cur_modality_indicator in new_modality_indicators:
cur_new_embed = torch.cat((cur_modality_indicator, torch.zeros(max_len - cur_modality_indicator.shape[0], dtype=cur_modality_indicator.dtype, device=cur_modality_indicator.device)), dim=0)
new_modality_indicators_align.append(cur_new_embed)
new_modality_indicators = torch.stack(new_modality_indicators_align, dim=0)
# Label
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
# Attention Mask
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
new_modality_indicators = torch.stack(new_modality_indicators, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
if attention_mask is not None:
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, new_modality_indicators, attention_mask, past_key_values, new_input_embeds, new_labels
class MPLUGOwl2LlamaModel(MPLUGOwl2MetaModel, LlamaModel):
| # Copyright 2023 Haotian Liu & Qinghao Ye (Modified from LLaVA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, dir_path)
IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<|image|>"
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids if len(chunk) > 0 else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
def expand2square(pil_img, background_color):
width, height = pil_img.size
if width == height:
return pil_img
elif width > height:
result = Image.new(pil_img.mode, (width, width), background_color)
result.paste(pil_img, (0, (width - height) // 2))
return result
else:
result = Image.new(pil_img.mode, (height, height), background_color)
result.paste(pil_img, ((height - width) // 2, 0))
return result
class MPLUGOwl2MetaModel:
def __init__(self, config):
super(MPLUGOwl2MetaModel, self).__init__(config)
self.vision_model = MplugOwlVisionModel(
MplugOwlVisionConfig(**config.visual_config["visual_model"])
)
self.visual_abstractor = MplugOwlVisualAbstractorModel(
MplugOwlVisualAbstractorConfig(**config.visual_config["visual_abstractor"]), config.hidden_size
)
def get_vision_tower(self):
vision_model = getattr(self, 'vision_model', None)
if type(vision_model) is list:
vision_model = vision_model[0]
return vision_model
def get_visual_abstractor(self):
visual_abstractor = getattr(self, 'visual_abstractor', None)
if type(visual_abstractor) is list:
visual_abstractor = visual_abstractor[0]
return visual_abstractor
class MPLUGOwl2MetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def encode_images(self, images):
image_features = self.get_model().vision_model(images).last_hidden_state
image_features = self.get_model().visual_abstractor(encoder_hidden_states=image_features).last_hidden_state
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images
):
if images is None or input_ids.shape[1] == 1:
if past_key_values is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
multiway_indices = torch.zeros_like(input_ids).long().to(self.device)
return input_ids, multiway_indices, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1) for x in image_features]
else:
image_features = self.encode_images(images)
new_input_embeds = []
new_modality_indicators = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
# multimodal LLM, but the current sample is not multimodal
# FIXME: this is a hacky fix, for deepspeed zero3 to work
half_len = cur_input_ids.shape[0] // 2
cur_image_features = image_features[cur_image_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
cur_modality_indicators = torch.zeros(len(cur_input_embeds)).long().to(self.device)
new_modality_indicators.append(cur_modality_indicators)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_image_idx += 1
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
cur_new_input_embeds = []
cur_modality_indicators = []
if labels is not None:
cur_labels = labels[batch_idx]
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
while image_token_indices.numel() > 0:
cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
# Add modality indicator
assert image_token_start == len(cur_input_ids[:image_token_start])
cur_modality_indicators.append(torch.zeros(len(cur_input_ids[:image_token_start])).long())
cur_modality_indicators.append(torch.ones(len(cur_image_features)).long())
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[image_token_start+1:]
cur_image_idx += 1
cur_input_ids = cur_input_ids[image_token_start+1:]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
if cur_input_ids.numel() > 0:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
cur_modality_indicators.append(torch.zeros(len(cur_input_ids)).long())
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
# Modality
cur_modality_indicators = [x.to(device=self.device) for x in cur_modality_indicators]
cur_modality_indicators = torch.cat(cur_modality_indicators, dim=0)
new_modality_indicators.append(cur_modality_indicators)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
# Embedding
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
# Modality
new_modality_indicators_align = []
for cur_modality_indicator in new_modality_indicators:
cur_new_embed = torch.cat((cur_modality_indicator, torch.zeros(max_len - cur_modality_indicator.shape[0], dtype=cur_modality_indicator.dtype, device=cur_modality_indicator.device)), dim=0)
new_modality_indicators_align.append(cur_new_embed)
new_modality_indicators = torch.stack(new_modality_indicators_align, dim=0)
# Label
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
# Attention Mask
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
new_modality_indicators = torch.stack(new_modality_indicators, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
if attention_mask is not None:
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, new_modality_indicators, attention_mask, past_key_values, new_input_embeds, new_labels
class MPLUGOwl2LlamaModel(MPLUGOwl2MetaModel, LlamaModel): | config_class = MPLUGOwl2Config | 0 | 2023-12-14 03:36:30+00:00 | 8k |
nox-410/tvm.tl | python/tvm/tir/tensor_intrin/cuda.py | [
{
"identifier": "register_func",
"path": "python/tvm/_ffi/registry.py",
"snippet": "def register_func(func_name, f=None, override=False):\n \"\"\"Register global function\n\n Parameters\n ----------\n func_name : str or function\n The function name\n\n f : function, optional\n ... | from typing import Dict, Tuple
from typing_extensions import Literal
from tvm.script import tir as T
from tvm.tir.function import PrimFunc
from ..._ffi import register_func
from ...runtime import convert
from .. import Cast, IntImm, TensorIntrin | 4,012 | C.elem_offset + tx * lift(local_size_out),
False,
dtype=out_dtype,
)
)
T.evaluate(
T.ptx_mma(
mma_prefix,
"row",
"col",
in_dtype_abbrv,
in_dtype_abbrv,
out_dtype_abbrv,
A.data,
A.elem_offset + tx * lift(local_size),
B.data,
B.elem_offset + tx * lift(local_size) + lift(local_size) // 2,
C.data,
C.elem_offset + tx * lift(local_size_out) + lift(local_size_out) // 2,
False,
dtype=out_dtype,
)
)
return mma_sync_desc, mma_sync_impl
def get_mma_fill_intrin(dtype, local_size):
zero = IntImm("int32", 0).astype(dtype)
# Assume M = N = 16
index_map = shared_16x16_to_ldmatrix_32x8_layout
@T.prim_func
def mma_fill_desc(a: T.handle) -> None:
C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
with T.block("root"):
T.reads()
T.writes(C_warp[0:WARP_SIZE, 0:local_size])
for i0, i1 in T.grid(M_DIM, N_DIM):
with T.block("C_warp"):
i, j = T.axis.remap("SS", [i0, i1])
thread_id, local_id = T.meta_var(index_map(i, j))
T.reads()
T.writes(C_warp[thread_id, local_id])
C_warp[thread_id, local_id] = zero
@T.prim_func
def mma_fill_impl(a: T.handle) -> None:
C_warp = T.match_buffer(
a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
)
with T.block("root"):
T.reads()
T.writes(C_warp[0:WARP_SIZE, 0:local_size])
tx = T.env_thread("threadIdx.x")
T.launch_thread(tx, WARP_SIZE)
T.evaluate(T.mma_fill(local_size, C_warp.data, C_warp.elem_offset, dtype=dtype))
return mma_fill_desc, mma_fill_impl
def get_mma_store_intrin(dtype, local_size, scope="global"):
# Assume M = N = 16
index_map = shared_16x16_to_ldmatrix_32x8_layout
@T.prim_func
def mma_store_desc(a: T.handle, c: T.handle) -> None:
C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
C = T.match_buffer(c, [M_DIM, N_DIM], dtype=dtype, scope=scope)
with T.block("root"):
T.reads(C_warp[0:WARP_SIZE, 0:local_size])
T.writes(C[0:M_DIM, 0:N_DIM])
for i0, i1 in T.grid(M_DIM, N_DIM):
with T.block("C_warp"):
v0, v1 = T.axis.remap("SS", [i0, i1])
thread_id, local_id = T.meta_var(index_map(v0, v1))
T.reads(C_warp[thread_id, local_id])
T.writes(C[v0, v1])
C[v0, v1] = C_warp[thread_id, local_id]
@T.prim_func
def mma_store_impl(a: T.handle, c: T.handle) -> None:
s0 = T.int32()
s1 = T.int32()
C_warp = T.match_buffer(
a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
)
C = T.match_buffer(
c, [M_DIM, N_DIM], dtype=dtype, scope=scope, offset_factor=1, strides=[s0, s1]
)
with T.block("root"):
T.reads(C_warp[0:WARP_SIZE, 0:local_size])
T.writes(C[0:M_DIM, 0:N_DIM])
tx = T.env_thread("threadIdx.x")
T.launch_thread(tx, WARP_SIZE)
T.evaluate(
T.mma_store(
M_DIM,
N_DIM,
C.access_ptr("w"),
C_warp.data,
C_warp.elem_offset,
s0,
dtype=dtype,
)
)
return mma_store_desc, mma_store_impl
LDMATRIX_16x16_A_INTRIN = "mma.ldmatrix_16x16_a"
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for tensorization on NVIDIA GPU."""
def shared_16x16_to_ldmatrix_32x8_layout(i, j):
thread_id = 4 * (i % 8) + (j % 8) // 2
return thread_id, 4 * (j // 8) + (i // 8) * 2 + (j % 2)
def shared_16x32_to_ldmatrix_32x16_layout(i, j):
thread_id = 4 * (i % 8) + (j % 16) // 4
return thread_id, 8 * (j // 16) + (i // 8) * 4 + j % 4
def shared_32x16_to_ldmatrix_32x16_layout(i, j):
thread_id = (i % 16) // 4 + 4 * (j % 8)
return thread_id, 8 * (j // 8) + (i // 16) * 4 + i % 4
@register_func("tir.index_map.shared_16x16_to_ldmatrix_32x8_layout")
def index_map_shared_16x16_to_ldmatrix_32x8_layout(ind):
i, j = ind[0], ind[1]
thread_id, local_id = shared_16x16_to_ldmatrix_32x8_layout(i, j)
return convert([thread_id, local_id])
lift = convert
M_DIM = 16
N_DIM = 16
WARP_SIZE = 32
HALF_WARP = WARP_SIZE // 2
HALF_WARP_expr = lift(HALF_WARP)
def get_ldmatrix_intrin(k_dim, dtype, is_b, transposed, shared_scope="shared"):
local_size = (M_DIM * k_dim) // WARP_SIZE
shared_offset = None
index_map = None
if transposed:
assert is_b, "Transposed A matrix not supported"
ldmatrix_col_major = is_b and not transposed
if k_dim == 16:
assert dtype == "float16"
index_map = shared_16x16_to_ldmatrix_32x8_layout
if transposed:
shared_offset = (
lambda tx, stride: stride * 8 * (tx // HALF_WARP_expr)
+ stride * (tx % 8)
+ 8 * ((tx % HALF_WARP_expr) // 8)
)
else:
shared_offset = lambda tx, stride: stride * (tx % HALF_WARP_expr) + 8 * (
tx // HALF_WARP_expr
)
else:
assert (
k_dim == 32 and dtype == "int8"
), "Only k_dim == 16 (float16) or k_dim == 32 (int8) supported for now"
if ldmatrix_col_major:
index_map = shared_32x16_to_ldmatrix_32x16_layout
# A dummy offset, ldmatrix cannot be used for int8 + trans case.
# We still use the ldmatrix intrinsic, but lower it to a manual loop in the codegen.
# Only the stride information is required.
shared_offset = lambda _, stride: stride
elif is_b and transposed:
index_map = shared_16x32_to_ldmatrix_32x16_layout
shared_offset = (
lambda tx, stride: stride * 8 * (tx // HALF_WARP_expr)
+ (tx % 8) * stride
+ 16 * ((tx % HALF_WARP_expr) // 8)
)
else:
index_map = shared_16x32_to_ldmatrix_32x16_layout
shared_offset = lambda tx, stride: stride * (tx % 16) + 16 * (tx // 16)
assert index_map and shared_offset
if is_b and not transposed:
row_dim = k_dim
col_dim = M_DIM
else:
row_dim = M_DIM
col_dim = k_dim
shmem_shape = (row_dim, col_dim)
offset_factor = col_dim
@T.prim_func
def ldmatrix_desc(warp_handle: T.handle, shared_handle: T.handle) -> None:
shared = T.match_buffer(
shared_handle,
shmem_shape,
dtype,
align=64,
offset_factor=offset_factor,
scope=shared_scope,
)
warp = T.match_buffer(
warp_handle,
(WARP_SIZE, local_size),
dtype,
align=64,
offset_factor=offset_factor,
scope="warp",
)
with T.block("root"):
T.reads(shared[0:row_dim, 0:col_dim])
T.writes(warp[0:WARP_SIZE, 0:local_size])
for ax0, ax1 in T.grid(row_dim, col_dim):
with T.block("shared_warp"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
T.reads(shared[v0, v1])
thread_id, local_id = T.meta_var(index_map(v0, v1))
T.writes(warp[thread_id, local_id])
warp[thread_id, local_id] = shared[v0, v1]
@T.prim_func
def ldmatrix_impl(warp_handle: T.handle, shared_handle: T.handle) -> None:
s0 = T.int32()
s1 = T.int32()
shared = T.match_buffer(
shared_handle,
shmem_shape,
dtype,
align=64,
offset_factor=offset_factor,
scope=shared_scope,
strides=[s0, s1],
)
warp = T.match_buffer(
warp_handle,
(WARP_SIZE, local_size),
dtype,
align=64,
offset_factor=offset_factor,
scope="warp",
)
with T.block("root"):
T.reads(shared[0:row_dim, 0:col_dim])
T.writes(warp[0:WARP_SIZE, 0:local_size])
tx = T.env_thread("threadIdx.x")
T.launch_thread(tx, WARP_SIZE)
T.evaluate(
T.ptx_ldmatrix(
ldmatrix_col_major,
4, # Always load 4 matrices
".b16",
warp.data,
warp.elem_offset + lift(local_size) * tx,
shared.access_ptr("r"),
shared_offset(tx, s0),
dtype=dtype,
)
)
return ldmatrix_desc, ldmatrix_impl
def get_mma_intrin(k_dim, out_dtype, b_transposed):
    """Build the (desc, impl) TIR pair for one warp-level ``mma.sync``.

    ``k_dim`` selects the register layouts and the PTX variant
    (``m16n8k16`` or ``m16n8k32``); ``out_dtype`` fixes the accumulator
    dtype and the matching input dtype; ``b_transposed`` controls whether
    the B fragment is indexed as (n, k) instead of (k, n).
    """
    # Per-lane element counts of the input and output fragments.
    local_size = (M_DIM * k_dim) // WARP_SIZE
    # NOTE(review): the literal 32 is presumably WARP_SIZE — confirm and unify.
    local_size_out = (M_DIM * N_DIM) // 32
    index_map_C = shared_16x16_to_ldmatrix_32x8_layout
    if k_dim == 16:
        index_map_A = shared_16x16_to_ldmatrix_32x8_layout
        index_map_B = shared_16x16_to_ldmatrix_32x8_layout
        mma_prefix = "m16n8k16"
    elif k_dim == 32 and b_transposed:
        index_map_A = index_map_B = shared_16x32_to_ldmatrix_32x16_layout
        mma_prefix = "m16n8k32"
    elif k_dim == 32 and not b_transposed:
        index_map_A = shared_16x32_to_ldmatrix_32x16_layout
        index_map_B = shared_32x16_to_ldmatrix_32x16_layout
        mma_prefix = "m16n8k32"
    else:
        assert False
    out_dtype_abbrv = {"float16": "fp16", "float32": "fp32", "int32": "int32"}[out_dtype]
    # Float accumulators take fp16 inputs; int32 accumulators take int8.
    if out_dtype in ["float16", "float32"]:
        in_dtype = "float16"
        in_dtype_abbrv = "fp16"
    else:
        in_dtype = "int8"
        in_dtype_abbrv = "int8"
    # Widen operands to the accumulator dtype when accumulating in fp32/int32.
    def maybe_cast(v):
        if out_dtype in ["float32", "int32"]:
            return Cast(out_dtype, v)
        return v
    # Swap (row, col) indices when B is stored transposed.
    def maybe_swap(i, j):
        if b_transposed:
            return j, i
        return i, j
    A_offset_factor = k_dim
    B_offset_factor = maybe_swap(k_dim, N_DIM)[-1]
    out_offset_factor = N_DIM
    # Descriptor: the scalar C[i, j] += cast(A[i, k]) * cast(B[...]) loop nest
    # expressed on warp-local fragments, used for pattern matching.
    @T.prim_func
    def mma_sync_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(
            a,
            (WARP_SIZE, local_size),
            in_dtype,
            align=64,
            offset_factor=A_offset_factor,
            scope="warp",
        )
        B = T.match_buffer(
            b,
            (WARP_SIZE, local_size),
            in_dtype,
            align=64,
            offset_factor=B_offset_factor,
            scope="warp",
        )
        C = T.match_buffer(
            c,
            (WARP_SIZE, local_size_out),
            out_dtype,
            align=64,
            offset_factor=out_offset_factor,
            scope="warp",
        )
        with T.block("root"):
            T.reads(
                C[0:WARP_SIZE, 0:local_size_out],
                A[0:WARP_SIZE, 0:local_size],
                B[0:WARP_SIZE, 0:local_size],
            )
            T.writes(C[0:WARP_SIZE, 0:local_size_out])
            for i, j, k in T.grid(M_DIM, N_DIM, k_dim):
                with T.block("C"):
                    i, j, k = T.axis.remap("SSR", [i, j, k])
                    # B is addressed as (j, k) when transposed, (k, j) otherwise.
                    b_row_ind, b_col_ind = T.meta_var(maybe_swap(k, j))
                    thread_id_C, local_id_C = T.meta_var(index_map_C(i, j))
                    thread_id_A, local_id_A = T.meta_var(index_map_A(i, k))
                    thread_id_B, local_id_B = T.meta_var(index_map_B(b_row_ind, b_col_ind))
                    T.reads(
                        C[thread_id_C, local_id_C],
                        A[thread_id_A, local_id_A],
                        B[thread_id_B, local_id_B],
                    )
                    T.writes(C[thread_id_C, local_id_C])
                    C[thread_id_C, local_id_C] += maybe_cast(
                        A[thread_id_A, local_id_A]
                    ) * maybe_cast(B[thread_id_B, local_id_B])
    # Implementation: two ptx_mma calls — the 16x16 output is produced as two
    # m16n8 halves; the second call advances B and C by half a fragment.
    @T.prim_func
    def mma_sync_impl(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(
            a,
            (WARP_SIZE, local_size),
            in_dtype,
            align=64,
            offset_factor=A_offset_factor,
            scope="warp",
        )
        B = T.match_buffer(
            b,
            (WARP_SIZE, local_size),
            in_dtype,
            align=64,
            offset_factor=B_offset_factor,
            scope="warp",
        )
        C = T.match_buffer(
            c,
            (WARP_SIZE, local_size_out),
            out_dtype,
            align=64,
            offset_factor=out_offset_factor,
            scope="warp",
        )
        with T.block("root"):
            T.reads(
                C[0:WARP_SIZE, 0:local_size_out],
                A[0:WARP_SIZE, 0:local_size],
                B[0:WARP_SIZE, 0:local_size],
            )
            T.writes(C[0:WARP_SIZE, 0:local_size_out])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            T.evaluate(
                T.ptx_mma(
                    mma_prefix,
                    "row",
                    "col",
                    in_dtype_abbrv,
                    in_dtype_abbrv,
                    out_dtype_abbrv,
                    A.data,
                    A.elem_offset + tx * lift(local_size),
                    B.data,
                    B.elem_offset + tx * lift(local_size),
                    C.data,
                    C.elem_offset + tx * lift(local_size_out),
                    False,
                    dtype=out_dtype,
                )
            )
            T.evaluate(
                T.ptx_mma(
                    mma_prefix,
                    "row",
                    "col",
                    in_dtype_abbrv,
                    in_dtype_abbrv,
                    out_dtype_abbrv,
                    A.data,
                    A.elem_offset + tx * lift(local_size),
                    B.data,
                    B.elem_offset + tx * lift(local_size) + lift(local_size) // 2,
                    C.data,
                    C.elem_offset + tx * lift(local_size_out) + lift(local_size_out) // 2,
                    False,
                    dtype=out_dtype,
                )
            )
    return mma_sync_desc, mma_sync_impl
def get_mma_fill_intrin(dtype, local_size):
    """Build the (desc, impl) pair that zero-fills a warp-local accumulator.

    ``local_size`` is the per-lane element count of the fragment; the impl
    lowers to a single ``T.mma_fill`` per lane.
    """
    zero = IntImm("int32", 0).astype(dtype)
    # Assume M = N = 16
    index_map = shared_16x16_to_ldmatrix_32x8_layout
    # Descriptor: per-element zero store over the logical 16x16 tile.
    @T.prim_func
    def mma_fill_desc(a: T.handle) -> None:
        C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
        with T.block("root"):
            T.reads()
            T.writes(C_warp[0:WARP_SIZE, 0:local_size])
            for i0, i1 in T.grid(M_DIM, N_DIM):
                with T.block("C_warp"):
                    i, j = T.axis.remap("SS", [i0, i1])
                    thread_id, local_id = T.meta_var(index_map(i, j))
                    T.reads()
                    T.writes(C_warp[thread_id, local_id])
                    C_warp[thread_id, local_id] = zero
    # Implementation: one mma_fill intrinsic call per lane.
    @T.prim_func
    def mma_fill_impl(a: T.handle) -> None:
        C_warp = T.match_buffer(
            a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
        )
        with T.block("root"):
            T.reads()
            T.writes(C_warp[0:WARP_SIZE, 0:local_size])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            T.evaluate(T.mma_fill(local_size, C_warp.data, C_warp.elem_offset, dtype=dtype))
    return mma_fill_desc, mma_fill_impl
def get_mma_store_intrin(dtype, local_size, scope="global"):
    """Build the (desc, impl) pair that copies the warp accumulator to ``scope``.

    The desc is the element-wise copy from the (WARP_SIZE, local_size)
    fragment to a (M_DIM, N_DIM) tile; the impl lowers to ``T.mma_store``.
    """
    # Assume M = N = 16
    index_map = shared_16x16_to_ldmatrix_32x8_layout
    @T.prim_func
    def mma_store_desc(a: T.handle, c: T.handle) -> None:
        C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
        C = T.match_buffer(c, [M_DIM, N_DIM], dtype=dtype, scope=scope)
        with T.block("root"):
            T.reads(C_warp[0:WARP_SIZE, 0:local_size])
            T.writes(C[0:M_DIM, 0:N_DIM])
            for i0, i1 in T.grid(M_DIM, N_DIM):
                with T.block("C_warp"):
                    v0, v1 = T.axis.remap("SS", [i0, i1])
                    thread_id, local_id = T.meta_var(index_map(v0, v1))
                    T.reads(C_warp[thread_id, local_id])
                    T.writes(C[v0, v1])
                    C[v0, v1] = C_warp[thread_id, local_id]
    @T.prim_func
    def mma_store_impl(a: T.handle, c: T.handle) -> None:
        # Symbolic destination strides; s0 (row stride) is passed to mma_store.
        s0 = T.int32()
        s1 = T.int32()
        C_warp = T.match_buffer(
            a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
        )
        C = T.match_buffer(
            c, [M_DIM, N_DIM], dtype=dtype, scope=scope, offset_factor=1, strides=[s0, s1]
        )
        with T.block("root"):
            T.reads(C_warp[0:WARP_SIZE, 0:local_size])
            T.writes(C[0:M_DIM, 0:N_DIM])
            tx = T.env_thread("threadIdx.x")
            T.launch_thread(tx, WARP_SIZE)
            T.evaluate(
                T.mma_store(
                    M_DIM,
                    N_DIM,
                    C.access_ptr("w"),
                    C_warp.data,
                    C_warp.elem_offset,
                    s0,
                    dtype=dtype,
                )
            )
    return mma_store_desc, mma_store_impl
LDMATRIX_16x16_A_INTRIN = "mma.ldmatrix_16x16_a" | TensorIntrin.register(LDMATRIX_16x16_A_INTRIN, *get_ldmatrix_intrin(16, "float16", False, False)) | 4 | 2023-12-14 02:37:47+00:00 | 8k |
berlino/gated_linear_attention | kernels/inter_chunk_contribution/fn.py | [
{
"identifier": "PreprocessCumSum_GK",
"path": "kernels/inter_chunk_contribution/preprocess_cumsum_gk.py",
"snippet": "class PreprocessCumSum_GK(torch.autograd.Function):\n @staticmethod\n def forward(ctx, q, k, gk, normalizer_gk=8, clamp_min=-3):\n q = q.contiguous()\n k = k.conti... | from .preprocess_cumsum_gk import PreprocessCumSum_GK
from .preprocess_cumsum_gv import PreprocessCumSum_GV
from .chunk_scan_triton_full import Chunk_memory_update_full
from .chunk_scan_triton_only_gk import Chunk_memory_update_only_gk
from .chunk_scan_triton_only_gv import Chunk_memory_update_only_gv
from .chunk_scan_triton_no_decay import Chunk_memory_update_no_decay | 4,662 |
def inter_chunk_onc(query, key, value, gk, gv, normalizer_gk=16, normalizer_gv=16, clam_min=-3):
if gk is not None:
g_key_cumsum, reduce_key, q_exp, g_key_last_exp = PreprocessCumSum_GK.apply(query, key, gk, normalizer_gk, clam_min)
else:
reduce_key = key
q_exp = None
g_key_cumsum = None
g_key_last_exp = None
# gv_cumsum, v_reduce, gv_cumsum_exp, gv_last_exp
if gv is not None:
g_value_cumsum, reduce_value, g_value_cumsum_exp, g_value_last_exp = PreprocessCumSum_GV.apply( value, gv, normalizer_gv, clam_min)
else:
reduce_value = value
g_value_cumsum = None
g_value_last_exp = None
to_add = reduce_key.transpose(-1, -2) @ reduce_value
if gk is not None and gv is not None:
memory_cache = Chunk_memory_update_full.apply(g_key_last_exp, g_value_last_exp, to_add)
inter_chunk_contribution = ((q_exp) @ memory_cache) * g_value_cumsum_exp
elif gk is None and gv is not None:
memory_cache = Chunk_memory_update_only_gv.apply(g_value_last_exp, to_add)
inter_chunk_contribution = ((query) @ memory_cache) * g_value_cumsum_exp
elif gk is not None and gv is None:
|
def inter_chunk_onc(query, key, value, gk, gv, normalizer_gk=16, normalizer_gv=16, clam_min=-3):
if gk is not None:
g_key_cumsum, reduce_key, q_exp, g_key_last_exp = PreprocessCumSum_GK.apply(query, key, gk, normalizer_gk, clam_min)
else:
reduce_key = key
q_exp = None
g_key_cumsum = None
g_key_last_exp = None
# gv_cumsum, v_reduce, gv_cumsum_exp, gv_last_exp
if gv is not None:
g_value_cumsum, reduce_value, g_value_cumsum_exp, g_value_last_exp = PreprocessCumSum_GV.apply( value, gv, normalizer_gv, clam_min)
else:
reduce_value = value
g_value_cumsum = None
g_value_last_exp = None
to_add = reduce_key.transpose(-1, -2) @ reduce_value
if gk is not None and gv is not None:
memory_cache = Chunk_memory_update_full.apply(g_key_last_exp, g_value_last_exp, to_add)
inter_chunk_contribution = ((q_exp) @ memory_cache) * g_value_cumsum_exp
elif gk is None and gv is not None:
memory_cache = Chunk_memory_update_only_gv.apply(g_value_last_exp, to_add)
inter_chunk_contribution = ((query) @ memory_cache) * g_value_cumsum_exp
elif gk is not None and gv is None: | memory_cache = Chunk_memory_update_only_gk.apply(g_key_last_exp, to_add) | 3 | 2023-12-11 18:13:44+00:00 | 8k |
kakaobrain/honeybee | serve/web_server.py | [
{
"identifier": "default_conversation",
"path": "serve/conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n ... | import argparse
import json
import os
import time
import gradio as gr
import requests
import torch
from .conversation import default_conversation
from .gradio_css import code_highlight_css
from .model_utils import post_process_code
from .model_worker import Honeybee_Server
from .serve_utils import add_text # noqa: F401
from .serve_utils import regenerate # noqa: F401
from .serve_utils import (
after_process_image,
clear_history,
disable_btn,
downvote_last_response,
enable_btn,
flag_last_response,
get_window_url_params,
init,
no_change_btn,
upvote_last_response,
) | 4,786 |
with gr.Row():
with gr.Column(scale=3):
imagebox = gr.Image(type="pil")
# dataset for selecting OwlEval data
owleval = load_jsonl("data/OwlEval/questions.jsonl")
owleval_data = gr.Dataset(
components=[imagebox, textbox],
label="OwlEval Examples",
samples=[
[os.path.join("data/OwlEval", "images", it["image"]), it["question"]]
for it in owleval
],
)
with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
max_output_tokens = gr.Slider(
0, 1024, 512, step=64, interactive=True, label="Max output tokens"
)
temperature = gr.Slider(
0, 1, 1, step=0.1, interactive=True, label="Temperature"
)
top_k = gr.Slider(1, 5, 3, step=1, interactive=True, label="Top K")
top_p = gr.Slider(0, 1, 0.9, step=0.1, interactive=True, label="Top p")
length_penalty = gr.Slider(
1, 5, 1, step=0.1, interactive=True, label="length_penalty"
)
num_beams = gr.Slider(1, 5, 1, step=1, interactive=True, label="Beam Size")
no_repeat_ngram_size = gr.Slider(
1, 5, 2, step=1, interactive=True, label="no_repeat_ngram_size"
)
do_sample = gr.Checkbox(interactive=True, value=True, label="do_sample")
videobox = gr.Video(visible=False) # [M-LLM] currently, we do not support video
with gr.Column(scale=6):
chatbot = gr.Chatbot(elem_id="chatbot", visible=False, height=1000)
with gr.Row():
with gr.Column(scale=8):
textbox.render()
with gr.Column(scale=1, min_width=60):
submit_btn = gr.Button(value="Submit", visible=False)
with gr.Row(visible=False) as button_row:
upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
gr.Markdown(learn_more_markdown)
url_params = gr.JSON(visible=False)
owleval_data.click(fn=set_dataset, inputs=owleval_data, outputs=owleval_data.components)
btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
parameter_list = [
max_output_tokens,
temperature,
top_k,
top_p,
num_beams,
no_repeat_ngram_size,
length_penalty,
do_sample,
]
upvote_btn.click(
upvote_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn]
)
downvote_btn.click(
downvote_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn]
)
flag_btn.click(flag_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn])
regenerate_btn.click(
regenerate_http_bot,
[state] + parameter_list,
[state, chatbot, textbox, imagebox, videobox] + btn_list,
)
clear_btn.click(
clear_history, None, [state, chatbot, textbox, imagebox, videobox] + btn_list
)
textbox.submit(
add_text_http_bot,
[state, textbox, imagebox, videobox] + parameter_list,
[state, chatbot, textbox, imagebox, videobox] + btn_list,
)
submit_btn.click(
add_text_http_bot,
[state, textbox, imagebox, videobox] + parameter_list,
[state, chatbot, textbox, imagebox, videobox] + btn_list,
)
demo.load(
load_demo,
[url_params],
[state, chatbot, textbox, submit_btn, button_row, parameter_row],
_js=get_window_url_params,
)
return demo
if __name__ == "__main__":
io = init()
cur_dir = os.path.dirname(os.path.abspath(__file__))
log_dir = cur_dir[:-9] + "log"
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--debug", action="store_true", help="using debug mode")
parser.add_argument("--port", type=int)
parser.add_argument("--concurrency-count", type=int, default=100)
parser.add_argument("--base-model", type=str, default="checkpoints/7B-C-Abs-M144/last")
parser.add_argument("--load-8bit", action="store_true", help="using 8bit mode")
parser.add_argument("--bf16", action="store_true", help="using 8bit mode")
args = parser.parse_args()
print(" >>> Init server")
| # Reference: https://huggingface.co/spaces/MAGAer13/mPLUG-Owl/tree/main
def load_jsonl(filename):
    """Read a JSON-Lines file and return one parsed object per line."""
    records = []
    with open(filename, "r", encoding="utf-8") as f:
        for raw_line in f:
            records.append(json.loads(raw_line.strip("\n")))
    return records
def set_dataset(example: list):
    """Push a clicked gr.Dataset sample (image path, question) into the widgets."""
    image_path, question = example[0], example[1]
    image_update = gr.Image.update(value=image_path)
    text_update = gr.Textbox.update(value=question)
    return image_update, text_update
def set_example_text_input(example_text: list) -> dict:
    # Click handler for the example query texts: like set_dataset above,
    # `example_text` is a gr.Dataset sample (a one-element list), hence the
    # [0] indexing — the original `str` annotation did not match that usage.
    return gr.Textbox.update(value=example_text[0])
def load_demo(url_params, request: gr.Request):
    """Page-load handler wired in ``build_demo`` via ``demo.load``.

    Creates a fresh conversation state and un-hides the main widgets. It must
    return exactly one value per registered output, i.e. the 6-tuple
    (state, chatbot, textbox, submit_btn, button_row, parameter_row).

    Bug fixed: the original additionally returned a ``gr.Dropdown.update``
    (a leftover model selector from the reference mPLUG-Owl demo), producing
    7 values for the 6 outputs registered in ``build_demo``.
    """
    state = default_conversation.copy()
    return (
        state,
        gr.Chatbot.update(visible=True),
        gr.Textbox.update(visible=True),
        gr.Button.update(visible=True),
        gr.Row.update(visible=True),
        gr.Accordion.update(visible=True),
    )
def add_text_http_bot(
    state,
    text,
    image,
    video,
    max_output_tokens,
    temperature,
    top_k,
    top_p,
    num_beams,
    no_repeat_ngram_size,
    length_penalty,
    do_sample,
    request: gr.Request,
):
    """Gradio handler: append the user turn, then stream the model reply.

    Generator wired to ``textbox.submit`` / ``submit_btn.click``. Every yield
    produces (state, chatbot, textbox, imagebox, videobox) plus updates for
    the 5 action buttons (upvote/downvote/flag/regenerate/clear).
    """
    # Reject empty submissions without touching the conversation state.
    if len(text) <= 0 and (image is None or video is None):
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5
    # Attach media: ensure an <image> placeholder token, then pack the media
    # together with the text as a (text, media) tuple for the conversation.
    if image is not None:
        if "<image>" not in text:
            text = text + "\n<image>"
        text = (text, image)
    # NOTE(review): if an image was attached above, `text` is already a tuple
    # here, so the "<image>" membership test checks tuple elements and the
    # concatenation below would raise TypeError — presumably image and video
    # are never supplied together; confirm upstream.
    if video is not None:
        num_frames = 4
        if "<image>" not in text:
            text = text + "\n<image>" * num_frames
        text = (text, video)
    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)
    state.skip_next = False
    # First yield: show the pending turn and disable all buttons.
    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
    if state.skip_next:
        # This generate call is skipped due to invalid inputs
        yield (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5
        return
    prompt = after_process_image(state.get_prompt())
    images = state.get_images()
    # Request payload for the model worker; sliders arrive as raw widget
    # values, hence the explicit casts.
    data = {
        "text_input": prompt,
        "images": images if len(images) > 0 else [],
        "generation_config": {
            "top_k": int(top_k),
            "top_p": float(top_p),
            "num_beams": int(num_beams),
            "no_repeat_ngram_size": int(no_repeat_ngram_size),
            "length_penalty": float(length_penalty),
            "do_sample": bool(do_sample),
            "temperature": float(temperature),
            "max_new_tokens": min(int(max_output_tokens), 1536),
        },
    }
    # "▌" is a cursor glyph appended while the answer is still streaming.
    state.messages[-1][-1] = "▌"
    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
    try:
        # chunk is (partial_text, still_streaming_flag).
        for chunk in model.predict(data):
            if chunk:
                if chunk[1]:
                    output = chunk[0].strip()
                    output = post_process_code(output)
                    state.messages[-1][-1] = output + "▌"
                    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
                else:
                    # Final chunk: drop the cursor and re-enable regenerate/clear.
                    output = chunk[0].strip()
                    state.messages[-1][-1] = output
                    yield (state, state.to_gradio_chatbot(), "", None, None) + (
                        disable_btn,
                        disable_btn,
                        disable_btn,
                        enable_btn,
                        enable_btn,
                    )
                    return
                time.sleep(0.03)
    except requests.exceptions.RequestException as e:  # noqa: F841
        state.messages[-1][
            -1
        ] = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
        yield (state, state.to_gradio_chatbot(), "", None, None) + (
            disable_btn,
            disable_btn,
            disable_btn,
            enable_btn,
            enable_btn,
        )
        return
    # Stream ended without a final chunk: strip the trailing cursor glyph.
    state.messages[-1][-1] = state.messages[-1][-1][:-1]
    yield (state, state.to_gradio_chatbot(), "", None, None) + (enable_btn,) * 5
def regenerate_http_bot(
    state,
    max_output_tokens,
    temperature,
    top_k,
    top_p,
    num_beams,
    no_repeat_ngram_size,
    length_penalty,
    do_sample,
    request: gr.Request,
):
    """Gradio handler for the 🔄 Regenerate button.

    Clears the last assistant message and re-streams a reply for the existing
    prompt; yields the same tuple shape as ``add_text_http_bot`` —
    (state, chatbot, textbox, imagebox, videobox) plus 5 button updates.
    """
    # Drop the previous answer so get_prompt() ends at the user turn.
    state.messages[-1][-1] = None
    state.skip_next = False
    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
    prompt = after_process_image(state.get_prompt())
    images = state.get_images()
    # Same payload construction as add_text_http_bot.
    data = {
        "text_input": prompt,
        "images": images if len(images) > 0 else [],
        "generation_config": {
            "top_k": int(top_k),
            "top_p": float(top_p),
            "num_beams": int(num_beams),
            "no_repeat_ngram_size": int(no_repeat_ngram_size),
            "length_penalty": float(length_penalty),
            "do_sample": bool(do_sample),
            "temperature": float(temperature),
            "max_new_tokens": min(int(max_output_tokens), 1536),
        },
    }
    # "▌" acts as a streaming cursor glyph.
    state.messages[-1][-1] = "▌"
    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
    try:
        # chunk is (partial_text, still_streaming_flag).
        for chunk in model.predict(data):
            if chunk:
                if chunk[1]:
                    output = chunk[0].strip()
                    output = post_process_code(output)
                    state.messages[-1][-1] = output + "▌"
                    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
                else:
                    output = chunk[0].strip()
                    state.messages[-1][-1] = output
                    yield (state, state.to_gradio_chatbot(), "", None, None) + (
                        disable_btn,
                        disable_btn,
                        disable_btn,
                        enable_btn,
                        enable_btn,
                    )
                    return
                time.sleep(0.03)
    except requests.exceptions.RequestException as e:  # noqa: F841
        state.messages[-1][
            -1
        ] = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
        yield (state, state.to_gradio_chatbot(), "", None, None) + (
            disable_btn,
            disable_btn,
            disable_btn,
            enable_btn,
            enable_btn,
        )
        return
    # Stream ended without a final chunk: strip the trailing cursor glyph.
    state.messages[-1][-1] = state.messages[-1][-1][:-1]
    yield (state, state.to_gradio_chatbot(), "", None, None) + (enable_btn,) * 5
title_markdown = """
**Notice**: The output is generated by top-k sampling scheme and may involve some randomness.
"""
tos_markdown = """
### Terms of use
By using this service, users are required to agree to the following terms:
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
**Copyright 2023 Alibaba DAMO Academy.**
"""
learn_more_markdown = """
### License
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
"""
css = (
code_highlight_css
+ """
pre {
white-space: pre-wrap; /* Since CSS 2.1 */
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
word-wrap: break-word; /* Internet Explorer 5.5+ */
}
"""
)
def build_demo(model_name: str = "M-LLM"):
    """Assemble the Gradio Blocks UI and wire all event handlers.

    Layout: left column = image input, OwlEval example picker, and generation
    parameters; right column = chatbot, text input, and the
    vote/flag/regenerate/clear buttons. Returns the (not yet launched)
    ``gr.Blocks`` app.
    """
    title_model_name = f"""<h1 align="center">{model_name} </h1>"""
    # with gr.Blocks(title="mPLUG-Owl🦉", theme=gr.themes.Base(), css=css) as demo:
    # The textbox is created up front so the Dataset can reference it as a
    # component before it is rendered inside the right-hand column.
    textbox = gr.Textbox(
        show_label=False, placeholder="Enter text and press ENTER", visible=False, container=False
    )
    with gr.Blocks(title="M-LLM", css=css) as demo:
        state = gr.State()
        gr.Markdown(title_model_name)
        gr.Markdown(title_markdown)
        with gr.Row():
            with gr.Column(scale=3):
                imagebox = gr.Image(type="pil")
                # dataset for selecting OwlEval data
                owleval = load_jsonl("data/OwlEval/questions.jsonl")
                owleval_data = gr.Dataset(
                    components=[imagebox, textbox],
                    label="OwlEval Examples",
                    samples=[
                        [os.path.join("data/OwlEval", "images", it["image"]), it["question"]]
                        for it in owleval
                    ],
                )
                with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
                    max_output_tokens = gr.Slider(
                        0, 1024, 512, step=64, interactive=True, label="Max output tokens"
                    )
                    temperature = gr.Slider(
                        0, 1, 1, step=0.1, interactive=True, label="Temperature"
                    )
                    top_k = gr.Slider(1, 5, 3, step=1, interactive=True, label="Top K")
                    top_p = gr.Slider(0, 1, 0.9, step=0.1, interactive=True, label="Top p")
                    length_penalty = gr.Slider(
                        1, 5, 1, step=0.1, interactive=True, label="length_penalty"
                    )
                    num_beams = gr.Slider(1, 5, 1, step=1, interactive=True, label="Beam Size")
                    no_repeat_ngram_size = gr.Slider(
                        1, 5, 2, step=1, interactive=True, label="no_repeat_ngram_size"
                    )
                    do_sample = gr.Checkbox(interactive=True, value=True, label="do_sample")
                videobox = gr.Video(visible=False) # [M-LLM] currently, we do not support video
            with gr.Column(scale=6):
                chatbot = gr.Chatbot(elem_id="chatbot", visible=False, height=1000)
                with gr.Row():
                    with gr.Column(scale=8):
                        textbox.render()
                    with gr.Column(scale=1, min_width=60):
                        submit_btn = gr.Button(value="Submit", visible=False)
                with gr.Row(visible=False) as button_row:
                    upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
                    downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
                    flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
                    regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
                    clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
        gr.Markdown(learn_more_markdown)
        url_params = gr.JSON(visible=False)
        owleval_data.click(fn=set_dataset, inputs=owleval_data, outputs=owleval_data.components)
        btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
        # Order must match the handler signatures (state + parameters; the
        # gr.Request argument is injected by gradio).
        parameter_list = [
            max_output_tokens,
            temperature,
            top_k,
            top_p,
            num_beams,
            no_repeat_ngram_size,
            length_penalty,
            do_sample,
        ]
        upvote_btn.click(
            upvote_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn]
        )
        downvote_btn.click(
            downvote_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn]
        )
        flag_btn.click(flag_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn])
        regenerate_btn.click(
            regenerate_http_bot,
            [state] + parameter_list,
            [state, chatbot, textbox, imagebox, videobox] + btn_list,
        )
        clear_btn.click(
            clear_history, None, [state, chatbot, textbox, imagebox, videobox] + btn_list
        )
        textbox.submit(
            add_text_http_bot,
            [state, textbox, imagebox, videobox] + parameter_list,
            [state, chatbot, textbox, imagebox, videobox] + btn_list,
        )
        submit_btn.click(
            add_text_http_bot,
            [state, textbox, imagebox, videobox] + parameter_list,
            [state, chatbot, textbox, imagebox, videobox] + btn_list,
        )
        # Runs once per page load; the load_demo return arity must match this
        # outputs list.
        demo.load(
            load_demo,
            [url_params],
            [state, chatbot, textbox, submit_btn, button_row, parameter_row],
            _js=get_window_url_params,
        )
    return demo
if __name__ == "__main__":
io = init()
cur_dir = os.path.dirname(os.path.abspath(__file__))
log_dir = cur_dir[:-9] + "log"
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--debug", action="store_true", help="using debug mode")
parser.add_argument("--port", type=int)
parser.add_argument("--concurrency-count", type=int, default=100)
parser.add_argument("--base-model", type=str, default="checkpoints/7B-C-Abs-M144/last")
parser.add_argument("--load-8bit", action="store_true", help="using 8bit mode")
parser.add_argument("--bf16", action="store_true", help="using 8bit mode")
args = parser.parse_args()
print(" >>> Init server") | model = Honeybee_Server( | 3 | 2023-12-06 14:48:41+00:00 | 8k |
taikinman/langrila | src/langrila/chat_module/function_calling.py | [
{
"identifier": "BaseConversationLengthAdjuster",
"path": "src/langrila/base.py",
"snippet": "class BaseConversationLengthAdjuster(ABC):\n @abstractmethod\n def run(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:\n raise NotImplementedError\n\n def __call__(self, messages:... | import asyncio
import json
from typing import Callable, Optional
from pydantic import BaseModel, field_validator
from ..base import BaseConversationLengthAdjuster, BaseFilter, BaseModule
from ..conversation_adjuster.truncate import OldConversationTruncationModule
from ..message import Message
from ..model_config import _NEWER_MODEL_CONFIG, _OLDER_MODEL_CONFIG, MODEL_CONFIG, MODEL_POINT
from ..result import FunctionCallingResults, ToolOutput
from ..usage import Usage
from ..utils import get_async_client, get_client, get_token_limit, make_batch | 5,378 | max_retries: int = 2,
max_tokens: int = 2048,
seed: Optional[int] = None,
) -> None:
assert api_type in ["openai", "azure"], "api_type must be 'openai' or 'azure'."
if api_type == "azure":
assert (
api_version and endpoint_env_name and deployment_id_env_name
), "api_version, endpoint_env_name, and deployment_id_env_name must be specified for Azure API."
self.api_key_env_name = api_key_env_name
self.organization_id_env_name = organization_id_env_name
self.api_type = api_type
self.api_version = api_version
self.endpoint_env_name = endpoint_env_name
self.deployment_id_env_name = deployment_id_env_name
self.model_name = model_name
self.timeout = timeout
self.max_retries = max_retries
self.tools = {f.__name__: f for f in tools}
_tool_names_from_config = {f.name for f in tool_configs}
assert (
len(_tool_names_from_config ^ set(self.tools.keys())) == 0
), f"tool names in tool_configs must be the same as the function names in tools. tool names in tool_configs: {_tool_names_from_config}, function names in tools: {set(self.tools.keys())}"
self.tool_choice = tool_choice
self.max_tokens = max_tokens
self.additional_inputs = {}
if model_name in _NEWER_MODEL_CONFIG.keys():
self.seed = seed
self.additional_inputs["seed"] = seed
self.tool_configs = [f.model_dump() for f in tool_configs]
self.additional_inputs["tools"] = self.tool_configs
self.additional_inputs["tool_choice"] = self.tool_choice
else:
if seed:
print(
f"seed is ignored because it's not supported for {model_name} (api_type:{api_type})"
)
self.tool_configs = [f.model_dump()["function"] for f in tool_configs]
self.additional_inputs["functions"] = self.tool_configs
self.additional_inputs["function_call"] = self.tool_choice
    def run(self, messages: list[dict[str, str]]) -> FunctionCallingResults:
        """Send `messages` once and execute any tool/function calls the model emits.

        Dispatches each call to the matching local callable in ``self.tools``
        and returns the collected outputs with token usage.
        """
        if len(messages) == 0:
            raise ValueError("messages must not be empty.")
        client = get_client(
            api_key_env_name=self.api_key_env_name,
            organization_id_env_name=self.organization_id_env_name,
            api_version=self.api_version,
            endpoint_env_name=self.endpoint_env_name,
            deployment_id_env_name=self.deployment_id_env_name,
            api_type=self.api_type,
            max_retries=self.max_retries,
            timeout=self.timeout,
        )
        # Deterministic decoding (temperature/top_p = 0); additional_inputs
        # carries the tools/functions payload prepared in __init__.
        response = client.chat.completions.create(
            model=self.model_name,
            messages=messages if isinstance(messages, list) else [messages],
            temperature=0,
            max_tokens=self.max_tokens,
            top_p=0,
            frequency_penalty=0,
            presence_penalty=0,
            stop=None,
            **self.additional_inputs,
        )
        usage = Usage()
        usage += response.usage
        # Newer API: parallel tool_calls, each with its own call id.
        if self.model_name in _NEWER_MODEL_CONFIG.keys():
            response_message = response.choices[0].message
            tool_calls = response_message.tool_calls
            results = []
            if tool_calls is not None:
                for tool_call in tool_calls:
                    call_id = tool_call.id
                    funcname = tool_call.function.name
                    args = tool_call.function.arguments
                    # Arguments arrive as a JSON string from the API.
                    func_out = self.tools[funcname](**json.loads(args))
                    output = ToolOutput(
                        call_id=call_id,
                        funcname=funcname,
                        args=args,
                        output=func_out,
                    )
                    results.append(output)
            return FunctionCallingResults(usage=usage, results=results, prompt=messages)
        # Legacy API: a single function_call with no call id.
        elif self.model_name in _OLDER_MODEL_CONFIG.keys():
            response_message = response.choices[0].message
            function_call = response_message.function_call
            output = []
            if function_call is not None:
                funcname = function_call.name
                args = function_call.arguments
                func_out = self.tools[funcname](**json.loads(args))
                output += [
                    ToolOutput(
                        call_id=None,
                        funcname=funcname,
                        args=args,
                        output=func_out,
                    )
                ]
            # NOTE(review): model names in neither config table fall through
            # and the method implicitly returns None despite the annotation —
            # confirm intended.
            return FunctionCallingResults(usage=usage, results=output, prompt=messages)
async def arun(self, messages: list[dict[str, str]]) -> FunctionCallingResults:
if len(messages) == 0:
raise ValueError("messages must not be empty.")
|
class ToolProperty(BaseModel):
    """One property entry of a tool's JSON-schema ``parameters.properties``."""

    name: str
    type: str
    description: str

    def model_dump(self):
        # Serialize keyed by the property name, matching the JSON-schema
        # shape {"<name>": {"type": ..., "description": ...}}.
        return {self.name: super().model_dump(exclude=["name"])}

    @field_validator("type")
    def check_type_value(cls, v):
        if v not in {"string", "number", "boolean"}:
            # Bug fixed: the message previously omitted "boolean" even though
            # the check above accepts it.
            raise ValueError("type must be one of string, number or boolean.")
        return v
class ToolParameter(BaseModel):
    """JSON-schema ``parameters`` object of a tool; ``type`` must be "object"."""
    type: str = "object"
    properties: list[ToolProperty]
    required: Optional[list[str]] = None
    def model_dump(self):
        # Fold each property's {name: spec} mapping into one "properties"
        # dict and include "required" only when it was given.
        dumped = super().model_dump(exclude=["properties", "required"])
        _properties = {}
        for p in self.properties:
            _properties.update(p.model_dump())
        dumped["properties"] = _properties
        if self.required is not None:
            dumped["required"] = self.required
        return dumped
    @field_validator("type")
    def check_type_value(cls, v):
        if v not in {"object"}:
            raise ValueError("supported type is only object")
        return v
    @field_validator("required")
    def check_required_value(cls, required, values):
        # Cross-field check: every name in `required` must refer to a declared
        # property. Fields validate in declaration order, so `properties` is
        # already available via values.data here.
        properties = values.data["properties"]
        property_names = {p.name for p in properties}
        if required is not None:
            for r in required:
                if r not in property_names:
                    raise ValueError(f"required property '{r}' is not defined in properties.")
        return required
class ToolConfig(BaseModel):
    """Top-level tool definition serialized into the OpenAI tools payload."""

    name: str
    type: str = "function"
    description: str
    parameters: ToolParameter

    def model_dump(self):
        # Shape: {"type": "function", "function": {name, description, parameters}}
        body = super().model_dump(exclude=["parameters", "type"])
        body["parameters"] = self.parameters.model_dump()
        return {"type": self.type, self.type: body}

    @field_validator("type")
    def check_type_value(cls, v):
        if v != "function":
            raise ValueError("supported type is only function")
        return v
class BaseFunctionCallingModule(BaseModule):
    def __init__(
        self,
        api_key_env_name: str,
        model_name: str,
        tools: list[Callable],
        tool_configs: list[ToolConfig],
        tool_choice: str = "auto",
        api_type: str = "openai",
        api_version: Optional[str] = None,
        endpoint_env_name: Optional[str] = None,
        deployment_id_env_name: Optional[str] = None,
        organization_id_env_name: Optional[str] = None,
        timeout: int = 30,
        max_retries: int = 2,
        max_tokens: int = 2048,
        seed: Optional[int] = None,
    ) -> None:
        """Configure the client settings and pre-build the tools payload.

        Env-var *names* (not values) are stored for the API key, endpoint,
        etc.; the actual client is created per call in run/arun. Depending on
        whether ``model_name`` is in the newer or older model table, the
        request extras use the ``tools``/``tool_choice`` schema (with
        ``seed``) or the legacy ``functions``/``function_call`` schema.
        """
        assert api_type in ["openai", "azure"], "api_type must be 'openai' or 'azure'."
        # Azure requires the extra deployment coordinates up front.
        if api_type == "azure":
            assert (
                api_version and endpoint_env_name and deployment_id_env_name
            ), "api_version, endpoint_env_name, and deployment_id_env_name must be specified for Azure API."
        self.api_key_env_name = api_key_env_name
        self.organization_id_env_name = organization_id_env_name
        self.api_type = api_type
        self.api_version = api_version
        self.endpoint_env_name = endpoint_env_name
        self.deployment_id_env_name = deployment_id_env_name
        self.model_name = model_name
        self.timeout = timeout
        self.max_retries = max_retries
        # Dispatch table keyed by function __name__; config names must match
        # it exactly (checked via symmetric difference below).
        self.tools = {f.__name__: f for f in tools}
        _tool_names_from_config = {f.name for f in tool_configs}
        assert (
            len(_tool_names_from_config ^ set(self.tools.keys())) == 0
        ), f"tool names in tool_configs must be the same as the function names in tools. tool names in tool_configs: {_tool_names_from_config}, function names in tools: {set(self.tools.keys())}"
        self.tool_choice = tool_choice
        self.max_tokens = max_tokens
        # Extra kwargs forwarded verbatim to chat.completions.create().
        self.additional_inputs = {}
        if model_name in _NEWER_MODEL_CONFIG.keys():
            self.seed = seed
            self.additional_inputs["seed"] = seed
            self.tool_configs = [f.model_dump() for f in tool_configs]
            self.additional_inputs["tools"] = self.tool_configs
            self.additional_inputs["tool_choice"] = self.tool_choice
        else:
            # Older models: no seed support; use the legacy functions schema.
            if seed:
                print(
                    f"seed is ignored because it's not supported for {model_name} (api_type:{api_type})"
                )
            self.tool_configs = [f.model_dump()["function"] for f in tool_configs]
            self.additional_inputs["functions"] = self.tool_configs
            self.additional_inputs["function_call"] = self.tool_choice
def run(self, messages: list[dict[str, str]]) -> FunctionCallingResults:
if len(messages) == 0:
raise ValueError("messages must not be empty.")
client = get_client(
api_key_env_name=self.api_key_env_name,
organization_id_env_name=self.organization_id_env_name,
api_version=self.api_version,
endpoint_env_name=self.endpoint_env_name,
deployment_id_env_name=self.deployment_id_env_name,
api_type=self.api_type,
max_retries=self.max_retries,
timeout=self.timeout,
)
response = client.chat.completions.create(
model=self.model_name,
messages=messages if isinstance(messages, list) else [messages],
temperature=0,
max_tokens=self.max_tokens,
top_p=0,
frequency_penalty=0,
presence_penalty=0,
stop=None,
**self.additional_inputs,
)
usage = Usage()
usage += response.usage
if self.model_name in _NEWER_MODEL_CONFIG.keys():
response_message = response.choices[0].message
tool_calls = response_message.tool_calls
results = []
if tool_calls is not None:
for tool_call in tool_calls:
call_id = tool_call.id
funcname = tool_call.function.name
args = tool_call.function.arguments
func_out = self.tools[funcname](**json.loads(args))
output = ToolOutput(
call_id=call_id,
funcname=funcname,
args=args,
output=func_out,
)
results.append(output)
return FunctionCallingResults(usage=usage, results=results, prompt=messages)
elif self.model_name in _OLDER_MODEL_CONFIG.keys():
response_message = response.choices[0].message
function_call = response_message.function_call
output = []
if function_call is not None:
funcname = function_call.name
args = function_call.arguments
func_out = self.tools[funcname](**json.loads(args))
output += [
ToolOutput(
call_id=None,
funcname=funcname,
args=args,
output=func_out,
)
]
return FunctionCallingResults(usage=usage, results=output, prompt=messages)
async def arun(self, messages: list[dict[str, str]]) -> FunctionCallingResults:
if len(messages) == 0:
raise ValueError("messages must not be empty.")
| client = get_async_client( | 12 | 2023-12-10 09:42:35+00:00 | 8k |
Open-All-Scale-Causal-Engine/OpenASCE | openasce/discovery/search_discovery/search_discovery.py | [
{
"identifier": "CausalGraph",
"path": "openasce/discovery/causal_graph.py",
"snippet": "class CausalGraph(object):\n \"\"\"Causal Graph Class\n\n Represent the casual graph\n\n \"\"\"\n\n DEFAULT_COLUMN_NAME_PREFIX = \"x\"\n\n def __init__(self, names=[], bn=None, w: np.ndarray = None):\... | from typing import Callable, Tuple, Union
from openasce.discovery.causal_graph import CausalGraph
from openasce.discovery.discovery import Discovery
from openasce.discovery.search_discovery.search_strategy import Strategy
from openasce.utils.logger import logger
import numpy as np | 5,918 | # Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class CausalSearchDiscovery(Discovery):
"""Execute the causal inference by search method
Attributes:
"""
def __init__(self) -> None:
"""Constructor
Arguments:
Returns:
"""
super().__init__()
def fit(self, *, X: Union[np.ndarray, Callable], **kwargs) -> None:
"""Feed the sample data
Arguments:
X (num of samples, features or callable returning np.ndarray): samples
Returns:
"""
self._data = X() if callable(X) else X
if isinstance(self._data, np.ndarray):
if self.node_names and len(self.node_names) == self._data.shape[1]:
pass
elif self.node_names:
raise ValueError(
f"The number of node does NOT match the column num of samples."
)
else:
logger.info(
f"No node name specified. Use arbitrary names like x0, x1..."
)
self.node_names = [f"x{i}" for i in range(self._data.shape[1])]
elif isinstance(self._data, dict):
self.node_names = self._data.get("node_names")
self._data = self._data.get("data")
elif isinstance(self._data, tuple):
self.node_names = [self._data[0]]
self._data = self._data[1]
else:
raise ValueError(f"No reasonal input data. {type(self._data)}")
| # Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class CausalSearchDiscovery(Discovery):
"""Execute the causal inference by search method
Attributes:
"""
def __init__(self) -> None:
"""Constructor
Arguments:
Returns:
"""
super().__init__()
def fit(self, *, X: Union[np.ndarray, Callable], **kwargs) -> None:
"""Feed the sample data
Arguments:
X (num of samples, features or callable returning np.ndarray): samples
Returns:
"""
self._data = X() if callable(X) else X
if isinstance(self._data, np.ndarray):
if self.node_names and len(self.node_names) == self._data.shape[1]:
pass
elif self.node_names:
raise ValueError(
f"The number of node does NOT match the column num of samples."
)
else:
logger.info(
f"No node name specified. Use arbitrary names like x0, x1..."
)
self.node_names = [f"x{i}" for i in range(self._data.shape[1])]
elif isinstance(self._data, dict):
self.node_names = self._data.get("node_names")
self._data = self._data.get("data")
elif isinstance(self._data, tuple):
self.node_names = [self._data[0]]
self._data = self._data[1]
else:
raise ValueError(f"No reasonal input data. {type(self._data)}") | strategy = Strategy(node_names=self.node_names, **kwargs) | 2 | 2023-12-06 05:54:36+00:00 | 8k |
8none1/idealLED | custom_components/ideal_led/config_flow.py | [
{
"identifier": "IDEALLEDInstance",
"path": "custom_components/ideal_led/idealled.py",
"snippet": "class IDEALLEDInstance:\n def __init__(self, address, reset: bool, delay: int, hass) -> None:\n self.loop = asyncio.get_running_loop()\n self._mac = address\n self._reset = reset\n ... | import asyncio
import voluptuous as vol
import logging
from .idealled import IDEALLEDInstance
from typing import Any
from bluetooth_data_tools import human_readable_name
from homeassistant import config_entries
from homeassistant.const import CONF_MAC
from homeassistant.helpers.device_registry import format_mac
from homeassistant.data_entry_flow import FlowResult
from homeassistant.core import callback
from homeassistant.components.bluetooth import (
BluetoothServiceInfoBleak,
async_discovered_service_info,
)
from bluetooth_sensor_state_data import BluetoothData
from home_assistant_bluetooth import BluetoothServiceInfo
from .const import DOMAIN, CONF_RESET, CONF_DELAY | 4,956 | """Confirm discovery."""
LOGGER.debug("Discovered bluetooth devices, step bluetooth confirm, : %s", user_input)
self._set_confirm_only()
return await self.async_step_user()
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the user step to pick discovered device."""
if user_input is not None:
self.mac = user_input[CONF_MAC]
self.name = self.context["title_placeholders"]["name"]
await self.async_set_unique_id(self.mac, raise_on_progress=False)
self._abort_if_unique_id_configured()
return await self.async_step_validate()
current_addresses = self._async_current_ids()
for discovery_info in async_discovered_service_info(self.hass):
self.mac = discovery_info.address
if self.mac in current_addresses:
LOGGER.debug("Device %s in current_addresses", (self.mac))
continue
if (device for device in self._discovered_devices if device.address == self.mac) == ([]):
LOGGER.debug("Device %s in discovered_devices", (device))
continue
device = DeviceData(discovery_info)
if device.supported():
self._discovered_devices.append(device)
if not self._discovered_devices:
return await self.async_step_manual()
LOGGER.debug("Discovered supported devices: %s - %s", self._discovered_devices[0].name(), self._discovered_devices[0].address())
mac_dict = { dev.address(): dev.name() for dev in self._discovered_devices }
return self.async_show_form(
step_id="user", data_schema=vol.Schema(
{
vol.Required(CONF_MAC): vol.In(mac_dict),
}
),
errors={})
async def async_step_validate(self, user_input: "dict[str, Any] | None" = None):
if user_input is not None:
if "flicker" in user_input:
if user_input["flicker"]:
return self.async_create_entry(title=self.name, data={CONF_MAC: self.mac, "name": self.name})
return self.async_abort(reason="cannot_validate")
if "retry" in user_input and not user_input["retry"]:
return self.async_abort(reason="cannot_connect")
error = await self.toggle_light()
if error:
return self.async_show_form(
step_id="validate", data_schema=vol.Schema(
{
vol.Required("retry"): bool
}
), errors={"base": "connect"})
return self.async_show_form(
step_id="validate", data_schema=vol.Schema(
{
vol.Required("flicker"): bool
}
), errors={})
async def async_step_manual(self, user_input: "dict[str, Any] | None" = None):
if user_input is not None:
self.mac = user_input[CONF_MAC]
self.name = user_input["name"]
await self.async_set_unique_id(format_mac(self.mac))
return await self.async_step_validate()
return self.async_show_form(
step_id="manual", data_schema=vol.Schema(
{
vol.Required(CONF_MAC): str,
vol.Required("name"): str
}
), errors={})
async def toggle_light(self):
if not self._instance:
self._instance = IDEALLEDInstance(self.mac, False, 120, self.hass)
try:
await self._instance.update()
await self._instance.turn_on()
await asyncio.sleep(1)
await self._instance.turn_off()
await asyncio.sleep(1)
await self._instance.turn_on()
await asyncio.sleep(1)
await self._instance.turn_off()
except (Exception) as error:
return error
finally:
await self._instance.stop()
@staticmethod
@callback
def async_get_options_flow(entry: config_entries.ConfigEntry):
return OptionsFlowHandler(entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, _user_input=None):
"""Manage the options."""
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
|
LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({("host"): str})
class DeviceData(BluetoothData):
def __init__(self, discovery_info) -> None:
self._discovery = discovery_info
LOGGER.debug("Discovered bluetooth devices, DeviceData, : %s , %s", self._discovery.address, self._discovery.name)
def supported(self):
return self._discovery.name.lower().startswith("isp-")
def address(self):
return self._discovery.address
def get_device_name(self):
return human_readable_name(None, self._discovery.name, self._discovery.address)
def name(self):
return human_readable_name(None, self._discovery.name, self._discovery.address)
def rssi(self):
return self._discovery.rssi
def _start_update(self, service_info: BluetoothServiceInfo) -> None:
"""Update from BLE advertisement data."""
LOGGER.debug("Parsing BLE advertisement data: %s", service_info)
class BJLEDFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self) -> None:
self.mac = None
self._device = None
self._instance = None
self.name = None
self._discovery_info: BluetoothServiceInfoBleak | None = None
self._discovered_device: DeviceData | None = None
self._discovered_devices = []
async def async_step_bluetooth(
self, discovery_info: BluetoothServiceInfoBleak
) -> FlowResult:
"""Handle the bluetooth discovery step."""
LOGGER.debug("Discovered bluetooth devices, step bluetooth, : %s , %s", discovery_info.address, discovery_info.name)
await self.async_set_unique_id(discovery_info.address)
self._abort_if_unique_id_configured()
device = DeviceData(discovery_info)
self.context["title_placeholders"] = {"name": device.name()}
if device.supported():
self._discovered_devices.append(device)
return await self.async_step_bluetooth_confirm()
else:
return self.async_abort(reason="not_supported")
async def async_step_bluetooth_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Confirm discovery."""
LOGGER.debug("Discovered bluetooth devices, step bluetooth confirm, : %s", user_input)
self._set_confirm_only()
return await self.async_step_user()
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the user step to pick discovered device."""
if user_input is not None:
self.mac = user_input[CONF_MAC]
self.name = self.context["title_placeholders"]["name"]
await self.async_set_unique_id(self.mac, raise_on_progress=False)
self._abort_if_unique_id_configured()
return await self.async_step_validate()
current_addresses = self._async_current_ids()
for discovery_info in async_discovered_service_info(self.hass):
self.mac = discovery_info.address
if self.mac in current_addresses:
LOGGER.debug("Device %s in current_addresses", (self.mac))
continue
if (device for device in self._discovered_devices if device.address == self.mac) == ([]):
LOGGER.debug("Device %s in discovered_devices", (device))
continue
device = DeviceData(discovery_info)
if device.supported():
self._discovered_devices.append(device)
if not self._discovered_devices:
return await self.async_step_manual()
LOGGER.debug("Discovered supported devices: %s - %s", self._discovered_devices[0].name(), self._discovered_devices[0].address())
mac_dict = { dev.address(): dev.name() for dev in self._discovered_devices }
return self.async_show_form(
step_id="user", data_schema=vol.Schema(
{
vol.Required(CONF_MAC): vol.In(mac_dict),
}
),
errors={})
async def async_step_validate(self, user_input: "dict[str, Any] | None" = None):
if user_input is not None:
if "flicker" in user_input:
if user_input["flicker"]:
return self.async_create_entry(title=self.name, data={CONF_MAC: self.mac, "name": self.name})
return self.async_abort(reason="cannot_validate")
if "retry" in user_input and not user_input["retry"]:
return self.async_abort(reason="cannot_connect")
error = await self.toggle_light()
if error:
return self.async_show_form(
step_id="validate", data_schema=vol.Schema(
{
vol.Required("retry"): bool
}
), errors={"base": "connect"})
return self.async_show_form(
step_id="validate", data_schema=vol.Schema(
{
vol.Required("flicker"): bool
}
), errors={})
async def async_step_manual(self, user_input: "dict[str, Any] | None" = None):
if user_input is not None:
self.mac = user_input[CONF_MAC]
self.name = user_input["name"]
await self.async_set_unique_id(format_mac(self.mac))
return await self.async_step_validate()
return self.async_show_form(
step_id="manual", data_schema=vol.Schema(
{
vol.Required(CONF_MAC): str,
vol.Required("name"): str
}
), errors={})
async def toggle_light(self):
if not self._instance:
self._instance = IDEALLEDInstance(self.mac, False, 120, self.hass)
try:
await self._instance.update()
await self._instance.turn_on()
await asyncio.sleep(1)
await self._instance.turn_off()
await asyncio.sleep(1)
await self._instance.turn_on()
await asyncio.sleep(1)
await self._instance.turn_off()
except (Exception) as error:
return error
finally:
await self._instance.stop()
@staticmethod
@callback
def async_get_options_flow(entry: config_entries.ConfigEntry):
return OptionsFlowHandler(entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, _user_input=None):
"""Manage the options."""
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {} | options = self.config_entry.options or {CONF_RESET: False,CONF_DELAY: 120} | 2 | 2023-12-14 08:01:32+00:00 | 8k |
amirzandieh/HyperAttention | hyper_attention.py | [
{
"identifier": "add_self_attentions",
"path": "src/attn_utils.py",
"snippet": "def add_self_attentions(attn1, lse1, attn2, lse2):\n \"\"\"\n inputs:\n - attn1, attn2: 4d-tensors with shape [b, h, n, d]\n - lse1, lse2: 4d-tensors of log-sum-exp with shape [b, h, n, 1]\n output:\n ... | import torch
from src.attn_utils import add_self_attentions
from src.flash_attn_triton import flash_attn_func
from src.hyper_attn_triton import hyper_attn_func
from src.angular_lsh_triton import AngularLSHTriton | 4,569 |
class HyperAttention(torch.nn.Module):
def __init__(self, input_dim=64, lsh_num_projs=8, block_size=256, sample_size=256, min_seq_len=2048,
smooth_block=False, **kwargs):
"""
- block_size and sample_size must be divisible by 128
"""
super().__init__()
self.input_dim = input_dim
self.lsh_num_projs = lsh_num_projs
self.block_size = block_size
self.sample_size = sample_size
self.min_seq_len = min_seq_len
self.smooth_block = smooth_block
self.lsh = AngularLSHTriton(num_projs=self.lsh_num_projs, dim=(1, 1, input_dim))
def forward(self, query: torch.tensor, key: torch.tensor, value: torch.tensor, scale=None, causal=False,
return_lse=False):
"""
Forward function for HyperAttention. If no causal masking, simply invokes forward_no_causal_mask method.
If there is causal masking, it partitions the attention matrix and recurses on the partitions.
inputs:
- query, key, and valu: must have same sequence lengths but dimension of values vectors can be different
from that of query or key
- sequence lengths must be divisible by block_size
output:
- attn: (approximation of) the final attention output tensor
- lse: (approximation of) log sum exp of the qk matrix
"""
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
n_query = query.shape[2]
batch_size, n_heads, n_key, dim = key.shape
scale = scale or dim ** (-0.5)
assert n_query == n_key
# without causal masking
if causal is False:
attn, lse = self.forward_no_causal_mask(query, key, value, scale)
else: # with causal masking
if n_key <= self.min_seq_len:
attn, lse = flash_attn_func(query.transpose(1, 2),
key.transpose(1, 2),
value.transpose(1, 2),
None, True, scale)
attn = attn.transpose(1, 2)
else:
# If n_query is odd we pad inputs by zero rows
if n_query % 2:
query = torch.nn.functional.pad(query, (0, 0, 0, 1), mode='constant', value=0.)
key = torch.nn.functional.pad(key, (0, 0, 0, 1), mode='constant', value=0.)
value = torch.nn.functional.pad(value, (0, 0, 0, 1), mode='constant', value=0.)
# extract block diagonal parts
q_bd = query.view(batch_size, 2 * n_heads, query.shape[2] // 2, query.shape[-1])
k_bd = key.view(batch_size, 2 * n_heads, key.shape[2] // 2, key.shape[-1])
v_bd = value.view(batch_size, 2 * n_heads, key.shape[2] // 2, value.shape[-1])
attn_bd, lse_bd = self.forward(q_bd, k_bd, v_bd, scale, True, True)
if attn_bd.shape[2] not in attn_bd.stride():
attn_bd = attn_bd.contiguous()
attn_bd = attn_bd.view(batch_size, n_heads, -1, dim)
if lse_bd.shape[2] not in lse_bd.stride():
lse_bd = lse_bd.contiguous()
lse_bd = lse_bd.view(batch_size, n_heads, -1, 1)
# lowe diagonal block is an unmasked attention
attn_unmasked, lse_unmasked = self.forward_no_causal_mask(
query[:, :, key.shape[2] // 2:, :], key[:, :, :key.shape[2] // 2, :],
value[:, :, :key.shape[2] // 2, :], scale)
attn_up, lse_up = attn_bd[:, :, :query.shape[2] // 2, :], lse_bd[:, :, :query.shape[2] // 2, :]
|
class HyperAttention(torch.nn.Module):
def __init__(self, input_dim=64, lsh_num_projs=8, block_size=256, sample_size=256, min_seq_len=2048,
smooth_block=False, **kwargs):
"""
- block_size and sample_size must be divisible by 128
"""
super().__init__()
self.input_dim = input_dim
self.lsh_num_projs = lsh_num_projs
self.block_size = block_size
self.sample_size = sample_size
self.min_seq_len = min_seq_len
self.smooth_block = smooth_block
self.lsh = AngularLSHTriton(num_projs=self.lsh_num_projs, dim=(1, 1, input_dim))
def forward(self, query: torch.tensor, key: torch.tensor, value: torch.tensor, scale=None, causal=False,
return_lse=False):
"""
Forward function for HyperAttention. If no causal masking, simply invokes forward_no_causal_mask method.
If there is causal masking, it partitions the attention matrix and recurses on the partitions.
inputs:
- query, key, and valu: must have same sequence lengths but dimension of values vectors can be different
from that of query or key
- sequence lengths must be divisible by block_size
output:
- attn: (approximation of) the final attention output tensor
- lse: (approximation of) log sum exp of the qk matrix
"""
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
n_query = query.shape[2]
batch_size, n_heads, n_key, dim = key.shape
scale = scale or dim ** (-0.5)
assert n_query == n_key
# without causal masking
if causal is False:
attn, lse = self.forward_no_causal_mask(query, key, value, scale)
else: # with causal masking
if n_key <= self.min_seq_len:
attn, lse = flash_attn_func(query.transpose(1, 2),
key.transpose(1, 2),
value.transpose(1, 2),
None, True, scale)
attn = attn.transpose(1, 2)
else:
# If n_query is odd we pad inputs by zero rows
if n_query % 2:
query = torch.nn.functional.pad(query, (0, 0, 0, 1), mode='constant', value=0.)
key = torch.nn.functional.pad(key, (0, 0, 0, 1), mode='constant', value=0.)
value = torch.nn.functional.pad(value, (0, 0, 0, 1), mode='constant', value=0.)
# extract block diagonal parts
q_bd = query.view(batch_size, 2 * n_heads, query.shape[2] // 2, query.shape[-1])
k_bd = key.view(batch_size, 2 * n_heads, key.shape[2] // 2, key.shape[-1])
v_bd = value.view(batch_size, 2 * n_heads, key.shape[2] // 2, value.shape[-1])
attn_bd, lse_bd = self.forward(q_bd, k_bd, v_bd, scale, True, True)
if attn_bd.shape[2] not in attn_bd.stride():
attn_bd = attn_bd.contiguous()
attn_bd = attn_bd.view(batch_size, n_heads, -1, dim)
if lse_bd.shape[2] not in lse_bd.stride():
lse_bd = lse_bd.contiguous()
lse_bd = lse_bd.view(batch_size, n_heads, -1, 1)
# lowe diagonal block is an unmasked attention
attn_unmasked, lse_unmasked = self.forward_no_causal_mask(
query[:, :, key.shape[2] // 2:, :], key[:, :, :key.shape[2] // 2, :],
value[:, :, :key.shape[2] // 2, :], scale)
attn_up, lse_up = attn_bd[:, :, :query.shape[2] // 2, :], lse_bd[:, :, :query.shape[2] // 2, :] | attn_down, lse_down = add_self_attentions(attn_bd[:, :, query.shape[2] // 2:, :], | 0 | 2023-12-08 21:28:22+00:00 | 8k |
Psivant/femto | femto/fe/tests/septop/test_runner.py | [
{
"identifier": "_prepare_complex_phase",
"path": "femto/fe/septop/_runner.py",
"snippet": "@femto.md.utils.mpi.run_on_rank_zero\ndef _prepare_complex_phase(\n config: \"femto.fe.septop.SepTopPhaseConfig\",\n ligand_1_coords: pathlib.Path,\n ligand_1_params: pathlib.Path,\n ligand_2_coords: ... | import openmm
import parmed
import femto.fe.inputs
import femto.fe.septop
import femto.fe.utils.queue
from femto.fe.septop._runner import (
_prepare_complex_phase,
_prepare_solution_phase,
run_complex_phase,
run_solution_phase,
submit_network,
)
from femto.fe.tests.systems import CDK2_SYSTEM
from femto.md.tests.mocking import build_mock_structure | 3,831 |
def test_prepare_solution_phase(mock_bfe_directory, mocker):
mock_setup = mocker.patch(
"femto.fe.septop._setup.setup_solution",
autospec=True,
return_value=(parmed.Structure(), openmm.System()),
)
ligand_1_coords = mock_bfe_directory / "forcefield/1h1q/vacuum.mol2"
ligand_1_params = mock_bfe_directory / "forcefield/1h1q/vacuum.parm7"
ligand_2_coords = mock_bfe_directory / "forcefield/1oiu/vacuum.mol2"
ligand_2_params = mock_bfe_directory / "forcefield/1oiu/vacuum.parm7"
ligand_1_ref_atoms = ("@1", "@2", "@3")
ligand_2_ref_atoms = ("@4", "@5", "@6")
config = femto.fe.septop.SepTopConfig().solution
topology, system = _prepare_solution_phase(
config,
ligand_1_coords,
ligand_1_params,
ligand_2_coords,
ligand_2_params,
ligand_1_ref_atoms,
ligand_2_ref_atoms,
)
assert isinstance(system, openmm.System)
assert isinstance(topology, parmed.Structure)
mock_setup.assert_called_once_with(
config.setup, mocker.ANY, mocker.ANY, ligand_1_ref_atoms, ligand_2_ref_atoms
)
def test_prepare_complex_phase(mock_bfe_directory, mocker):
mock_setup = mocker.patch(
"femto.fe.septop.setup_complex",
autospec=True,
return_value=(parmed.Structure(), openmm.System()),
)
mock_parameterize = mocker.patch(
"femto.md.utils.amber.parameterize_structure", autospec=True
)
receptor_coords = mock_bfe_directory / "proteins/cdk2/protein.pdb"
receptor_params = None
ligand_1_coords = mock_bfe_directory / "forcefield/1h1q/vacuum.mol2"
ligand_1_params = mock_bfe_directory / "forcefield/1h1q/vacuum.parm7"
ligand_2_coords = mock_bfe_directory / "forcefield/1oiu/vacuum.mol2"
ligand_2_params = mock_bfe_directory / "forcefield/1oiu/vacuum.parm7"
ligand_1_ref_atoms = ("@1", "@2", "@3")
ligand_2_ref_atoms = ("@4", "@5", "@6")
receptor_ref_atoms = ("@7", "@8", "@9")
config = femto.fe.septop.SepTopConfig().complex
|
def test_prepare_solution_phase(mock_bfe_directory, mocker):
mock_setup = mocker.patch(
"femto.fe.septop._setup.setup_solution",
autospec=True,
return_value=(parmed.Structure(), openmm.System()),
)
ligand_1_coords = mock_bfe_directory / "forcefield/1h1q/vacuum.mol2"
ligand_1_params = mock_bfe_directory / "forcefield/1h1q/vacuum.parm7"
ligand_2_coords = mock_bfe_directory / "forcefield/1oiu/vacuum.mol2"
ligand_2_params = mock_bfe_directory / "forcefield/1oiu/vacuum.parm7"
ligand_1_ref_atoms = ("@1", "@2", "@3")
ligand_2_ref_atoms = ("@4", "@5", "@6")
config = femto.fe.septop.SepTopConfig().solution
topology, system = _prepare_solution_phase(
config,
ligand_1_coords,
ligand_1_params,
ligand_2_coords,
ligand_2_params,
ligand_1_ref_atoms,
ligand_2_ref_atoms,
)
assert isinstance(system, openmm.System)
assert isinstance(topology, parmed.Structure)
mock_setup.assert_called_once_with(
config.setup, mocker.ANY, mocker.ANY, ligand_1_ref_atoms, ligand_2_ref_atoms
)
def test_prepare_complex_phase(mock_bfe_directory, mocker):
mock_setup = mocker.patch(
"femto.fe.septop.setup_complex",
autospec=True,
return_value=(parmed.Structure(), openmm.System()),
)
mock_parameterize = mocker.patch(
"femto.md.utils.amber.parameterize_structure", autospec=True
)
receptor_coords = mock_bfe_directory / "proteins/cdk2/protein.pdb"
receptor_params = None
ligand_1_coords = mock_bfe_directory / "forcefield/1h1q/vacuum.mol2"
ligand_1_params = mock_bfe_directory / "forcefield/1h1q/vacuum.parm7"
ligand_2_coords = mock_bfe_directory / "forcefield/1oiu/vacuum.mol2"
ligand_2_params = mock_bfe_directory / "forcefield/1oiu/vacuum.parm7"
ligand_1_ref_atoms = ("@1", "@2", "@3")
ligand_2_ref_atoms = ("@4", "@5", "@6")
receptor_ref_atoms = ("@7", "@8", "@9")
config = femto.fe.septop.SepTopConfig().complex
| topology, system = _prepare_complex_phase( | 0 | 2023-12-07 15:28:18+00:00 | 8k |
AIFSH/NativeDancer | nativedancer/processors/frame/modules/face_enhancer.py | [
{
"identifier": "helperdoc",
"path": "nativedancer/helperdoc.py",
"snippet": "DOC =\\\n{\n\t'python_not_supported': 'Python version is not supported, upgrade to {version} or higher',\n\t'ffmpeg_not_installed': 'FFMpeg is not installed',\n\t'install_dependency_help': 'select the variant of {dependency} t... | from typing import Any, List, Dict, Literal, Optional
from argparse import ArgumentParser
from nativedancer import helperdoc
from nativedancer.face_analyser import get_many_faces, clear_face_analyser
from nativedancer.face_helper import warp_face, paste_back
from nativedancer.content_analyser import clear_content_analyser
from nativedancer.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel
from nativedancer.utils import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, create_metavar, update_status
from nativedancer.vision import read_image, read_static_image, write_image
from nativedancer.processors.frame import globals as frame_processors_globals
from nativedancer.processors.frame import choices as frame_processors_choices
import cv2
import threading
import numpy
import onnxruntime
import nativedancer.globals
import nativedancer.processors.frame.core as frame_processors | 4,828 |
FRAME_PROCESSOR = None
THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = 'NATIVEDANCER.FRAME_PROCESSOR.FACE_ENHANCER'
MODELS : Dict[str, ModelValue] =\
{
'codeformer':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
'path': resolve_relative_path('../weights/face_enhancer/codeformer.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gfpgan_1.2':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.2.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gfpgan_1.3':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.3.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gfpgan_1.4':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.4.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gpen_bfr_256':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gpen_bfr_256.onnx'),
'template': 'arcface_v2',
'size': (128, 256)
},
'gpen_bfr_512':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gpen_bfr_512.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'restoreformer':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx',
'path': resolve_relative_path('../weights/face_enhancer/restoreformer.onnx'),
'template': 'ffhq',
'size': (512, 512)
}
}
OPTIONS : Optional[OptionsWithModel] = None
def get_frame_processor() -> Any:
global FRAME_PROCESSOR
with THREAD_LOCK:
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = nativedancer.globals.execution_providers)
return FRAME_PROCESSOR
def clear_frame_processor() -> None:
global FRAME_PROCESSOR
FRAME_PROCESSOR = None
def get_options(key : Literal['model']) -> Any:
global OPTIONS
if OPTIONS is None:
OPTIONS =\
{
'model': MODELS[frame_processors_globals.face_enhancer_model]
}
return OPTIONS.get(key)
def set_options(key : Literal['model'], value : Any) -> None:
global OPTIONS
OPTIONS[key] = value
def register_args(program : ArgumentParser) -> None:
program.add_argument('--face-enhancer-model', help = helperdoc.get('frame_processor_model_help'), dest = 'face_enhancer_model', default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
|
FRAME_PROCESSOR = None
THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = 'NATIVEDANCER.FRAME_PROCESSOR.FACE_ENHANCER'
MODELS : Dict[str, ModelValue] =\
{
'codeformer':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
'path': resolve_relative_path('../weights/face_enhancer/codeformer.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gfpgan_1.2':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.2.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gfpgan_1.3':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.3.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gfpgan_1.4':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.4.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gpen_bfr_256':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gpen_bfr_256.onnx'),
'template': 'arcface_v2',
'size': (128, 256)
},
'gpen_bfr_512':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gpen_bfr_512.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'restoreformer':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx',
'path': resolve_relative_path('../weights/face_enhancer/restoreformer.onnx'),
'template': 'ffhq',
'size': (512, 512)
}
}
OPTIONS : Optional[OptionsWithModel] = None
def get_frame_processor() -> Any:
global FRAME_PROCESSOR
with THREAD_LOCK:
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = nativedancer.globals.execution_providers)
return FRAME_PROCESSOR
def clear_frame_processor() -> None:
global FRAME_PROCESSOR
FRAME_PROCESSOR = None
def get_options(key : Literal['model']) -> Any:
global OPTIONS
if OPTIONS is None:
OPTIONS =\
{
'model': MODELS[frame_processors_globals.face_enhancer_model]
}
return OPTIONS.get(key)
def set_options(key : Literal['model'], value : Any) -> None:
global OPTIONS
OPTIONS[key] = value
def register_args(program : ArgumentParser) -> None:
program.add_argument('--face-enhancer-model', help = helperdoc.get('frame_processor_model_help'), dest = 'face_enhancer_model', default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models) | program.add_argument('--face-enhancer-blend', help = helperdoc.get('frame_processor_blend_help'), dest = 'face_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range)) | 13 | 2023-12-10 20:14:00+00:00 | 8k |
ethanweber/nerfiller | nerfiller/inpaint/saicinpainting/training/data/datasets.py | [
{
"identifier": "InpaintingDataset",
"path": "nerfiller/inpaint/saicinpainting/evaluation/data.py",
"snippet": "class InpaintingDataset(Dataset):\n def __init__(self, datadir, img_suffix=\".jpg\", pad_out_to_modulo=None, scale_factor=None):\n self.datadir = datadir\n self.mask_filenames... | import glob
import logging
import os
import random
import albumentations as A
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import webdataset
from omegaconf import open_dict, OmegaConf
from torch.utils.data import (
Dataset,
IterableDataset,
DataLoader,
DistributedSampler,
ConcatDataset,
)
from nerfiller.inpaint.saicinpainting.evaluation.data import (
InpaintingDataset as InpaintingEvaluationDataset,
OurInpaintingDataset as OurInpaintingEvaluationDataset,
ceil_modulo,
InpaintingEvalOnlineDataset,
)
from nerfiller.inpaint.saicinpainting.training.data.aug import (
IAAAffine2,
IAAPerspective2,
)
from nerfiller.inpaint.saicinpainting.training.data.masks import get_mask_generator | 4,338 | return ohe.permute(2, 0, 1).float(), tensor.unsqueeze(0)
def get_transforms(transform_variant, out_size):
if transform_variant == "default":
transform = A.Compose(
[
A.RandomScale(scale_limit=0.2), # +/- 20%
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.06)),
IAAAffine2(scale=(0.7, 1.3), rotate=(-40, 40), shear=(-0.1, 0.1)),
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.OpticalDistortion(),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions_scale05_1":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.06)),
IAAAffine2(scale=(0.5, 1.0), rotate=(-40, 40), shear=(-0.1, 0.1), p=1),
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.OpticalDistortion(),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions_scale03_12":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.06)),
IAAAffine2(scale=(0.3, 1.2), rotate=(-40, 40), shear=(-0.1, 0.1), p=1),
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.OpticalDistortion(),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions_scale03_07":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.06)),
IAAAffine2(scale=(0.3, 0.7), rotate=(-40, 40), shear=(-0.1, 0.1), p=1), # scale 512 to 256 in average
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.OpticalDistortion(),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions_light":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.02)),
IAAAffine2(scale=(0.8, 1.8), rotate=(-20, 20), shear=(-0.03, 0.03)),
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "non_space_transform":
transform = A.Compose(
[
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "no_augs":
transform = A.Compose([A.ToFloat()])
else:
raise ValueError(f"Unexpected transform_variant {transform_variant}")
return transform
def make_default_train_dataloader(
indir,
kind="default",
out_size=512,
mask_gen_kwargs=None,
transform_variant="default",
mask_generator_kind="mixed",
dataloader_kwargs=None,
ddp_kwargs=None,
**kwargs,
):
LOGGER.info(f"Make train dataloader {kind} from {indir}. Using mask generator={mask_generator_kind}")
|
LOGGER = logging.getLogger(__name__)
class InpaintingTrainDataset(Dataset):
def __init__(self, indir, mask_generator, transform):
self.in_files = list(glob.glob(os.path.join(indir, "**", "*.jpg"), recursive=True))
self.mask_generator = mask_generator
self.transform = transform
self.iter_i = 0
def __len__(self):
return len(self.in_files)
def __getitem__(self, item):
path = self.in_files[item]
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = self.transform(image=img)["image"]
img = np.transpose(img, (2, 0, 1))
# TODO: maybe generate mask before augmentations? slower, but better for segmentation-based masks
mask = self.mask_generator(img, iter_i=self.iter_i)
self.iter_i += 1
return dict(image=img, mask=mask)
class InpaintingTrainWebDataset(IterableDataset):
def __init__(self, indir, mask_generator, transform, shuffle_buffer=200):
self.impl = webdataset.Dataset(indir).shuffle(shuffle_buffer).decode("rgb").to_tuple("jpg")
self.mask_generator = mask_generator
self.transform = transform
def __iter__(self):
for iter_i, (img,) in enumerate(self.impl):
img = np.clip(img * 255, 0, 255).astype("uint8")
img = self.transform(image=img)["image"]
img = np.transpose(img, (2, 0, 1))
mask = self.mask_generator(img, iter_i=iter_i)
yield dict(image=img, mask=mask)
class ImgSegmentationDataset(Dataset):
def __init__(
self,
indir,
mask_generator,
transform,
out_size,
segm_indir,
semantic_seg_n_classes,
):
self.indir = indir
self.segm_indir = segm_indir
self.mask_generator = mask_generator
self.transform = transform
self.out_size = out_size
self.semantic_seg_n_classes = semantic_seg_n_classes
self.in_files = list(glob.glob(os.path.join(indir, "**", "*.jpg"), recursive=True))
def __len__(self):
return len(self.in_files)
def __getitem__(self, item):
path = self.in_files[item]
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (self.out_size, self.out_size))
img = self.transform(image=img)["image"]
img = np.transpose(img, (2, 0, 1))
mask = self.mask_generator(img)
segm, segm_classes = self.load_semantic_segm(path)
result = dict(image=img, mask=mask, segm=segm, segm_classes=segm_classes)
return result
def load_semantic_segm(self, img_path):
segm_path = img_path.replace(self.indir, self.segm_indir).replace(".jpg", ".png")
mask = cv2.imread(segm_path, cv2.IMREAD_GRAYSCALE)
mask = cv2.resize(mask, (self.out_size, self.out_size))
tensor = torch.from_numpy(np.clip(mask.astype(int) - 1, 0, None))
ohe = F.one_hot(tensor.long(), num_classes=self.semantic_seg_n_classes) # w x h x n_classes
return ohe.permute(2, 0, 1).float(), tensor.unsqueeze(0)
def get_transforms(transform_variant, out_size):
if transform_variant == "default":
transform = A.Compose(
[
A.RandomScale(scale_limit=0.2), # +/- 20%
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.06)),
IAAAffine2(scale=(0.7, 1.3), rotate=(-40, 40), shear=(-0.1, 0.1)),
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.OpticalDistortion(),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions_scale05_1":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.06)),
IAAAffine2(scale=(0.5, 1.0), rotate=(-40, 40), shear=(-0.1, 0.1), p=1),
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.OpticalDistortion(),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions_scale03_12":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.06)),
IAAAffine2(scale=(0.3, 1.2), rotate=(-40, 40), shear=(-0.1, 0.1), p=1),
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.OpticalDistortion(),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions_scale03_07":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.06)),
IAAAffine2(scale=(0.3, 0.7), rotate=(-40, 40), shear=(-0.1, 0.1), p=1), # scale 512 to 256 in average
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.OpticalDistortion(),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "distortions_light":
transform = A.Compose(
[
IAAPerspective2(scale=(0.0, 0.02)),
IAAAffine2(scale=(0.8, 1.8), rotate=(-20, 20), shear=(-0.03, 0.03)),
A.PadIfNeeded(min_height=out_size, min_width=out_size),
A.RandomCrop(height=out_size, width=out_size),
A.HorizontalFlip(),
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "non_space_transform":
transform = A.Compose(
[
A.CLAHE(),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
A.ToFloat(),
]
)
elif transform_variant == "no_augs":
transform = A.Compose([A.ToFloat()])
else:
raise ValueError(f"Unexpected transform_variant {transform_variant}")
return transform
def make_default_train_dataloader(
indir,
kind="default",
out_size=512,
mask_gen_kwargs=None,
transform_variant="default",
mask_generator_kind="mixed",
dataloader_kwargs=None,
ddp_kwargs=None,
**kwargs,
):
LOGGER.info(f"Make train dataloader {kind} from {indir}. Using mask generator={mask_generator_kind}")
| mask_generator = get_mask_generator(kind=mask_generator_kind, kwargs=mask_gen_kwargs) | 6 | 2023-12-07 19:12:08+00:00 | 8k |
nnanhuang/Customize-it-3D | ldm/models/diffusion/ddim.py | [
{
"identifier": "make_ddim_sampling_parameters",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev ... | import torch
import numpy as np
from tqdm import tqdm
from functools import partial
from einops import rearrange
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
from ldm.models.diffusion.sampling_util import renorm_thresholding, norm_thresholding, spatial_norm_thresholding | 4,471 | def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None,
dynamic_threshold=None):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
for k in c:
if isinstance(c[k], list):
c_in[k] = [torch.cat([
unconditional_conditioning[k][i],
c[k][i]]) for i in range(len(c[k]))]
else:
c_in[k] = torch.cat([
unconditional_conditioning[k],
c[k]])
else:
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
print(t, sqrt_one_minus_at, a_t)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None:
pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
@torch.no_grad()
def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
unconditional_guidance_scale=1.0, unconditional_conditioning=None):
num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]
assert t_enc <= num_reference_steps
num_steps = t_enc
if use_original_steps:
alphas_next = self.alphas_cumprod[:num_steps]
alphas = self.alphas_cumprod_prev[:num_steps]
else:
alphas_next = self.ddim_alphas[:num_steps]
alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
x_next = x0
intermediates = []
inter_steps = []
for i in tqdm(range(num_steps), desc='Encoding Image'):
t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)
if unconditional_guidance_scale == 1.:
noise_pred = self.model.apply_model(x_next, t, c)
else:
assert unconditional_conditioning is not None
e_t_uncond, noise_pred = torch.chunk(
self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
torch.cat((unconditional_conditioning, c))), 2)
noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)
xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
weighted_noise_pred = alphas_next[i].sqrt() * (
(1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
x_next = xt_weighted + weighted_noise_pred
if return_intermediates and i % (
num_steps // return_intermediates) == 0 and i < num_steps - 1:
intermediates.append(x_next)
inter_steps.append(i)
elif return_intermediates and i >= num_steps - 2:
intermediates.append(x_next)
inter_steps.append(i)
out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
if return_intermediates:
out.update({'intermediates': intermediates})
return x_next, out
@torch.no_grad()
def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
# fast, but does not allow for exact reconstruction
# t serves as an index to gather the correct alphas
if use_original_steps:
sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
else:
sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
if noise is None:
noise = torch.randn_like(x0)
| """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def to(self, device):
"""Same as to in torch module
Don't really underestand why this isn't a module in the first place"""
for k, v in self.__dict__.items():
if isinstance(v, torch.Tensor):
new_v = getattr(self, k).to(device)
setattr(self, k, new_v)
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
1 - self.alphas_cumprod / self.alphas_cumprod_prev))
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
@torch.no_grad()
def sample(self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.,
mask=None,
x0=None,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.,
unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
dynamic_threshold=None,
**kwargs
):
if conditioning is not None:
if isinstance(conditioning, dict):
ctmp = conditioning[list(conditioning.keys())[0]]
while isinstance(ctmp, list): ctmp = ctmp[0]
cbs = ctmp.shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
else:
if conditioning.shape[0] != batch_size:
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
# print(f'Data shape for DDIM sampling is {size}, eta {eta}')
samples, intermediates = self.ddim_sampling(conditioning, size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask, x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
)
return samples, intermediates
@torch.no_grad()
def ddim_sampling(self, cond, shape,
x_T=None, ddim_use_original_steps=False,
callback=None, timesteps=None, quantize_denoised=False,
mask=None, x0=None, img_callback=None, log_every_t=100,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
t_start=-1):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
if timesteps is None:
timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
elif timesteps is not None and not ddim_use_original_steps:
subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
timesteps = self.ddim_timesteps[:subset_end]
timesteps = timesteps[:t_start]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
# print(f"Running DDIM Sampling with {total_steps} timesteps")
iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
img = img_orig * mask + (1. - mask) * img
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised, temperature=temperature,
noise_dropout=noise_dropout, score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold)
img, pred_x0 = outs
if callback:
img = callback(i, img, pred_x0)
if img_callback:
img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None,
dynamic_threshold=None):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
for k in c:
if isinstance(c[k], list):
c_in[k] = [torch.cat([
unconditional_conditioning[k][i],
c[k][i]]) for i in range(len(c[k]))]
else:
c_in[k] = torch.cat([
unconditional_conditioning[k],
c[k]])
else:
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
print(t, sqrt_one_minus_at, a_t)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None:
pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
@torch.no_grad()
def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
unconditional_guidance_scale=1.0, unconditional_conditioning=None):
num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]
assert t_enc <= num_reference_steps
num_steps = t_enc
if use_original_steps:
alphas_next = self.alphas_cumprod[:num_steps]
alphas = self.alphas_cumprod_prev[:num_steps]
else:
alphas_next = self.ddim_alphas[:num_steps]
alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
x_next = x0
intermediates = []
inter_steps = []
for i in tqdm(range(num_steps), desc='Encoding Image'):
t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)
if unconditional_guidance_scale == 1.:
noise_pred = self.model.apply_model(x_next, t, c)
else:
assert unconditional_conditioning is not None
e_t_uncond, noise_pred = torch.chunk(
self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
torch.cat((unconditional_conditioning, c))), 2)
noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)
xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
weighted_noise_pred = alphas_next[i].sqrt() * (
(1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
x_next = xt_weighted + weighted_noise_pred
if return_intermediates and i % (
num_steps // return_intermediates) == 0 and i < num_steps - 1:
intermediates.append(x_next)
inter_steps.append(i)
elif return_intermediates and i >= num_steps - 2:
intermediates.append(x_next)
inter_steps.append(i)
out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
if return_intermediates:
out.update({'intermediates': intermediates})
return x_next, out
@torch.no_grad()
def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
# fast, but does not allow for exact reconstruction
# t serves as an index to gather the correct alphas
if use_original_steps:
sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
else:
sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
if noise is None:
noise = torch.randn_like(x0) | return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + | 3 | 2023-12-14 11:03:35+00:00 | 8k |
TaoHuang13/diffusion_reward | diffusion_reward/models/video_models/vqdiffusion/engine/solver.py | [
{
"identifier": "get_rank",
"path": "diffusion_reward/models/video_models/vqdiffusion/distributed/distributed.py",
"snippet": "def get_rank():\n if not dist.is_available():\n return 0\n\n if not dist.is_initialized():\n return 0\n\n return dist.get_rank()"
},
{
"identifier... | import copy
import math
import os
import time
import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
from omegaconf import OmegaConf
from PIL import Image
from torch.optim.lr_scheduler import ReduceLROnPlateau
from ..distributed.distributed import get_rank, is_primary, reduce_dict
from ..engine.ema import EMA
from ..engine.lr_scheduler import ReduceLROnPlateauWithWarmup
from ..utils.misc import (format_seconds, get_model_parameters_info,
instantiate_from_config)
from torch.cuda.amp import GradScaler, autocast | 5,966 | if 'ema' in config['solver'] and args.local_rank == 0:
ema_args = config['solver']['ema']
ema_args = OmegaConf.to_container(copy.deepcopy(ema_args), resolve=True)
ema_args['model'] = self.model
self.ema = EMA(**ema_args)
else:
self.ema = None
self.logger.log_info(str(get_model_parameters_info(self.model)))
self.model.cuda()
self.device = self.model.device
if self.args.distributed:
self.logger.log_info('Distributed, begin DDP the model...')
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.args.gpu], find_unused_parameters=False)
self.logger.log_info('Distributed, DDP model done!')
# prepare for amp
self.args.amp = self.args.amp and AMP
if self.args.amp:
self.scaler = GradScaler()
self.logger.log_info('Using AMP for training!')
self.logger.log_info("{}: global rank {}: prepare solver done!".format(self.args.exp_name,self.args.global_rank), check_primary=False)
self.best_loss = float('inf')
    def _get_optimizer_and_scheduler(self, op_sc_list):
        """Build one optimizer (and optional LR scheduler) per config entry.

        Args:
            op_sc_list: list of config dicts, one per optimizer group. Each may
                carry 'name', epoch/iteration activation bounds, an 'optimizer'
                sub-config and an optional 'scheduler' sub-config.

        Returns:
            dict mapping group name -> {'name', 'start_epoch', 'end_epoch',
            'start_iteration', 'end_iteration', 'optimizer', 'scheduler'?}
            where 'optimizer'/'scheduler' wrap the instantiated module plus its
            'step_iteration' cadence.
        """
        optimizer_and_scheduler = {}
        for op_sc_cfg in op_sc_list:
            # Activation window defaults: always active ('none' group, no bounds).
            op_sc = {
                'name': op_sc_cfg.get('name', 'none'),
                'start_epoch': op_sc_cfg.get('start_epoch', 0),
                'end_epoch': op_sc_cfg.get('end_epoch', -1),
                'start_iteration': op_sc_cfg.get('start_iteration', 0),
                'end_iteration': op_sc_cfg.get('end_iteration', -1),
            }
            if op_sc['name'] == 'none':
                # parameters = self.model.parameters()
                # Default group: all trainable parameters of the model.
                parameters = filter(lambda p: p.requires_grad, self.model.parameters())
            else:
                # NOTE: get the parameters with the given name, the parameters() should be overide
                parameters = self.model.parameters(name=op_sc['name'])
            # build optimizer
            op_cfg = op_sc_cfg.get('optimizer', {'target': 'torch.optim.SGD', 'params': {}})
            # Deep-copy + resolve so we can mutate without touching the shared config.
            op_cfg = OmegaConf.to_container(copy.deepcopy(op_cfg), resolve=True)
            if 'params' not in op_cfg:
                op_cfg['params'] = {}
            if 'lr' not in op_cfg['params']:
                # Fall back to the solver-level (possibly batch-scaled) learning rate.
                op_cfg['params']['lr'] = self.lr
            op_cfg['params']['params'] = parameters
            optimizer = instantiate_from_config(op_cfg)
            op_sc['optimizer'] = {
                'module': optimizer,
                'step_iteration': op_cfg.get('step_iteration', 1)
            }
            assert isinstance(op_sc['optimizer']['step_iteration'], int), 'optimizer steps should be a integer number of iterations'
            # build scheduler
            if 'scheduler' in op_sc_cfg:
                sc_cfg = OmegaConf.to_container(copy.deepcopy(op_sc_cfg['scheduler']), resolve=True)
                sc_cfg['params']['optimizer'] = optimizer
                # for cosine annealing lr, compute T_max
                if sc_cfg['target'].split('.')[-1] in ['CosineAnnealingLRWithWarmup', 'CosineAnnealingLR']:
                    T_max = self.max_epochs * self.dataloader['train_iterations']
                    sc_cfg['params']['T_max'] = T_max
                scheduler = instantiate_from_config(sc_cfg)
                op_sc['scheduler'] = {
                    'module': scheduler,
                    'step_iteration': sc_cfg.get('step_iteration', 1)
                }
                if op_sc['scheduler']['step_iteration'] == 'epoch':
                    # 'epoch' cadence is expressed as one scheduler step per training epoch.
                    op_sc['scheduler']['step_iteration'] = self.dataloader['train_iterations']
            optimizer_and_scheduler[op_sc['name']] = op_sc
        return optimizer_and_scheduler
    def _get_lr(self, return_type='str'):
        """Report the current learning rate of every optimizer group.

        Args:
            return_type: 'str' for a human-readable log string, 'dict' for the
                raw mapping {'<name>_lr': lr}.
        """
        lrs = {}
        for op_sc_n, op_sc in self.optimizer_and_scheduler.items():
            # Read the live LR from the optimizer's first param group.
            lr = op_sc['optimizer']['module'].state_dict()['param_groups'][0]['lr']
            lrs[op_sc_n+'_lr'] = round(lr, 10)
        if return_type == 'str':
            # Flatten the dict repr into "key: value" pairs; the default group
            # key 'none_lr' is rewritten via the 'none' -> 'lr' substitution.
            lrs = str(lrs)
            lrs = lrs.replace('none', 'lr').replace('{', '').replace('}','').replace('\'', '')
        elif return_type == 'dict':
            pass
        else:
            raise ValueError('Unknow of return type: {}'.format(return_type))
        return lrs
def sample(self, batch, phase='train', step_type='iteration'):
tic = time.time()
self.logger.log_info('Begin to sample...')
if self.ema is not None:
self.ema.modify_to_inference()
suffix = '_ema'
else:
suffix = ''
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
model = self.model.module
else:
model = self.model
with torch.no_grad():
if self.debug == False:
if self.args.amp:
with autocast():
samples = model.sample(batch=batch, step=self.last_iter)
else:
samples = model.sample(batch=batch, step=self.last_iter)
else:
samples = model.sample(batch=batch[0].cuda(), step=self.last_iter)
step = self.last_iter if step_type == 'iteration' else self.last_epoch
for k, v in samples.items():
save_dir = os.path.join(self.image_dir, phase, k)
os.makedirs(save_dir, exist_ok=True)
| # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
try:
AMP = True
except:
print('Warning: import torch.amp failed, so no amp will be used!')
AMP = False
matplotlib.use('Agg')
STEP_WITH_LOSS_SCHEDULERS = (ReduceLROnPlateauWithWarmup, ReduceLROnPlateau)
class Solver(object):
    def __init__(self, config, args, model, dataloader, logger):
        """Prepare training state: schedules, optimizers, EMA, DDP and AMP.

        Args:
            config: resolved experiment config (dict-like with 'solver' and
                'dataloader' sections).
            args: launch arguments; fields used here include world_size,
                local_rank, distributed, gpu, amp, exp_name, global_rank.
            model: the model to train; moved to CUDA and optionally wrapped
                in DistributedDataParallel.
            dataloader: dict holding loaders plus 'train_iterations'.
            logger: experiment logger exposing log_info().
        """
        self.config = config
        self.args = args
        self.model = model
        self.dataloader = dataloader
        self.logger = logger
        self.max_epochs = config['solver']['max_epochs']
        self.save_epochs = config['solver']['save_epochs']
        self.save_iterations = config['solver'].get('save_iterations', -1)
        self.sample_iterations = config['solver']['sample_iterations']
        if self.sample_iterations == 'epoch':
            # 'epoch' means: sample once per training epoch.
            self.sample_iterations = self.dataloader['train_iterations']
        self.validation_epochs = config['solver'].get('validation_epochs', 2)
        assert isinstance(self.save_epochs, (int, list))
        assert isinstance(self.validation_epochs, (int, list))
        self.debug = config['solver'].get('debug', False)
        # -1 marks "not started yet"; updated as training progresses.
        self.last_epoch = -1
        self.last_iter = -1
        # self.ckpt_dir = os.path.join(args.save_dir, 'checkpoint')
        # self.image_dir = os.path.join(args.save_dir, 'images')
        # NOTE(review): relative paths — outputs land under the process CWD.
        self.ckpt_dir = "checkpoint"
        self.image_dir = "images"
        os.makedirs(self.ckpt_dir, exist_ok=True)
        os.makedirs(self.image_dir, exist_ok=True)
        # get grad_clipper
        if 'clip_grad_norm' in config['solver']:
            self.clip_grad_norm = instantiate_from_config(config['solver']['clip_grad_norm'])
        else:
            self.clip_grad_norm = None
        # get lr: optionally scale the base LR by the global batch size.
        adjust_lr = config['solver'].get('adjust_lr', 'sqrt')
        base_lr = config['solver'].get('base_lr', 1.0e-4)
        if adjust_lr == 'none':
            self.lr = base_lr
        elif adjust_lr == 'sqrt':
            self.lr = base_lr * math.sqrt(args.world_size * config['dataloader']['batch_size'])
        elif adjust_lr == 'linear':
            self.lr = base_lr * args.world_size * config['dataloader']['batch_size']
        else:
            raise NotImplementedError('Unknown type of adjust lr {}!'.format(adjust_lr))
        self.logger.log_info('Get lr {} from base lr {} with {}'.format(self.lr, base_lr, adjust_lr))
        # Let the model build its own optimizers if it knows how; else use ours.
        if hasattr(model, 'get_optimizer_and_scheduler') and callable(getattr(model, 'get_optimizer_and_scheduler')):
            optimizer_and_scheduler = model.get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers'])
        else:
            optimizer_and_scheduler = self._get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers'])
        assert type(optimizer_and_scheduler) == type({}), 'optimizer and schduler should be a dict!'
        self.optimizer_and_scheduler = optimizer_and_scheduler
        # configre for ema: only kept on the rank-0 process.
        if 'ema' in config['solver'] and args.local_rank == 0:
            ema_args = config['solver']['ema']
            ema_args = OmegaConf.to_container(copy.deepcopy(ema_args), resolve=True)
            ema_args['model'] = self.model
            self.ema = EMA(**ema_args)
        else:
            self.ema = None
        self.logger.log_info(str(get_model_parameters_info(self.model)))
        self.model.cuda()
        self.device = self.model.device
        if self.args.distributed:
            self.logger.log_info('Distributed, begin DDP the model...')
            self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.args.gpu], find_unused_parameters=False)
            self.logger.log_info('Distributed, DDP model done!')
        # prepare for amp: only enabled when both requested and importable.
        self.args.amp = self.args.amp and AMP
        if self.args.amp:
            self.scaler = GradScaler()
            self.logger.log_info('Using AMP for training!')
        self.logger.log_info("{}: global rank {}: prepare solver done!".format(self.args.exp_name,self.args.global_rank), check_primary=False)
        self.best_loss = float('inf')
    def _get_optimizer_and_scheduler(self, op_sc_list):
        """Build one optimizer (and optional LR scheduler) per config entry.

        Args:
            op_sc_list: list of config dicts, one per optimizer group. Each may
                carry 'name', epoch/iteration activation bounds, an 'optimizer'
                sub-config and an optional 'scheduler' sub-config.

        Returns:
            dict mapping group name -> {'name', 'start_epoch', 'end_epoch',
            'start_iteration', 'end_iteration', 'optimizer', 'scheduler'?}
            where 'optimizer'/'scheduler' wrap the instantiated module plus its
            'step_iteration' cadence.
        """
        optimizer_and_scheduler = {}
        for op_sc_cfg in op_sc_list:
            # Activation window defaults: always active ('none' group, no bounds).
            op_sc = {
                'name': op_sc_cfg.get('name', 'none'),
                'start_epoch': op_sc_cfg.get('start_epoch', 0),
                'end_epoch': op_sc_cfg.get('end_epoch', -1),
                'start_iteration': op_sc_cfg.get('start_iteration', 0),
                'end_iteration': op_sc_cfg.get('end_iteration', -1),
            }
            if op_sc['name'] == 'none':
                # parameters = self.model.parameters()
                # Default group: all trainable parameters of the model.
                parameters = filter(lambda p: p.requires_grad, self.model.parameters())
            else:
                # NOTE: get the parameters with the given name, the parameters() should be overide
                parameters = self.model.parameters(name=op_sc['name'])
            # build optimizer
            op_cfg = op_sc_cfg.get('optimizer', {'target': 'torch.optim.SGD', 'params': {}})
            # Deep-copy + resolve so we can mutate without touching the shared config.
            op_cfg = OmegaConf.to_container(copy.deepcopy(op_cfg), resolve=True)
            if 'params' not in op_cfg:
                op_cfg['params'] = {}
            if 'lr' not in op_cfg['params']:
                # Fall back to the solver-level (possibly batch-scaled) learning rate.
                op_cfg['params']['lr'] = self.lr
            op_cfg['params']['params'] = parameters
            optimizer = instantiate_from_config(op_cfg)
            op_sc['optimizer'] = {
                'module': optimizer,
                'step_iteration': op_cfg.get('step_iteration', 1)
            }
            assert isinstance(op_sc['optimizer']['step_iteration'], int), 'optimizer steps should be a integer number of iterations'
            # build scheduler
            if 'scheduler' in op_sc_cfg:
                sc_cfg = OmegaConf.to_container(copy.deepcopy(op_sc_cfg['scheduler']), resolve=True)
                sc_cfg['params']['optimizer'] = optimizer
                # for cosine annealing lr, compute T_max
                if sc_cfg['target'].split('.')[-1] in ['CosineAnnealingLRWithWarmup', 'CosineAnnealingLR']:
                    T_max = self.max_epochs * self.dataloader['train_iterations']
                    sc_cfg['params']['T_max'] = T_max
                scheduler = instantiate_from_config(sc_cfg)
                op_sc['scheduler'] = {
                    'module': scheduler,
                    'step_iteration': sc_cfg.get('step_iteration', 1)
                }
                if op_sc['scheduler']['step_iteration'] == 'epoch':
                    # 'epoch' cadence is expressed as one scheduler step per training epoch.
                    op_sc['scheduler']['step_iteration'] = self.dataloader['train_iterations']
            optimizer_and_scheduler[op_sc['name']] = op_sc
        return optimizer_and_scheduler
def _get_lr(self, return_type='str'):
lrs = {}
for op_sc_n, op_sc in self.optimizer_and_scheduler.items():
lr = op_sc['optimizer']['module'].state_dict()['param_groups'][0]['lr']
lrs[op_sc_n+'_lr'] = round(lr, 10)
if return_type == 'str':
lrs = str(lrs)
lrs = lrs.replace('none', 'lr').replace('{', '').replace('}','').replace('\'', '')
elif return_type == 'dict':
pass
else:
raise ValueError('Unknow of return type: {}'.format(return_type))
return lrs
def sample(self, batch, phase='train', step_type='iteration'):
tic = time.time()
self.logger.log_info('Begin to sample...')
if self.ema is not None:
self.ema.modify_to_inference()
suffix = '_ema'
else:
suffix = ''
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
model = self.model.module
else:
model = self.model
with torch.no_grad():
if self.debug == False:
if self.args.amp:
with autocast():
samples = model.sample(batch=batch, step=self.last_iter)
else:
samples = model.sample(batch=batch, step=self.last_iter)
else:
samples = model.sample(batch=batch[0].cuda(), step=self.last_iter)
step = self.last_iter if step_type == 'iteration' else self.last_epoch
for k, v in samples.items():
save_dir = os.path.join(self.image_dir, phase, k)
os.makedirs(save_dir, exist_ok=True) | save_path = os.path.join(save_dir, 'e{:010d}_itr{:010d}_rank{}{}'.format(self.last_epoch, self.last_iter%self.dataloader['train_iterations'], get_rank(), suffix)) | 0 | 2023-12-05 02:42:28+00:00 | 8k |
mkang315/ASF-YOLO | models/yolo.py | [
{
"identifier": "check_anchor_order",
"path": "utils/autoanchor.py",
"snippet": "def check_anchor_order(m):\n # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary\n a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer\n da = a... | import argparse
import contextlib
import os
import platform
import sys
import thop # for FLOPs computation
import yaml # for torch hub
from copy import deepcopy
from pathlib import Path
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
from utils.plots import feature_visualization
from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
time_sync) | 5,178 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
YOLO-specific modules
Usage:
$ python models/yolo.py --cfg yolov5s.yaml
"""
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if platform.system() != 'Windows':
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
try:
except ImportError:
thop = None
class Detect(nn.Module):
    # YOLOv5 Detect head for detection models
    """Detection head: per-scale 1x1 convs producing anchor-box predictions,
    decoded into absolute xywh + confidences at inference time."""
    stride = None  # strides computed during build
    dynamic = False  # force grid reconstruction
    export = False  # export mode

    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
        """nc: number of classes; anchors: per-layer anchor wh pairs;
        ch: input channels per detection layer; inplace: use inplace ops."""
        super().__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.empty(0) for _ in range(self.nl)]  # init grid
        self.anchor_grid = [torch.empty(0) for _ in range(self.nl)]  # init anchor grid
        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
        self.inplace = inplace  # use inplace ops (e.g. slice assignment)

    def forward(self, x):
        """x: list of nl feature maps. Training returns the reshaped raw maps;
        inference additionally returns decoded predictions (bs, n_boxes, no)."""
        z = []  # inference output
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
            if not self.training:  # inference
                # Rebuild the cell/anchor grids when the feature size changed.
                if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
                if isinstance(self, Segment):  # (boxes + masks)
                    xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
                    xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
                else:  # Detect (boxes only)
                    xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf), 4)
                z.append(y.view(bs, self.na * nx * ny, self.no))
        return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)

    def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
        """Build the (1, na, ny, nx, 2) cell-offset grid and stride-scaled
        anchor grid for detection layer i."""
        d = self.anchors[i].device
        t = self.anchors[i].dtype
        shape = 1, self.na, ny, nx, 2  # grid shape
        y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
        yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x)  # torch>=0.7 compatibility
        grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e. y = 2.0 * x - 0.5
        anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
        return grid, anchor_grid
class Segment(Detect):
    # YOLOv5 Segment head for segmentation models
    """Extends Detect with mask-coefficient outputs and a prototype branch."""

    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
        """nm: number of mask coefficients per anchor; npr: proto channels."""
        super().__init__(nc, anchors, ch, inplace)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.no = 5 + nc + self.nm  # number of outputs per anchor
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
        self.proto = Proto(ch[0], self.npr, self.nm)  # protos
        self.detect = Detect.forward

    def forward(self, x):
        """Run protos on the highest-res feature map, then the shared Detect
        forward; returns (detections, protos[, raw maps]) per mode."""
        p = self.proto(x[0])
        x = self.detect(self, x)
        return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
class BaseModel(nn.Module):
# YOLOv5 base model
def forward(self, x, profile=False, visualize=False):
return self._forward_once(x, profile, visualize) # single-scale inference, train
def _forward_once(self, x, profile=False, visualize=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
self._profile_one_layer(m, x, dt)
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if visualize:
| # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
YOLO-specific modules
Usage:
$ python models/yolo.py --cfg yolov5s.yaml
"""
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
if platform.system() != 'Windows':
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
try:
except ImportError:
thop = None
class Detect(nn.Module):
    # YOLOv5 Detect head for detection models
    """Detection head: per-scale 1x1 convs producing anchor-box predictions,
    decoded into absolute xywh + confidences at inference time."""
    stride = None  # strides computed during build
    dynamic = False  # force grid reconstruction
    export = False  # export mode

    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
        """nc: number of classes; anchors: per-layer anchor wh pairs;
        ch: input channels per detection layer; inplace: use inplace ops."""
        super().__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.empty(0) for _ in range(self.nl)]  # init grid
        self.anchor_grid = [torch.empty(0) for _ in range(self.nl)]  # init anchor grid
        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
        self.inplace = inplace  # use inplace ops (e.g. slice assignment)

    def forward(self, x):
        """x: list of nl feature maps. Training returns the reshaped raw maps;
        inference additionally returns decoded predictions (bs, n_boxes, no)."""
        z = []  # inference output
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
            if not self.training:  # inference
                # Rebuild the cell/anchor grids when the feature size changed.
                if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
                if isinstance(self, Segment):  # (boxes + masks)
                    xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
                    xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
                else:  # Detect (boxes only)
                    xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf), 4)
                z.append(y.view(bs, self.na * nx * ny, self.no))
        return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)

    def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
        """Build the (1, na, ny, nx, 2) cell-offset grid and stride-scaled
        anchor grid for detection layer i."""
        d = self.anchors[i].device
        t = self.anchors[i].dtype
        shape = 1, self.na, ny, nx, 2  # grid shape
        y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
        yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x)  # torch>=0.7 compatibility
        grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e. y = 2.0 * x - 0.5
        anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
        return grid, anchor_grid
class Segment(Detect):
    # YOLOv5 Segment head for segmentation models
    """Extends Detect with mask-coefficient outputs and a prototype branch."""

    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
        """nm: number of mask coefficients per anchor; npr: proto channels."""
        super().__init__(nc, anchors, ch, inplace)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.no = 5 + nc + self.nm  # number of outputs per anchor
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
        self.proto = Proto(ch[0], self.npr, self.nm)  # protos
        self.detect = Detect.forward

    def forward(self, x):
        """Run protos on the highest-res feature map, then the shared Detect
        forward; returns (detections, protos[, raw maps]) per mode."""
        p = self.proto(x[0])
        x = self.detect(self, x)
        return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
class BaseModel(nn.Module):
# YOLOv5 base model
def forward(self, x, profile=False, visualize=False):
return self._forward_once(x, profile, visualize) # single-scale inference, train
def _forward_once(self, x, profile=False, visualize=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
self._profile_one_layer(m, x, dt)
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if visualize: | feature_visualization(x, m.type, m.i, save_dir=visualize) | 6 | 2023-12-10 14:18:29+00:00 | 8k |
mmathew23/improved_edm | train.py | [
{
"identifier": "KarrasPipeline",
"path": "pipeline.py",
"snippet": "class KarrasPipeline(DiffusionPipeline):\n model_cpu_offload_seq = \"unet\"\n\n def __init__(self, unet, scheduler, method='euler'):\n super().__init__()\n\n # we ignore this, just having a scheduler for HF compatib... | import torch
import torch.nn.functional as F
import hydra
import os
import shutil
import math
import numpy as np
import re
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from diffusers.utils import make_image_grid
from torchvision.transforms import Compose, ToTensor, Normalize, RandomHorizontalFlip
from omegaconf import DictConfig
from hydra.core.hydra_config import HydraConfig
from diffusers.optimization import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup
from diffusers import EMAModel
from pipeline import KarrasPipeline
from accelerate import Accelerator, DistributedDataParallelKwargs
from accelerate.utils import LoggerType
from tqdm import tqdm
from datasets import load_dataset
from model import UNet2DModel | 5,760 |
class Sampler(torch.utils.data.Sampler):
    """Endless index sampler with locally-perturbed shuffling.

    The index order is shuffled once up front; after each yield, the
    just-used position is swapped with a random position at most ``window``
    slots behind it (window = half the dataset size, min 3), so the stream
    keeps slowly re-mixing instead of cycling a fixed permutation.
    """

    def __init__(self, dataset_length, seed=31129347):
        self.dataset_length = dataset_length
        self.seed = seed

    def __iter__(self):
        rng = np.random.RandomState(self.seed)
        order = np.arange(self.dataset_length)
        rng.shuffle(order)
        window = int(np.rint(order.size * 0.5))
        if window < 2:
            window = 3
        pos = 0
        while True:
            pos = pos % len(order)
            yield order[pos]
            # Swap the emitted index with a random recent one.
            partner = (pos - rng.randint(window)) % order.size
            order[pos], order[partner] = order[partner], order[pos]
            pos += 1
def get_total_steps(config):
    """Translate the configured training length (in kimg) into optimizer steps.

    Divides the total image count by the global batch size; the +0.5 before
    round() acts as a ceiling guard against float-precision round-down.
    """
    total_images = config.num_train_kimg * 1000
    # round up, round since casting may round down due to fp precision
    steps = total_images / config.train_batch_size + 0.5
    return int(round(steps))
def map_wrapper(func, from_key, to_key):
    """Adapt ``func`` into a datasets-style ``map`` callable.

    The returned callable reads ``example[from_key]``, applies ``func`` and
    stores the result under ``to_key``, returning the (mutated) example.
    """
    def _apply(example):
        example[to_key] = func(example[from_key])
        return example
    return _apply
def get_inverse_sqrt_schedule(optimizer, num_warmup_steps, t_ref):
    """LambdaLR: linear warmup, then lr scale = 1/sqrt((step - warmup) / t_ref).

    The decay argument is clamped to >= 1, so the scale never exceeds 1 after
    warmup ends.
    """
    def scale_for(step):
        # Linear ramp from 0 to 1 over the warmup phase.
        if step < num_warmup_steps:
            return float(step) / float(max(1.0, num_warmup_steps))
        progress = max(1.0, (step - num_warmup_steps) / t_ref)
        return 1.0 / math.sqrt(progress)
    return LambdaLR(optimizer, scale_for)
def evaluate(config, step, pipeline):
    """Generate a seeded sample grid from ``pipeline`` and save it to disk.

    When the UNet is class-conditional, the first ``val_batch_size`` class ids
    are tiled as labels; otherwise sampling is unconditional. The grid is
    written to <output_dir>/samples/<step>_<i>.png.
    """
    if 'num_class_embeds' in config.unet:
        # NOTE(review): labels are created on 'cuda:0' — assumes a GPU run.
        labels = torch.arange(config.unet.num_class_embeds, device='cuda:0')[:config.val_batch_size]
        if labels.shape[0] < config.val_batch_size:
            # Tile the class ids until the batch is filled, then truncate.
            labels = labels.repeat(config.val_batch_size//labels.shape[0] + 1)
            labels = labels[:config.val_batch_size]
    else:
        labels = None
    for i in range(1):
        # Fixed seed per grid index so samples are reproducible across calls.
        images = pipeline(
            batch_size=config.val_batch_size,
            class_labels=labels,
            generator=torch.manual_seed(config.seed+i),
        ).images
        # Arrange images into a near-square grid.
        cols = math.ceil(np.sqrt(len(images)))
        rows = math.ceil(len(images)/cols)
        image_grid = make_image_grid(images, rows=rows, cols=cols)
        test_dir = os.path.join(config.output_dir, "samples")
        os.makedirs(test_dir, exist_ok=True)
        image_grid.save(f"{test_dir}/{step:04d}_{i:03d}.png")
def get_sigma(batch_size, P_mean, P_std, device):
    """Draw per-sample noise levels from a log-normal prior.

    Samples z ~ N(0, 1) per batch element and returns exp(z * P_std + P_mean),
    shaped (batch_size, 1, 1, 1) so it broadcasts over image tensors.
    """
    z = torch.randn([batch_size, 1, 1, 1], device=device)
    return torch.exp(z * P_std + P_mean)
def get_sigma_weight(sigma, sigma_data):
    """Loss weight lambda(sigma) = (sigma^2 + sigma_data^2) / (sigma * sigma_data)^2."""
    numerator = sigma ** 2 + sigma_data ** 2
    denominator = (sigma * sigma_data) ** 2
    return numerator / denominator
def add_noise(sample, noise, sigma):
    """Return ``sample + noise * sigma``.

    ``noise`` is scaled in place, so the caller's tensor is modified.
    """
    noise.mul_(sigma)
    return sample + noise
def replace_grad_nans(model):
    """Zero out NaN/+inf/-inf entries in every gradient of ``model`` (in place)."""
    for _, param in model.named_parameters():
        grad = param.grad
        if param.requires_grad and grad is not None:
            # Sanitize in place: nan -> 0, +inf -> 0, -inf -> 0.
            torch.nan_to_num(grad, nan=0.0, posinf=0.0, neginf=0.0, out=grad)
def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler):
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
assert 'gradient_accumulation_steps' in config and config.gradient_accumulation_steps >= 1
accelerator = Accelerator(
mixed_precision=config.mixed_precision,
gradient_accumulation_steps=config.gradient_accumulation_steps,
log_with=[LoggerType.TENSORBOARD, 'wandb'],
project_dir=os.path.join(config.output_dir, "logs"),
kwargs_handlers=[ddp_kwargs],
split_batches=True
)
is_distributed = accelerator.num_processes > 1
if config.use_ema:
|
class Sampler(torch.utils.data.Sampler):
    """Endless index sampler: one up-front shuffle, then a local random swap
    after every yield so the stream keeps slowly re-mixing."""

    def __init__(self, dataset_length, seed=31129347):
        self.dataset_length = dataset_length
        self.seed = seed

    def __iter__(self):
        rnd = np.random.RandomState(self.seed)
        order = np.arange(self.dataset_length)
        rnd.shuffle(order)
        # Swap window = half the dataset size, minimum 3.
        window = int(np.rint(order.size * 0.5))
        if window < 2:
            window = 3

        idx = 0
        while True:
            idx = idx % len(order)
            yield order[idx]
            # Swap the emitted index with a random one at most `window` slots behind.
            j = (idx - rnd.randint(window)) % order.size
            order[idx], order[j] = order[j], order[idx]
            idx += 1
def get_total_steps(config):
    """Translate the configured training length (in kimg) into optimizer steps,
    rounding up via the +0.5 guard."""
    # round up, round since casting may round down due to fp precision
    total_steps = int(round(config.num_train_kimg * 1000 / (config.train_batch_size) + 0.5))
    return total_steps
def map_wrapper(func, from_key, to_key):
    """Adapt ``func`` into a datasets-style ``map`` callable: read
    ``example[from_key]``, apply ``func``, store under ``to_key``."""
    def wrapper(example):
        example[to_key] = func(example[from_key])
        return example
    return wrapper
def get_inverse_sqrt_schedule(optimizer, num_warmup_steps, t_ref):
    """LambdaLR: linear warmup, then lr scale = 1/sqrt((step - warmup) / t_ref),
    clamped so the scale never exceeds 1 after warmup."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linear ramp from 0 to 1 over the warmup phase.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0 / math.sqrt(max(1.0, (current_step - num_warmup_steps) / t_ref))

    return LambdaLR(optimizer, lr_lambda)
def evaluate(config, step, pipeline):
    """Generate a seeded sample grid from ``pipeline`` and save it under
    <output_dir>/samples/. Class-conditional models get tiled class labels."""
    if 'num_class_embeds' in config.unet:
        # NOTE(review): labels are created on 'cuda:0' — assumes a GPU run.
        labels = torch.arange(config.unet.num_class_embeds, device='cuda:0')[:config.val_batch_size]
        if labels.shape[0] < config.val_batch_size:
            # Tile the class ids until the batch is filled, then truncate.
            labels = labels.repeat(config.val_batch_size//labels.shape[0] + 1)
            labels = labels[:config.val_batch_size]
    else:
        labels = None
    for i in range(1):
        # Fixed seed per grid index so samples are reproducible across calls.
        images = pipeline(
            batch_size=config.val_batch_size,
            class_labels=labels,
            generator=torch.manual_seed(config.seed+i),
        ).images
        # Arrange images into a near-square grid.
        cols = math.ceil(np.sqrt(len(images)))
        rows = math.ceil(len(images)/cols)
        image_grid = make_image_grid(images, rows=rows, cols=cols)
        test_dir = os.path.join(config.output_dir, "samples")
        os.makedirs(test_dir, exist_ok=True)
        image_grid.save(f"{test_dir}/{step:04d}_{i:03d}.png")
def get_sigma(batch_size, P_mean, P_std, device):
    """Draw per-sample noise levels from a log-normal prior:
    exp(N(0,1) * P_std + P_mean), shaped (batch_size, 1, 1, 1) for broadcasting."""
    sigma = torch.randn([batch_size, 1, 1, 1], device=device)
    sigma = (sigma*P_std + P_mean).exp()
    return sigma
def get_sigma_weight(sigma, sigma_data):
    """Loss weight lambda(sigma) = (sigma^2 + sigma_data^2) / (sigma * sigma_data)^2."""
    w = (sigma**2 + sigma_data**2) / (sigma*sigma_data)**2
    return w
def add_noise(sample, noise, sigma):
    """Return sample + noise * sigma. Note: ``noise`` is scaled in place,
    so the caller's tensor is modified."""
    noise *= sigma
    return sample+noise
def replace_grad_nans(model):
    """Zero out NaN/+inf/-inf entries in every gradient of ``model`` (in place)."""
    # Iterate through all parameters
    for name, param in model.named_parameters():
        if param.requires_grad and param.grad is not None:
            # Replace nan, inf, -inf in gradients with 0
            torch.nan_to_num(param.grad, nan=0.0, posinf=0.0, neginf=0.0, out=param.grad)
def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler):
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
assert 'gradient_accumulation_steps' in config and config.gradient_accumulation_steps >= 1
accelerator = Accelerator(
mixed_precision=config.mixed_precision,
gradient_accumulation_steps=config.gradient_accumulation_steps,
log_with=[LoggerType.TENSORBOARD, 'wandb'],
project_dir=os.path.join(config.output_dir, "logs"),
kwargs_handlers=[ddp_kwargs],
split_batches=True
)
is_distributed = accelerator.num_processes > 1
if config.use_ema: | ema = EMAModel(model.parameters(), 0.999, model_cls=UNet2DModel, model_config=model.config) | 1 | 2023-12-08 16:23:47+00:00 | 8k |
youngskkim/CRN | models/base_bev_depth.py | [
{
"identifier": "BaseLSSFPN",
"path": "layers/backbones/base_lss_fpn.py",
"snippet": "class BaseLSSFPN(nn.Module):\n def __init__(self, x_bound, y_bound, z_bound, d_bound, final_dim,\n downsample_factor, output_channels, img_backbone_conf,\n img_neck_conf, depth_net_co... | import mmcv
import torch
from torch import nn
from layers.backbones.base_lss_fpn import BaseLSSFPN
from layers.heads.bev_depth_head_det import BEVDepthHead | 6,358 |
logger = mmcv.utils.get_logger('mmdet')
logger.setLevel('WARNING')
__all__ = ['BaseBEVDepth']
class BaseBEVDepth(nn.Module):
"""Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.
Args:
backbone_conf (dict): Config of backbone.
head_conf (dict): Config of head.
"""
def __init__(self, backbone_conf, head_conf):
super(BaseBEVDepth, self).__init__()
self.backbone_img = BaseLSSFPN(**backbone_conf)
|
logger = mmcv.utils.get_logger('mmdet')
logger.setLevel('WARNING')
__all__ = ['BaseBEVDepth']
class BaseBEVDepth(nn.Module):
"""Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.
Args:
backbone_conf (dict): Config of backbone.
head_conf (dict): Config of head.
"""
def __init__(self, backbone_conf, head_conf):
super(BaseBEVDepth, self).__init__()
self.backbone_img = BaseLSSFPN(**backbone_conf) | self.head = BEVDepthHead(**head_conf) | 1 | 2023-12-06 14:57:49+00:00 | 8k |
felixcheng97/AGAP | run.py | [
{
"identifier": "utils",
"path": "lib/utils.py",
"snippet": "def create_optimizer_or_freeze_model(model, cfg_train, global_step):\ndef load_checkpoint(model, optimizer, ckpt_path, no_reload_optimizer):\ndef load_model(model_class, ckpt_path):\ndef rgb_ssim(img0, img1, max_val,\n filter_size=... | import os, sys, copy, glob, json, time, random, argparse
import math
import mmcv
import imageio
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from shutil import copyfile
from tqdm import tqdm, trange
from lib import utils, dvgo, dmpigo, dpvgo
from lib.load_data import load_data
from torch_efficient_distloss import flatten_eff_distloss
from PIL import Image | 5,166 | psnrs.append(p)
if eval_ssim:
ssims.append(utils.rgb_ssim(rgb, gt_imgs[i], max_val=1))
if len(psnrs):
print('Testing psnr', np.mean(psnrs), '(avg)')
if eval_ssim: print('Testing ssim', np.mean(ssims), '(avg)')
if render_video_flipy:
for i in range(len(rgbs)):
rgbs[i] = np.flip(rgbs[i], axis=0)
depths[i] = np.flip(depths[i], axis=0)
bgmaps[i] = np.flip(bgmaps[i], axis=0)
if render_video_rot90 != 0:
for i in range(len(rgbs)):
rgbs[i] = np.rot90(rgbs[i], k=render_video_rot90, axes=(0,1))
depths[i] = np.rot90(depths[i], k=render_video_rot90, axes=(0,1))
bgmaps[i] = np.rot90(bgmaps[i], k=render_video_rot90, axes=(0,1))
if savedir is not None and dump_images:
for i in trange(len(rgbs)):
rgb8 = utils.to8b(rgbs[i])
filename = os.path.join(savedir, '{:03d}.png'.format(i))
imageio.imwrite(filename, rgb8)
rgbs = np.array(rgbs)
depths = np.array(depths)
bgmaps = np.array(bgmaps)
return rgbs, depths, bgmaps
def seed_everything():
    '''Seed everything for better reproducibility.
    (some pytorch operation is non-deterministic like the backprop of grid_samples)
    '''
    # Reads the module-level `args` (parsed CLI arguments) for the seed value.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
def load_everything(args, cfg):
    '''Load images / poses / camera settings / data split.

    Returns a dict restricted to the keys the training loop needs, with
    images and poses converted to torch tensors (images stay on CPU).
    '''
    data_dict = load_data(cfg.data)
    # remove useless field
    kept_keys = {
        'hwf', 'HW', 'Ks', 'Ks_render', 'near', 'far', 'near_clip',
        'i_train', 'i_val', 'i_test', 'irregular_shape',
        'poses', 'render_poses', 'images'}
    for k in list(data_dict.keys()):
        if k not in kept_keys:
            data_dict.pop(k)
    # construct data tensor
    if data_dict['irregular_shape']:
        # Per-image tensors when images have differing resolutions.
        data_dict['images'] = [torch.FloatTensor(im, device='cpu') for im in data_dict['images']]
    else:
        data_dict['images'] = torch.FloatTensor(data_dict['images'], device='cpu')
    data_dict['poses'] = torch.Tensor(data_dict['poses'])
    return data_dict
def _compute_bbox_by_cam_frustrm_bounded(cfg, HW, Ks, poses, i_train, near, far):
    """Axis-aligned bbox enclosing all training camera frustums.

    For each training view, casts rays and takes the near/far endpoints
    (ray directions in NDC, view directions otherwise), accumulating the
    elementwise min/max over all points.
    """
    xyz_min = torch.Tensor([np.inf, np.inf, np.inf])
    xyz_max = -xyz_min
    for (H, W), K, c2w in zip(HW[i_train], Ks[i_train], poses[i_train]):
        rays_o, rays_d, viewdirs = dvgo.get_rays_of_a_view(
                H=H, W=W, K=K, c2w=c2w,
                ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
                flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        if cfg.data.ndc:
            # In NDC the un-normalized ray direction parameterizes depth.
            pts_nf = torch.stack([rays_o+rays_d*near, rays_o+rays_d*far])
        else:
            pts_nf = torch.stack([rays_o+viewdirs*near, rays_o+viewdirs*far])
        xyz_min = torch.minimum(xyz_min, pts_nf.amin((0,1,2)))
        xyz_max = torch.maximum(xyz_max, pts_nf.amax((0,1,2)))
    return xyz_min, xyz_max
def compute_bbox_by_cam_frustrm(args, cfg, HW, Ks, poses, i_train, near, far, **kwargs):
    """Compute the scene bounding box.

    Panorama scenes get a symmetric cube of half-extent ``far``; otherwise
    the bbox is derived from the training camera frustums.
    """
    print('compute_bbox_by_cam_frustrm: start')
    if cfg.data.panorama:
        xyz_min, xyz_max = -torch.tensor([far, far, far]).float(), torch.tensor([far, far, far]).float()
    else:
        xyz_min, xyz_max = _compute_bbox_by_cam_frustrm_bounded(
                cfg, HW, Ks, poses, i_train, near, far)
    print('compute_bbox_by_cam_frustrm: xyz_min', xyz_min)
    print('compute_bbox_by_cam_frustrm: xyz_max', xyz_max)
    print('compute_bbox_by_cam_frustrm: finish')
    return xyz_min, xyz_max
def create_new_model(cfg, cfg_model, cfg_train, xyz_min, xyz_max, stage, coarse_ckpt_path):
    # Instantiate a fresh scene model sized for the current training stage.
    model_kwargs = copy.deepcopy(cfg_model)
    num_voxels = model_kwargs.pop('num_voxels')
    if len(cfg_train.pg_scale):
        # Progressive growing: start with fewer voxels (doubled at each pg step).
        num_voxels = int(num_voxels / (2**len(cfg_train.pg_scale)))
    if cfg.fine_model_and_render.image_size:
        image_size = model_kwargs.pop('image_size')
        if len(cfg_train.pg_image_scale):
            # Start at a reduced image resolution for progressive image scaling.
            image_size = (image_size[0] // (2**len(cfg_train.pg_image_scale)), image_size[1] // (2**len(cfg_train.pg_image_scale)))
        model_kwargs['image_size'] = image_size
    if cfg.fine_model_and_render.equ_size:
        equ_size = model_kwargs.pop('equ_size')
        if len(cfg_train.pg_equ_scale):
            # Same progressive-scaling scheme for the equirectangular map size.
            equ_size = (equ_size[0] // (2**len(cfg_train.pg_equ_scale)), equ_size[1] // (2**len(cfg_train.pg_equ_scale)))
        model_kwargs['equ_size'] = equ_size
    if cfg.data.ndc:
        print(f'scene_rep_reconstruction ({stage}): \033[96muse multiplane images\033[0m')
        model = dmpigo.DirectMPIGO(
            xyz_min=xyz_min, xyz_max=xyz_max,
            num_voxels=num_voxels,
            **model_kwargs)
    elif cfg.data.panorama:
    # NOTE(review): this branch has no body in this copy of the file — the
    # function appears truncated here (a later duplicate suggests it builds a
    # panorama-specific model). Confirm against the full source before use.
|
def config_parser():
    '''Define command line arguments
    '''
    arg_parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('--config', required=True,
                            help='config file path')
    arg_parser.add_argument("--seed", type=int, default=777,
                            help='Random seed')
    arg_parser.add_argument("--no_reload", action='store_true',
                            help='do not reload weights from saved ckpt')
    arg_parser.add_argument("--no_reload_optimizer", action='store_true',
                            help='do not reload optimizer state from saved ckpt')
    arg_parser.add_argument("--ft_path", type=str, default='',
                            help='specific weights npy file to reload for coarse network')
    # testing options
    arg_parser.add_argument("--render_only", action='store_true',
                            help='do not optimize, reload weights and render out render_poses path')
    # Plain boolean switches (no help text), registered in their original order.
    for flag in ("--render_test", "--render_train", "--render_video",
                 "--render_image", "--render_video_flipy"):
        arg_parser.add_argument(flag, action='store_true')
    arg_parser.add_argument("--render_video_rot90", default=0, type=int)
    arg_parser.add_argument("--render_video_factor", type=float, default=0,
                            help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
    for flag in ("--dump_images", "--eval_ssim"):
        arg_parser.add_argument(flag, action='store_true')
    arg_parser.add_argument("--edit", type=str, default='', help='filename of edited k0_xxx.png')
    arg_parser.add_argument("--render_panorama", action='store_true')
    # logging/saving options
    arg_parser.add_argument("--i_print", type=int, default=500,
                            help='frequency of console printout and metric loggin')
    arg_parser.add_argument("--i_weights", type=int, default=100000,
                            help='frequency of weight ckpt saving')
    return arg_parser
@torch.no_grad()
def render_viewpoints(model, render_poses, HW, Ks, ndc, render_kwargs,
                      gt_imgs=None, savedir=None, dump_images=False,
                      render_factor=0, render_video_flipy=False, render_video_rot90=0,
                      eval_ssim=False, render_panorama=False):
    '''Render images for the given viewpoints; run evaluation if gt given.

    Args:
        model: scene model; called per ray chunk and expected to return
            'rgb_marched', 'depth' and 'alphainv_last' entries.
        render_poses: camera-to-world matrices, one per view.
        HW: per-view (H, W) image sizes.
        Ks: per-view camera intrinsics.
        ndc: whether rays are cast in NDC space.
        render_kwargs: extra kwargs forwarded to `model`; must contain 'inverse_y'.
        gt_imgs: optional ground-truth images; enables PSNR (and SSIM) evaluation.
        savedir / dump_images: where / whether to save rendered PNGs.
        render_factor: if non-zero, downscale resolution by this factor.
        render_video_flipy / render_video_rot90: post-hoc flip / 90-degree rotations.
        eval_ssim: also report SSIM (only used when gt_imgs is given).
        render_panorama: cast panorama rays instead of pinhole rays.

    Returns:
        (rgbs, depths, bgmaps) stacked into numpy arrays over the views.
    '''
    assert len(render_poses) == len(HW) and len(HW) == len(Ks)
    if render_factor!=0:
        # Uniformly shrink resolution and intrinsics for fast preview renders.
        HW = np.copy(HW)
        Ks = np.copy(Ks)
        HW = (HW/render_factor).astype(int)
        Ks[:, :2, :3] /= render_factor
    rgbs = []
    depths = []
    bgmaps = []
    psnrs = []
    ssims = []
    for i, c2w in enumerate(tqdm(render_poses)):
        H, W = HW[i]
        K = Ks[i]
        c2w = torch.Tensor(c2w)
        if not render_panorama:
            # NOTE(review): flip_x/flip_y are read from the module-level `cfg`,
            # not from arguments — confirm the global is initialized before use.
            rays_o, rays_d, viewdirs = dvgo.get_rays_of_a_view(
                H, W, K, c2w, ndc, inverse_y=render_kwargs['inverse_y'],
                flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        else:
            rays_o, rays_d, viewdirs = dvgo.get_ray_of_a_panorama(
                H, W, c2w
            )
        keys = ['rgb_marched', 'depth', 'alphainv_last']
        # Flatten the H x W ray grid so rays can be processed in chunks.
        rays_o = rays_o.flatten(0,-2)
        rays_d = rays_d.flatten(0,-2)
        viewdirs = viewdirs.flatten(0,-2)
        # Render 8192 rays at a time to bound peak memory; keep only `keys`.
        render_result_chunks = [
            {k: v for k, v in model(ro, rd, vd, **render_kwargs).items() if k in keys}
            for ro, rd, vd in zip(rays_o.split(8192, 0), rays_d.split(8192, 0), viewdirs.split(8192, 0))
        ]
        # Re-assemble the chunked outputs back into H x W maps.
        render_result = {
            k: torch.cat([ret[k] for ret in render_result_chunks]).reshape(H,W,-1)
            for k in render_result_chunks[0].keys()
        }
        rgb = render_result['rgb_marched'].cpu().numpy()
        depth = render_result['depth'].cpu().numpy()
        bgmap = render_result['alphainv_last'].cpu().numpy()
        rgbs.append(rgb)
        depths.append(depth)
        bgmaps.append(bgmap)
        if i==0:
            print('Testing', rgb.shape)
        if gt_imgs is not None and render_factor==0:
            # PSNR from MSE; the max_val=1 below implies images in [0, 1].
            p = -10. * np.log10(np.mean(np.square(rgb - gt_imgs[i])))
            psnrs.append(p)
            if eval_ssim:
                ssims.append(utils.rgb_ssim(rgb, gt_imgs[i], max_val=1))
    if len(psnrs):
        print('Testing psnr', np.mean(psnrs), '(avg)')
        if eval_ssim: print('Testing ssim', np.mean(ssims), '(avg)')
    if render_video_flipy:
        # Flip every frame vertically for video output.
        for i in range(len(rgbs)):
            rgbs[i] = np.flip(rgbs[i], axis=0)
            depths[i] = np.flip(depths[i], axis=0)
            bgmaps[i] = np.flip(bgmaps[i], axis=0)
    if render_video_rot90 != 0:
        # Rotate each frame by the requested number of 90-degree turns.
        for i in range(len(rgbs)):
            rgbs[i] = np.rot90(rgbs[i], k=render_video_rot90, axes=(0,1))
            depths[i] = np.rot90(depths[i], k=render_video_rot90, axes=(0,1))
            bgmaps[i] = np.rot90(bgmaps[i], k=render_video_rot90, axes=(0,1))
    if savedir is not None and dump_images:
        for i in trange(len(rgbs)):
            rgb8 = utils.to8b(rgbs[i])
            filename = os.path.join(savedir, '{:03d}.png'.format(i))
            imageio.imwrite(filename, rgb8)
    rgbs = np.array(rgbs)
    depths = np.array(depths)
    bgmaps = np.array(bgmaps)
    return rgbs, depths, bgmaps
def seed_everything(seed=None):
    '''Seed everything for better reproducibility.
    (some pytorch operation is non-deterministic like the backprop of grid_samples)

    Args:
        seed: optional int seed. When omitted, falls back to the module-level
            ``args.seed`` parsed by ``config_parser()`` (original behavior).
    '''
    if seed is None:
        # Preserve the original reliance on the global `args`; new callers
        # can pass an explicit seed and avoid the global entirely.
        seed = args.seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
def load_everything(args, cfg):
    '''Load images / poses / camera settings / data split.
    '''
    data_dict = load_data(cfg.data)
    # Drop every field the training loop never touches.
    kept_keys = {
        'hwf', 'HW', 'Ks', 'Ks_render', 'near', 'far', 'near_clip',
        'i_train', 'i_val', 'i_test', 'irregular_shape',
        'poses', 'render_poses', 'images'}
    for stale_key in set(data_dict.keys()) - kept_keys:
        data_dict.pop(stale_key)
    # Convert image data to float tensors (kept on CPU; moved to device later).
    images = data_dict['images']
    if data_dict['irregular_shape']:
        # Per-view resolutions differ, so keep a list of tensors.
        data_dict['images'] = [torch.FloatTensor(im, device='cpu') for im in images]
    else:
        data_dict['images'] = torch.FloatTensor(images, device='cpu')
    data_dict['poses'] = torch.Tensor(data_dict['poses'])
    return data_dict
def _compute_bbox_by_cam_frustrm_bounded(cfg, HW, Ks, poses, i_train, near, far):
    """Tight world-space bbox covering the near/far frustum planes of the training cameras."""
    bbox_min = torch.Tensor([np.inf, np.inf, np.inf])
    bbox_max = -bbox_min
    for (H, W), K, c2w in zip(HW[i_train], Ks[i_train], poses[i_train]):
        rays_o, rays_d, viewdirs = dvgo.get_rays_of_a_view(
            H=H, W=W, K=K, c2w=c2w,
            ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y,
            flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y)
        # In NDC space march along rays_d; otherwise along unit view directions.
        march_dirs = rays_d if cfg.data.ndc else viewdirs
        pts_nf = torch.stack([rays_o + march_dirs * near, rays_o + march_dirs * far])
        bbox_min = torch.minimum(bbox_min, pts_nf.amin((0, 1, 2)))
        bbox_max = torch.maximum(bbox_max, pts_nf.amax((0, 1, 2)))
    return bbox_min, bbox_max
def compute_bbox_by_cam_frustrm(args, cfg, HW, Ks, poses, i_train, near, far, **kwargs):
    """Choose the scene bounding box: a far-sized cube for panoramas, else a camera-frustum fit."""
    print('compute_bbox_by_cam_frustrm: start')
    if cfg.data.panorama:
        # Panorama capture: symmetric cube of half-extent `far` around the origin.
        bound = torch.tensor([far, far, far]).float()
        xyz_min, xyz_max = -bound, bound
    else:
        xyz_min, xyz_max = _compute_bbox_by_cam_frustrm_bounded(
            cfg, HW, Ks, poses, i_train, near, far)
    print('compute_bbox_by_cam_frustrm: xyz_min', xyz_min)
    print('compute_bbox_by_cam_frustrm: xyz_max', xyz_max)
    print('compute_bbox_by_cam_frustrm: finish')
    return xyz_min, xyz_max
def create_new_model(cfg, cfg_model, cfg_train, xyz_min, xyz_max, stage, coarse_ckpt_path):
model_kwargs = copy.deepcopy(cfg_model)
num_voxels = model_kwargs.pop('num_voxels')
if len(cfg_train.pg_scale):
num_voxels = int(num_voxels / (2**len(cfg_train.pg_scale)))
if cfg.fine_model_and_render.image_size:
image_size = model_kwargs.pop('image_size')
if len(cfg_train.pg_image_scale):
image_size = (image_size[0] // (2**len(cfg_train.pg_image_scale)), image_size[1] // (2**len(cfg_train.pg_image_scale)))
model_kwargs['image_size'] = image_size
if cfg.fine_model_and_render.equ_size:
equ_size = model_kwargs.pop('equ_size')
if len(cfg_train.pg_equ_scale):
equ_size = (equ_size[0] // (2**len(cfg_train.pg_equ_scale)), equ_size[1] // (2**len(cfg_train.pg_equ_scale)))
model_kwargs['equ_size'] = equ_size
if cfg.data.ndc:
print(f'scene_rep_reconstruction ({stage}): \033[96muse multiplane images\033[0m')
model = dmpigo.DirectMPIGO(
xyz_min=xyz_min, xyz_max=xyz_max,
num_voxels=num_voxels,
**model_kwargs)
elif cfg.data.panorama: | model = dpvgo.DirectPanoramaVoxGO( | 3 | 2023-12-11 05:49:46+00:00 | 8k |
KAIST-VICLab/From_Ground_To_Objects | datasets/kitti_dataset.py | [
{
"identifier": "generate_depth_map",
"path": "datasets/kitti_utils.py",
"snippet": "def generate_depth_map(calib_dir, velo_filename, cam=2, vel_depth=False):\n \"\"\"Generate a depth map from velodyne data\n \"\"\"\n # load calibration files\n cam2cam = read_calib_file(os.path.join(calib_di... | import os
import skimage.transform
import numpy as np
import PIL.Image as pil
from .kitti_utils import generate_depth_map
from .mono_dataset import MonoDataset | 3,825 | # Copyright Niantic 2021. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the ManyDepth licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
os.environ["MKL_NUM_THREADS"] = "1" # noqa F402
os.environ["NUMEXPR_NUM_THREADS"] = "1" # noqa F402
os.environ["OMP_NUM_THREADS"] = "1" # noqa F402
class KITTIDataset(MonoDataset):
"""Superclass for different types of KITTI dataset loaders
"""
def __init__(self, *args, **kwargs):
super(KITTIDataset, self).__init__(*args, **kwargs)
# NOTE: Make sure your intrinsics matrix is *normalized* by the original image size
self.K = np.array([[0.58, 0, 0.5, 0],
[0, 1.92, 0.5, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], dtype=np.float32)
self.full_res_shape = (1242, 375)
self.side_map = {"2": 2, "3": 3, "l": 2, "r": 3}
def check_depth(self):
line = self.filenames[0].split()
scene_name = line[0]
frame_index = int(line[1])
velo_filename = os.path.join(
self.data_path,
scene_name,
"velodyne_points/data/{:010d}.bin".format(int(frame_index)))
return os.path.isfile(velo_filename)
def index_to_folder_and_frame_idx(self, index):
"""Convert index in the dataset to a folder name, frame_idx and any other bits
"""
line = self.filenames[index].split()
folder = line[0]
if len(line) == 3:
frame_index = int(line[1])
else:
frame_index = 0
if len(line) == 3:
side = line[2]
else:
side = None
return folder, frame_index, side
def get_color(self, folder, frame_index, side, do_flip):
color = self.loader(self.get_image_path(folder, frame_index, side))
if do_flip:
color = color.transpose(pil.FLIP_LEFT_RIGHT)
return color
# added part!!
def get_color_path(self, folder, frame_index, side):
# return self.get_image_path(folder, frame_index, side)
f_str = "{:010d}".format(frame_index)
image_path = os.path.join(folder, "image_0{}/data".format(self.side_map[side]), f_str)
return image_path
class KITTIRAWDataset(KITTIDataset):
"""KITTI dataset which loads the original velodyne depth maps for ground truth
"""
def __init__(self, *args, **kwargs):
super(KITTIRAWDataset, self).__init__(*args, **kwargs)
def get_image_path(self, folder, frame_index, side):
f_str = "{:010d}{}".format(frame_index, self.img_ext)
image_path = os.path.join(
self.data_path, folder, "image_0{}/data".format(self.side_map[side]), f_str)
return image_path
def get_depth(self, folder, frame_index, side, do_flip):
calib_path = os.path.join(self.data_path, folder.split("/")[0])
velo_filename = os.path.join(
self.data_path,
folder,
"velodyne_points/data/{:010d}.bin".format(int(frame_index)))
| # Copyright Niantic 2021. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the ManyDepth licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
os.environ["MKL_NUM_THREADS"] = "1" # noqa F402
os.environ["NUMEXPR_NUM_THREADS"] = "1" # noqa F402
os.environ["OMP_NUM_THREADS"] = "1" # noqa F402
class KITTIDataset(MonoDataset):
"""Superclass for different types of KITTI dataset loaders
"""
def __init__(self, *args, **kwargs):
super(KITTIDataset, self).__init__(*args, **kwargs)
# NOTE: Make sure your intrinsics matrix is *normalized* by the original image size
self.K = np.array([[0.58, 0, 0.5, 0],
[0, 1.92, 0.5, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], dtype=np.float32)
self.full_res_shape = (1242, 375)
self.side_map = {"2": 2, "3": 3, "l": 2, "r": 3}
def check_depth(self):
line = self.filenames[0].split()
scene_name = line[0]
frame_index = int(line[1])
velo_filename = os.path.join(
self.data_path,
scene_name,
"velodyne_points/data/{:010d}.bin".format(int(frame_index)))
return os.path.isfile(velo_filename)
def index_to_folder_and_frame_idx(self, index):
"""Convert index in the dataset to a folder name, frame_idx and any other bits
"""
line = self.filenames[index].split()
folder = line[0]
if len(line) == 3:
frame_index = int(line[1])
else:
frame_index = 0
if len(line) == 3:
side = line[2]
else:
side = None
return folder, frame_index, side
def get_color(self, folder, frame_index, side, do_flip):
color = self.loader(self.get_image_path(folder, frame_index, side))
if do_flip:
color = color.transpose(pil.FLIP_LEFT_RIGHT)
return color
# added part!!
def get_color_path(self, folder, frame_index, side):
# return self.get_image_path(folder, frame_index, side)
f_str = "{:010d}".format(frame_index)
image_path = os.path.join(folder, "image_0{}/data".format(self.side_map[side]), f_str)
return image_path
class KITTIRAWDataset(KITTIDataset):
"""KITTI dataset which loads the original velodyne depth maps for ground truth
"""
def __init__(self, *args, **kwargs):
super(KITTIRAWDataset, self).__init__(*args, **kwargs)
def get_image_path(self, folder, frame_index, side):
f_str = "{:010d}{}".format(frame_index, self.img_ext)
image_path = os.path.join(
self.data_path, folder, "image_0{}/data".format(self.side_map[side]), f_str)
return image_path
def get_depth(self, folder, frame_index, side, do_flip):
calib_path = os.path.join(self.data_path, folder.split("/")[0])
velo_filename = os.path.join(
self.data_path,
folder,
"velodyne_points/data/{:010d}.bin".format(int(frame_index)))
| depth_gt = generate_depth_map(calib_path, velo_filename, self.side_map[side]) | 0 | 2023-12-12 08:29:30+00:00 | 8k |
LIU-Yuxin/SyncMVD | src/renderer/project.py | [
{
"identifier": "HardGeometryShader",
"path": "src/renderer/geometry.py",
"snippet": "class HardGeometryShader(ShaderBase):\n\t\"\"\"\n\trenders common geometric informations.\n\t\n\t\n\t\"\"\"\n\n\tdef forward(self, fragments, meshes, **kwargs):\n\t\tcameras = super()._get_cameras(**kwargs)\n\t\ttexels... | import torch
import pytorch3d
import xatlas
import numpy as np
from pytorch3d.io import load_objs_as_meshes, load_obj, save_obj, IO
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
look_at_view_transform,
FoVPerspectiveCameras,
FoVOrthographicCameras,
AmbientLights,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
TexturesUV
)
from .geometry import HardGeometryShader
from .shader import HardNChannelFlatShader
from .voronoi import voronoi_solve
from pytorch3d.io.experimental_gltf_io import MeshGlbFormat | 4,136 | verts_uvs_list = mesh.textures.verts_uvs_list()
# faces_list = [torch.flip(faces, [-1]) for faces in mesh.faces_list()]
new_verts_list = []
for i, (verts, verts_uv) in enumerate(zip(verts_list, verts_uvs_list)):
verts = verts.clone()
verts_uv = verts_uv.clone()
verts[...,0:2] = verts_uv[...,:]
verts = (verts - 0.5) * 2
verts[...,2] *= 1
new_verts_list.append(verts)
textures_uv = mesh.textures.clone()
self.mesh_uv = Meshes(new_verts_list, mesh.faces_list(), textures_uv)
return self.mesh_uv
# Set texture for the current mesh.
def set_texture_map(self, texture):
new_map = texture.permute(1, 2, 0)
new_map = new_map.to(self.device)
new_tex = TexturesUV(
[new_map],
self.mesh.textures.faces_uvs_padded(),
self.mesh.textures.verts_uvs_padded(),
sampling_mode=self.sampling_mode
)
self.mesh.textures = new_tex
# Set the initial normal noise texture
# No generator here for replication of the experiment result. Add one as you wish
def set_noise_texture(self, channels=None):
if not channels:
channels = self.channels
noise_texture = torch.normal(0, 1, (channels,) + self.target_size, device=self.device)
self.set_texture_map(noise_texture)
return noise_texture
# Set the cameras given the camera poses and centers
def set_cameras(self, camera_poses, centers=None, camera_distance=2.7, scale=None):
elev = torch.FloatTensor([pose[0] for pose in camera_poses])
azim = torch.FloatTensor([pose[1] for pose in camera_poses])
R, T = look_at_view_transform(dist=camera_distance, elev=elev, azim=azim, at=centers or ((0,0,0),))
self.cameras = FoVOrthographicCameras(device=self.device, R=R, T=T, scale_xyz=scale or ((1,1,1),))
# Set all necessary internal data for rendering and texture baking
# Can be used to refresh after changing camera positions
def set_cameras_and_render_settings(self, camera_poses, centers=None, camera_distance=2.7, render_size=None, scale=None):
self.set_cameras(camera_poses, centers, camera_distance, scale=scale)
if render_size is None:
render_size = self.render_size
if not hasattr(self, "renderer"):
self.setup_renderer(size=render_size)
if not hasattr(self, "mesh_d"):
self.disconnect_faces()
if not hasattr(self, "mesh_uv"):
self.construct_uv_mesh()
self.calculate_tex_gradient()
self.calculate_visible_triangle_mask()
_,_,_,cos_maps,_, _ = self.render_geometry()
self.calculate_cos_angle_weights(cos_maps)
# Setup renderers for rendering
# max faces per bin set to 30000 to avoid overflow in many test cases.
# You can use default value to let pytorch3d handle that for you.
def setup_renderer(self, size=64, blur=0.0, face_per_pix=1, perspective_correct=False, channels=None):
if not channels:
channels = self.channels
self.raster_settings = RasterizationSettings(
image_size=size,
blur_radius=blur,
faces_per_pixel=face_per_pix,
perspective_correct=perspective_correct,
cull_backfaces=True,
max_faces_per_bin=30000,
)
self.renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=self.cameras,
raster_settings=self.raster_settings,
),
shader=HardNChannelFlatShader(
device=self.device,
cameras=self.cameras,
lights=self.lights,
channels=channels
# materials=materials
)
)
# Bake screen-space cosine weights to UV space
# May be able to reimplement using the generic "bake_texture" function, but it works so leave it here for now
@torch.enable_grad()
def calculate_cos_angle_weights(self, cos_angles, fill=True, channels=None):
if not channels:
channels = self.channels
cos_maps = []
tmp_mesh = self.mesh.clone()
for i in range(len(self.cameras)):
zero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)
optimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)
optimizer.zero_grad()
zero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)
tmp_mesh.textures = zero_tex
images_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)
loss = torch.sum((cos_angles[i,:,:,0:1]**1 - images_predicted)**2)
loss.backward()
optimizer.step()
if fill:
zero_map = zero_map.detach() / (self.gradient_maps[i] + 1E-8)
|
# Pytorch3D based renderering functions, managed in a class
# Render size is recommended to be the same as your latent view size
# DO NOT USE "bilinear" sampling when you are handling latents.
# Stable Diffusion has 4 latent channels so use channels=4
class UVProjection():
def __init__(self, texture_size=96, render_size=64, sampling_mode="nearest", channels=3, device=None):
self.channels = channels
self.device = device or torch.device("cpu")
self.lights = AmbientLights(ambient_color=((1.0,)*channels,), device=self.device)
self.target_size = (texture_size,texture_size)
self.render_size = render_size
self.sampling_mode = sampling_mode
# Load obj mesh, rescale the mesh to fit into the bounding box
def load_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):
mesh = load_objs_as_meshes([mesh_path], device=self.device)
if auto_center:
verts = mesh.verts_packed()
max_bb = (verts - 0).max(0)[0]
min_bb = (verts - 0).min(0)[0]
scale = (max_bb - min_bb).max()/2
center = (max_bb+min_bb) /2
mesh.offset_verts_(-center)
mesh.scale_verts_((scale_factor / float(scale)))
else:
mesh.scale_verts_((scale_factor))
if autouv or (mesh.textures is None):
mesh = self.uv_unwrap(mesh)
self.mesh = mesh
def load_glb_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):
io = IO()
io.register_meshes_format(MeshGlbFormat())
with open(mesh_path, "rb") as f:
mesh = io.load_mesh(f, include_textures=True, device=self.device)
if auto_center:
verts = mesh.verts_packed()
max_bb = (verts - 0).max(0)[0]
min_bb = (verts - 0).min(0)[0]
scale = (max_bb - min_bb).max()/2
center = (max_bb+min_bb) /2
mesh.offset_verts_(-center)
mesh.scale_verts_((scale_factor / float(scale)))
else:
mesh.scale_verts_((scale_factor))
if autouv or (mesh.textures is None):
mesh = self.uv_unwrap(mesh)
self.mesh = mesh
# Save obj mesh
def save_mesh(self, mesh_path, texture):
save_obj(mesh_path,
self.mesh.verts_list()[0],
self.mesh.faces_list()[0],
verts_uvs= self.mesh.textures.verts_uvs_list()[0],
faces_uvs= self.mesh.textures.faces_uvs_list()[0],
texture_map=texture)
# Code referred to TEXTure code (https://github.com/TEXTurePaper/TEXTurePaper.git)
def uv_unwrap(self, mesh):
verts_list = mesh.verts_list()[0]
faces_list = mesh.faces_list()[0]
v_np = verts_list.cpu().numpy()
f_np = faces_list.int().cpu().numpy()
atlas = xatlas.Atlas()
atlas.add_mesh(v_np, f_np)
chart_options = xatlas.ChartOptions()
chart_options.max_iterations = 4
atlas.generate(chart_options=chart_options)
vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]
vt = torch.from_numpy(vt_np.astype(np.float32)).type(verts_list.dtype).to(mesh.device)
ft = torch.from_numpy(ft_np.astype(np.int64)).type(faces_list.dtype).to(mesh.device)
new_map = torch.zeros(self.target_size+(self.channels,), device=mesh.device)
new_tex = TexturesUV(
[new_map],
[ft],
[vt],
sampling_mode=self.sampling_mode
)
mesh.textures = new_tex
return mesh
'''
A functions that disconnect faces in the mesh according to
its UV seams. The number of vertices are made equal to the
number of unique vertices its UV layout, while the faces list
is intact.
'''
def disconnect_faces(self):
mesh = self.mesh
verts_list = mesh.verts_list()
faces_list = mesh.faces_list()
verts_uvs_list = mesh.textures.verts_uvs_list()
faces_uvs_list = mesh.textures.faces_uvs_list()
packed_list = [v[f] for v,f in zip(verts_list, faces_list)]
verts_disconnect_list = [
torch.zeros(
(verts_uvs_list[i].shape[0], 3),
dtype=verts_list[0].dtype,
device=verts_list[0].device
)
for i in range(len(verts_list))]
for i in range(len(verts_list)):
verts_disconnect_list[i][faces_uvs_list] = packed_list[i]
assert not mesh.has_verts_normals(), "Not implemented for vertex normals"
self.mesh_d = Meshes(verts_disconnect_list, faces_uvs_list, mesh.textures)
return self.mesh_d
'''
A function that construct a temp mesh for back-projection.
Take a disconnected mesh and a rasterizer, the function calculates
the projected faces as the UV, as use its original UV with pseudo
z value as world space geometry.
'''
def construct_uv_mesh(self):
mesh = self.mesh_d
verts_list = mesh.verts_list()
verts_uvs_list = mesh.textures.verts_uvs_list()
# faces_list = [torch.flip(faces, [-1]) for faces in mesh.faces_list()]
new_verts_list = []
for i, (verts, verts_uv) in enumerate(zip(verts_list, verts_uvs_list)):
verts = verts.clone()
verts_uv = verts_uv.clone()
verts[...,0:2] = verts_uv[...,:]
verts = (verts - 0.5) * 2
verts[...,2] *= 1
new_verts_list.append(verts)
textures_uv = mesh.textures.clone()
self.mesh_uv = Meshes(new_verts_list, mesh.faces_list(), textures_uv)
return self.mesh_uv
# Set texture for the current mesh.
def set_texture_map(self, texture):
new_map = texture.permute(1, 2, 0)
new_map = new_map.to(self.device)
new_tex = TexturesUV(
[new_map],
self.mesh.textures.faces_uvs_padded(),
self.mesh.textures.verts_uvs_padded(),
sampling_mode=self.sampling_mode
)
self.mesh.textures = new_tex
# Set the initial normal noise texture
# No generator here for replication of the experiment result. Add one as you wish
def set_noise_texture(self, channels=None):
if not channels:
channels = self.channels
noise_texture = torch.normal(0, 1, (channels,) + self.target_size, device=self.device)
self.set_texture_map(noise_texture)
return noise_texture
# Set the cameras given the camera poses and centers
def set_cameras(self, camera_poses, centers=None, camera_distance=2.7, scale=None):
elev = torch.FloatTensor([pose[0] for pose in camera_poses])
azim = torch.FloatTensor([pose[1] for pose in camera_poses])
R, T = look_at_view_transform(dist=camera_distance, elev=elev, azim=azim, at=centers or ((0,0,0),))
self.cameras = FoVOrthographicCameras(device=self.device, R=R, T=T, scale_xyz=scale or ((1,1,1),))
# Set all necessary internal data for rendering and texture baking
# Can be used to refresh after changing camera positions
def set_cameras_and_render_settings(self, camera_poses, centers=None, camera_distance=2.7, render_size=None, scale=None):
self.set_cameras(camera_poses, centers, camera_distance, scale=scale)
if render_size is None:
render_size = self.render_size
if not hasattr(self, "renderer"):
self.setup_renderer(size=render_size)
if not hasattr(self, "mesh_d"):
self.disconnect_faces()
if not hasattr(self, "mesh_uv"):
self.construct_uv_mesh()
self.calculate_tex_gradient()
self.calculate_visible_triangle_mask()
_,_,_,cos_maps,_, _ = self.render_geometry()
self.calculate_cos_angle_weights(cos_maps)
# Setup renderers for rendering
# max faces per bin set to 30000 to avoid overflow in many test cases.
# You can use default value to let pytorch3d handle that for you.
def setup_renderer(self, size=64, blur=0.0, face_per_pix=1, perspective_correct=False, channels=None):
if not channels:
channels = self.channels
self.raster_settings = RasterizationSettings(
image_size=size,
blur_radius=blur,
faces_per_pixel=face_per_pix,
perspective_correct=perspective_correct,
cull_backfaces=True,
max_faces_per_bin=30000,
)
self.renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=self.cameras,
raster_settings=self.raster_settings,
),
shader=HardNChannelFlatShader(
device=self.device,
cameras=self.cameras,
lights=self.lights,
channels=channels
# materials=materials
)
)
# Bake screen-space cosine weights to UV space
# May be able to reimplement using the generic "bake_texture" function, but it works so leave it here for now
@torch.enable_grad()
def calculate_cos_angle_weights(self, cos_angles, fill=True, channels=None):
if not channels:
channels = self.channels
cos_maps = []
tmp_mesh = self.mesh.clone()
for i in range(len(self.cameras)):
zero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)
optimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)
optimizer.zero_grad()
zero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)
tmp_mesh.textures = zero_tex
images_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)
loss = torch.sum((cos_angles[i,:,:,0:1]**1 - images_predicted)**2)
loss.backward()
optimizer.step()
if fill:
zero_map = zero_map.detach() / (self.gradient_maps[i] + 1E-8) | zero_map = voronoi_solve(zero_map, self.gradient_maps[i][...,0]) | 2 | 2023-12-09 03:27:58+00:00 | 8k |
jinxixiang/magic_animate_unofficial | animatediff/magic_animate/unet_3d_blocks.py | [
{
"identifier": "Transformer3DModel",
"path": "animatediff/magic_animate/attention.py",
"snippet": "class Transformer3DModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_cha... | import torch
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module | 4,512 | unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{down_block_type} does not exist.")
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{up_block_type} does not exist.")
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Adapted from https://github.com/guoyww/AnimateDiff
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_down_block(
    down_block_type,
    num_layers,
    in_channels,
    out_channels,
    temb_channels,
    add_downsample,
    resnet_eps,
    resnet_act_fn,
    attn_num_head_channels,
    resnet_groups=None,
    cross_attention_dim=None,
    downsample_padding=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    unet_use_cross_frame_attention=None,
    unet_use_temporal_attention=None,
    use_motion_module=None,
    motion_module_type=None,
    motion_module_kwargs=None,
):
    """Instantiate a 3D UNet down block by name.

    Accepts either the bare block name ("DownBlock3D", "CrossAttnDownBlock3D")
    or the registry-style spelling with a "UNetRes" prefix, which is stripped
    before dispatch.

    Raises:
        ValueError: if the (stripped) block name is unknown, or if
            "CrossAttnDownBlock3D" is requested without ``cross_attention_dim``.
    """
    # Normalize registry-style names such as "UNetResDownBlock3D".
    block_name = down_block_type[len("UNetRes"):] if down_block_type.startswith("UNetRes") else down_block_type

    # Keyword arguments common to every down-block variant.
    common_kwargs = dict(
        num_layers=num_layers,
        in_channels=in_channels,
        out_channels=out_channels,
        temb_channels=temb_channels,
        add_downsample=add_downsample,
        resnet_eps=resnet_eps,
        resnet_act_fn=resnet_act_fn,
        resnet_groups=resnet_groups,
        downsample_padding=downsample_padding,
        resnet_time_scale_shift=resnet_time_scale_shift,
        use_motion_module=use_motion_module,
        motion_module_type=motion_module_type,
        motion_module_kwargs=motion_module_kwargs,
    )

    if block_name == "DownBlock3D":
        return DownBlock3D(**common_kwargs)

    if block_name == "CrossAttnDownBlock3D":
        # Cross-attention blocks cannot be built without a context dimension.
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
        return CrossAttnDownBlock3D(
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            unet_use_cross_frame_attention=unet_use_cross_frame_attention,
            unet_use_temporal_attention=unet_use_temporal_attention,
            **common_kwargs,
        )

    raise ValueError(f"{block_name} does not exist.")
def get_up_block(
    up_block_type,
    num_layers,
    in_channels,
    out_channels,
    prev_output_channel,
    temb_channels,
    add_upsample,
    resnet_eps,
    resnet_act_fn,
    attn_num_head_channels,
    resnet_groups=None,
    cross_attention_dim=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    unet_use_cross_frame_attention=None,
    unet_use_temporal_attention=None,
    use_motion_module=None,
    motion_module_type=None,
    motion_module_kwargs=None,
):
    """Instantiate a 3D UNet up block by name.

    Accepts either the bare block name ("UpBlock3D", "CrossAttnUpBlock3D")
    or the registry-style spelling with a "UNetRes" prefix, which is stripped
    before dispatch.

    Raises:
        ValueError: if the (stripped) block name is unknown, or if
            "CrossAttnUpBlock3D" is requested without ``cross_attention_dim``.
    """
    # Normalize registry-style names such as "UNetResUpBlock3D".
    block_name = up_block_type[len("UNetRes"):] if up_block_type.startswith("UNetRes") else up_block_type

    # Keyword arguments common to every up-block variant.
    common_kwargs = dict(
        num_layers=num_layers,
        in_channels=in_channels,
        out_channels=out_channels,
        prev_output_channel=prev_output_channel,
        temb_channels=temb_channels,
        add_upsample=add_upsample,
        resnet_eps=resnet_eps,
        resnet_act_fn=resnet_act_fn,
        resnet_groups=resnet_groups,
        resnet_time_scale_shift=resnet_time_scale_shift,
        use_motion_module=use_motion_module,
        motion_module_type=motion_module_type,
        motion_module_kwargs=motion_module_kwargs,
    )

    if block_name == "UpBlock3D":
        return UpBlock3D(**common_kwargs)

    if block_name == "CrossAttnUpBlock3D":
        # Cross-attention blocks cannot be built without a context dimension.
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
        return CrossAttnUpBlock3D(
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            unet_use_cross_frame_attention=unet_use_cross_frame_attention,
            unet_use_temporal_attention=unet_use_temporal_attention,
            **common_kwargs,
        )

    raise ValueError(f"{block_name} does not exist.")
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [ | ResnetBlock3D( | 2 | 2023-12-12 00:16:39+00:00 | 8k |
Chat-3D/Chat-3D-v2 | models/chat3d.py | [
{
"identifier": "LlamaForCausalLM",
"path": "models/modeling_llama.py",
"snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n\n self.lm_head = nn.Linear(config.hidden_size, config.voc... | import random
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import contextlib
from abc import ABC
from torch.cuda.amp import autocast as autocast
from .modeling_llama import LlamaForCausalLM
from transformers import LlamaTokenizer, LlamaConfig
from models.transformer_vanilla import TransformerEncoder, CMT
from models.helpers import GenericMLP
from models.position_embedding import PositionEmbeddingCoordsSine, PositionalEmbedding
from transformers import StoppingCriteria, StoppingCriteriaList | 6,010 | module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
return _init_weights
class CustomGradLayer(torch.autograd.Function):
@staticmethod
def forward(ctx, input, coefficient=1.0):
ctx.coefficient = coefficient
return input
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output * ctx.coefficient
return grad_input, None
class Chat3D(nn.Module):
"""
VideoChat model.
"""
def __init__(self, config):
super().__init__()
llama_model_path = config.get("llama_model_path")
low_resource = config.get("low_resource", False)
# prompt
self.prompt_template = config.get("prompt_template", "")
self.max_txt_len = config.get("max_txt_len", 32)
self.end_sym = config.get("end_sym", '\n')
self.system_path = config.get("system_path", "")
self.begin_signal = "###"
self.role = ("Human", "Assistant")
self.pc_start_token, self.pc_end_token = "<Target>", "</Target>"
self.scene_start_token, self.scene_end_token = "<Scene>", "</Scene>"
self.add_scene_token = config.get("add_scene_token", True)
self.debug = config.get("debug", False)
self.obj_norm_scale = config.get("obj_norm_scale", 1)
self.scene_norm_scale = config.get("scene_norm_scale", 1)
self.grad_scale = config.get("grad_scale", 1)
mlp_dropout = config.get("mlp_dropout", 0.5)
self.stage = config.get("stage", 1)
self.low_resource = low_resource
self.input_dim = config.get("input_dim", 512)
self.attr_dim = config.get("attr_dim", 512)
self.inter_dim = self.input_dim + self.attr_dim * 2
if not self.debug:
logger.info('Loading LLAMA')
self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)
self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
if self.low_resource:
self.llama_model = LlamaForCausalLM.from_pretrained(
llama_model_path,
torch_dtype=torch.float16,
load_in_8bit=True,
device_map="auto"
)
else:
self.llama_model = LlamaForCausalLM.from_pretrained(
llama_model_path,
torch_dtype=torch.float16,
)
logger.info("freeze LLAMA")
for name, param in self.llama_model.named_parameters():
param.requires_grad = False
# if self.stage != 1:
# for layer_ind in range(30, 32):
# for param in self.llama_model.model.layers[layer_ind].parameters():
# param.requires_grad = True
# param.data = param.data.float()
self.llama_dim = self.llama_model.config.hidden_size
logger.info('Loading LLAMA Done')
else:
self.llama_model = None
self.llama_dim = 4096
# self.object_input_proj = nn.Sequential(
# nn.Linear(self.input_dim, self.input_dim),
# # nn.ReLU(),
# # nn.LayerNorm(self.input_dim),
# )
self.coord_proj = nn.Sequential(
nn.Linear(3, self.attr_dim),
# nn.ReLU(),
# nn.LayerNorm(self.attr_dim),
# nn.Dropout(mlp_dropout)
)
self.color_proj = nn.Sequential(
nn.Linear(3, self.attr_dim),
# nn.ReLU(),
# nn.LayerNorm(self.attr_dim),
# nn.Dropout(mlp_dropout)
)
# self.color_dropout = nn.Dropout(mlp_dropout)
# self.pos_proj = nn.Sequential(
# nn.Linear(6, self.inter_dim),
# nn.LayerNorm(self.inter_dim)
# )
# self.pos_embedding = PositionalEmbedding(dim=self.llama_dim)
self.pos_proj = nn.Sequential(
nn.Linear(3, self.llama_dim)
)
self.object_proj = nn.Sequential(
nn.Linear(self.inter_dim, self.llama_dim),
nn.GELU(),
nn.Dropout(mlp_dropout),
nn.LayerNorm(self.llama_dim),
nn.Linear(self.llama_dim, self.llama_dim)
)
self.scene_proj = nn.Sequential(
nn.Linear(self.llama_dim, self.llama_dim),
)
self.encoder_num_layers = int(config.get("encoder_num_layers", 1))
|
logger = logging.getLogger(__name__)
class StoppingCriteriaSub(StoppingCriteria):
def __init__(self, stops=[], encounters=1):
super().__init__()
self.stops = stops
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
for stop in self.stops:
if torch.all((stop == input_ids[0][-len(stop):])).item():
return True
return False
def init_weights(std=0.02):
def _init_weights(module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
return _init_weights
class CustomGradLayer(torch.autograd.Function):
@staticmethod
def forward(ctx, input, coefficient=1.0):
ctx.coefficient = coefficient
return input
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output * ctx.coefficient
return grad_input, None
class Chat3D(nn.Module):
"""
VideoChat model.
"""
def __init__(self, config):
super().__init__()
llama_model_path = config.get("llama_model_path")
low_resource = config.get("low_resource", False)
# prompt
self.prompt_template = config.get("prompt_template", "")
self.max_txt_len = config.get("max_txt_len", 32)
self.end_sym = config.get("end_sym", '\n')
self.system_path = config.get("system_path", "")
self.begin_signal = "###"
self.role = ("Human", "Assistant")
self.pc_start_token, self.pc_end_token = "<Target>", "</Target>"
self.scene_start_token, self.scene_end_token = "<Scene>", "</Scene>"
self.add_scene_token = config.get("add_scene_token", True)
self.debug = config.get("debug", False)
self.obj_norm_scale = config.get("obj_norm_scale", 1)
self.scene_norm_scale = config.get("scene_norm_scale", 1)
self.grad_scale = config.get("grad_scale", 1)
mlp_dropout = config.get("mlp_dropout", 0.5)
self.stage = config.get("stage", 1)
self.low_resource = low_resource
self.input_dim = config.get("input_dim", 512)
self.attr_dim = config.get("attr_dim", 512)
self.inter_dim = self.input_dim + self.attr_dim * 2
if not self.debug:
logger.info('Loading LLAMA')
self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)
self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
if self.low_resource:
self.llama_model = LlamaForCausalLM.from_pretrained(
llama_model_path,
torch_dtype=torch.float16,
load_in_8bit=True,
device_map="auto"
)
else:
self.llama_model = LlamaForCausalLM.from_pretrained(
llama_model_path,
torch_dtype=torch.float16,
)
logger.info("freeze LLAMA")
for name, param in self.llama_model.named_parameters():
param.requires_grad = False
# if self.stage != 1:
# for layer_ind in range(30, 32):
# for param in self.llama_model.model.layers[layer_ind].parameters():
# param.requires_grad = True
# param.data = param.data.float()
self.llama_dim = self.llama_model.config.hidden_size
logger.info('Loading LLAMA Done')
else:
self.llama_model = None
self.llama_dim = 4096
# self.object_input_proj = nn.Sequential(
# nn.Linear(self.input_dim, self.input_dim),
# # nn.ReLU(),
# # nn.LayerNorm(self.input_dim),
# )
self.coord_proj = nn.Sequential(
nn.Linear(3, self.attr_dim),
# nn.ReLU(),
# nn.LayerNorm(self.attr_dim),
# nn.Dropout(mlp_dropout)
)
self.color_proj = nn.Sequential(
nn.Linear(3, self.attr_dim),
# nn.ReLU(),
# nn.LayerNorm(self.attr_dim),
# nn.Dropout(mlp_dropout)
)
# self.color_dropout = nn.Dropout(mlp_dropout)
# self.pos_proj = nn.Sequential(
# nn.Linear(6, self.inter_dim),
# nn.LayerNorm(self.inter_dim)
# )
# self.pos_embedding = PositionalEmbedding(dim=self.llama_dim)
self.pos_proj = nn.Sequential(
nn.Linear(3, self.llama_dim)
)
self.object_proj = nn.Sequential(
nn.Linear(self.inter_dim, self.llama_dim),
nn.GELU(),
nn.Dropout(mlp_dropout),
nn.LayerNorm(self.llama_dim),
nn.Linear(self.llama_dim, self.llama_dim)
)
self.scene_proj = nn.Sequential(
nn.Linear(self.llama_dim, self.llama_dim),
)
self.encoder_num_layers = int(config.get("encoder_num_layers", 1)) | self.relation_module = CMT(hidden_size=self.llama_dim, num_layers=self.encoder_num_layers) | 2 | 2023-12-11 14:39:58+00:00 | 8k |
SqueezeBits/owlite | owlite/nn/modules/qmodule_mixins.py | [
{
"identifier": "log",
"path": "owlite/logger.py",
"snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n ... | from typing import Optional
from ...logger import log
from ..fake_quantizer import FakeQuantizer
import torch | 3,972 | """ Util classes using at quantized modules"""
class UnaryNeuralQModuleMixin:
"""
Mixin-class for implementing weight-quantized counterparts of subclasses
of torch.nn.Module with the parameters named 'weight' and 'bias'
such that whose `forward` method takes exactly one parameter other than 'self'.
Examples: `torch.nn.Conv1d`, `torch.nn.Conv2d`, `torch.nn.Conv3d`, `torch.nn.Linear`
"""
weight: torch.nn.Parameter
bias: Optional[torch.nn.Parameter]
| """ Util classes using at quantized modules"""
class UnaryNeuralQModuleMixin:
"""
Mixin-class for implementing weight-quantized counterparts of subclasses
of torch.nn.Module with the parameters named 'weight' and 'bias'
such that whose `forward` method takes exactly one parameter other than 'self'.
Examples: `torch.nn.Conv1d`, `torch.nn.Conv2d`, `torch.nn.Conv3d`, `torch.nn.Linear`
"""
weight: torch.nn.Parameter
bias: Optional[torch.nn.Parameter] | input_quantizer: Optional[FakeQuantizer] | 1 | 2023-12-08 06:41:50+00:00 | 8k |
ximinng/PyTorch-SVGRender | pytorch_svgrender/painter/wordasimage/painter_params.py | [
{
"identifier": "DiffVGState",
"path": "pytorch_svgrender/diffvg_warp/diffvg_state.py",
"snippet": "class DiffVGState(torch.nn.Module):\n\n def __init__(self,\n device: torch.device,\n use_gpu: bool = torch.cuda.is_available(),\n print_timing: bool = Fa... | import os
import pathlib
import numpy as np
import pydiffvg
import torch
from torch.optim.lr_scheduler import LambdaLR
from pytorch_svgrender.diffvg_warp import DiffVGState
from .ttf import font_string_to_beziers, write_letter_svg | 4,758 |
class Painter(DiffVGState):
def __init__(self,
font: str,
canvas_size: int,
device: torch.device):
super(Painter, self).__init__(device=device, use_gpu=True, canvas_width=canvas_size, canvas_height=canvas_size)
self.font = font
def init_shape(self, path_svg, seed=0):
assert pathlib.Path(path_svg).exists(), f"{path_svg} is not exist!"
print(f"-> init svg from `{path_svg}` ...")
# 1. load svg from path
canvas_width, canvas_height, self.shapes, self.shape_groups = self.load_svg(path_svg)
# 2. set learnable parameters
self.set_point_parameters()
img = self.render_warp(seed)
img = img[:, :, 3:4] * img[:, :, :3] + \
torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4])
img = img[:, :, :3]
img = img.unsqueeze(0) # convert img from HWC to NCHW
img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW
return img
def get_image(self, step: int = 0):
img = self.render_warp(step)
img = img[:, :, 3:4] * img[:, :, :3] + \
torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4])
img = img[:, :, :3]
img = img.unsqueeze(0) # convert img from HWC to NCHW
img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW
return img
def clip_curve_shape(self):
for group in self.shape_groups:
group.fill_color.data.clamp_(0.0, 1.0)
def set_point_parameters(self): # stroke`s location optimization
self.point_vars = []
for i, path in enumerate(self.shapes):
path.points.requires_grad = True
self.point_vars.append(path.points)
def get_point_parameters(self):
return self.point_vars
def preprocess_font(self, word, letter, level_of_cc=1, font_path=None, init_path=None):
if level_of_cc == 0:
target_cp = None
else:
target_cp = {"A": 120, "B": 120, "C": 100, "D": 100,
"E": 120, "F": 120, "G": 120, "H": 120,
"I": 35, "J": 80, "K": 100, "L": 80,
"M": 100, "N": 100, "O": 100, "P": 120,
"Q": 120, "R": 130, "S": 110, "T": 90,
"U": 100, "V": 100, "W": 100, "X": 130,
"Y": 120, "Z": 120,
"a": 120, "b": 120, "c": 100, "d": 100,
"e": 120, "f": 120, "g": 120, "h": 120,
"i": 35, "j": 80, "k": 100, "l": 80,
"m": 100, "n": 100, "o": 100, "p": 120,
"q": 120, "r": 130, "s": 110, "t": 90,
"u": 100, "v": 100, "w": 100, "x": 130,
"y": 120, "z": 120}
target_cp = {k: v * level_of_cc for k, v in target_cp.items()}
print("init_path: ", init_path)
subdivision_thresh = None
self.font_string_to_svgs(init_path,
font_path,
word,
target_control=target_cp,
subdivision_thresh=subdivision_thresh)
self.normalize_letter_size(init_path, font_path, word)
# optimize two adjacent letters
print("letter: ", letter)
if len(letter) > 1:
subdivision_thresh = None
self.font_string_to_svgs(init_path,
font_path,
letter,
target_control=target_cp,
subdivision_thresh=subdivision_thresh)
self.normalize_letter_size(init_path, font_path, letter)
print("preprocess_font done.")
def font_string_to_svgs(self, dest_path, font, txt, size=30, spacing=1.0, target_control=None,
subdivision_thresh=None):
fontname = self.font
|
class Painter(DiffVGState):
def __init__(self,
font: str,
canvas_size: int,
device: torch.device):
super(Painter, self).__init__(device=device, use_gpu=True, canvas_width=canvas_size, canvas_height=canvas_size)
self.font = font
def init_shape(self, path_svg, seed=0):
assert pathlib.Path(path_svg).exists(), f"{path_svg} is not exist!"
print(f"-> init svg from `{path_svg}` ...")
# 1. load svg from path
canvas_width, canvas_height, self.shapes, self.shape_groups = self.load_svg(path_svg)
# 2. set learnable parameters
self.set_point_parameters()
img = self.render_warp(seed)
img = img[:, :, 3:4] * img[:, :, :3] + \
torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4])
img = img[:, :, :3]
img = img.unsqueeze(0) # convert img from HWC to NCHW
img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW
return img
def get_image(self, step: int = 0):
img = self.render_warp(step)
img = img[:, :, 3:4] * img[:, :, :3] + \
torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4])
img = img[:, :, :3]
img = img.unsqueeze(0) # convert img from HWC to NCHW
img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW
return img
def clip_curve_shape(self):
for group in self.shape_groups:
group.fill_color.data.clamp_(0.0, 1.0)
def set_point_parameters(self): # stroke`s location optimization
self.point_vars = []
for i, path in enumerate(self.shapes):
path.points.requires_grad = True
self.point_vars.append(path.points)
def get_point_parameters(self):
return self.point_vars
def preprocess_font(self, word, letter, level_of_cc=1, font_path=None, init_path=None):
if level_of_cc == 0:
target_cp = None
else:
target_cp = {"A": 120, "B": 120, "C": 100, "D": 100,
"E": 120, "F": 120, "G": 120, "H": 120,
"I": 35, "J": 80, "K": 100, "L": 80,
"M": 100, "N": 100, "O": 100, "P": 120,
"Q": 120, "R": 130, "S": 110, "T": 90,
"U": 100, "V": 100, "W": 100, "X": 130,
"Y": 120, "Z": 120,
"a": 120, "b": 120, "c": 100, "d": 100,
"e": 120, "f": 120, "g": 120, "h": 120,
"i": 35, "j": 80, "k": 100, "l": 80,
"m": 100, "n": 100, "o": 100, "p": 120,
"q": 120, "r": 130, "s": 110, "t": 90,
"u": 100, "v": 100, "w": 100, "x": 130,
"y": 120, "z": 120}
target_cp = {k: v * level_of_cc for k, v in target_cp.items()}
print("init_path: ", init_path)
subdivision_thresh = None
self.font_string_to_svgs(init_path,
font_path,
word,
target_control=target_cp,
subdivision_thresh=subdivision_thresh)
self.normalize_letter_size(init_path, font_path, word)
# optimize two adjacent letters
print("letter: ", letter)
if len(letter) > 1:
subdivision_thresh = None
self.font_string_to_svgs(init_path,
font_path,
letter,
target_control=target_cp,
subdivision_thresh=subdivision_thresh)
self.normalize_letter_size(init_path, font_path, letter)
print("preprocess_font done.")
def font_string_to_svgs(self, dest_path, font, txt, size=30, spacing=1.0, target_control=None,
subdivision_thresh=None):
fontname = self.font | glyph_beziers = font_string_to_beziers(font, txt, size, spacing, merge=False, target_control=target_control) | 1 | 2023-12-13 08:18:01+00:00 | 8k |
lyhisme/DeST | libs/helper.py | [
{
"identifier": "get_id2class_map",
"path": "libs/class_id_map.py",
"snippet": "def get_id2class_map(dataset: str, dataset_dir: str = \"./dataset\") -> Dict[int, str]:\n class2id_map = get_class2id_map(dataset, dataset_dir)\n\n return {val: key for key, val in class2id_map.items()}"
},
{
"... | import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from typing import Optional, Tuple
from torch.utils.data import DataLoader
from libs.class_id_map import get_id2class_map
from libs.metric import AverageMeter, BoundaryScoreMeter, ScoreMeter
from libs.postprocess import PostProcessor | 4,692 |
def train(
train_loader: DataLoader,
model: nn.Module,
criterion_cls: nn.Module,
criterion_bound: nn.Module,
lambda_bound_loss: float,
optimizer: optim.Optimizer,
device: str,
) -> float:
losses = AverageMeter("Loss", ":.4e")
# switch training mode
model.train()
for sample in train_loader:
x = sample["feature"]
t = sample["label"]
b = sample["boundary"]
mask = sample["mask"]
x = x.to(device)
t = t.to(device)
b = b.to(device)
mask = mask.to(device)
batch_size = x.shape[0]
# compute output and loss
output_cls, output_bound = model(x, mask)
loss = 0.0
if isinstance(output_cls, list):
n = len(output_cls)
for out in output_cls:
loss += criterion_cls(out, t, x) / n
else:
loss += criterion_cls(output_cls, t, x)
if isinstance(output_bound, list):
n = len(output_bound)
for out in output_bound:
loss += lambda_bound_loss * criterion_bound(out, b, mask) / n
else:
loss += lambda_bound_loss * criterion_bound(output_bound, b, mask)
# record loss
losses.update(loss.item(), batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return losses.avg
def validate(
val_loader: DataLoader,
model: nn.Module,
criterion_cls: nn.Module,
criterion_bound: nn.Module,
lambda_bound_loss: float,
device: str,
dataset: str,
dataset_dir: str,
iou_thresholds: Tuple[float],
boundary_th: float,
tolerance: int,
refinement_method: Optional[str] = None
) -> Tuple[float, float, float, float, float, float, float, float, str]:
losses = AverageMeter("Loss", ":.4e")
postprocessor = PostProcessor(refinement_method, boundary_th)
scores_cls = ScoreMeter(
id2class_map=get_id2class_map(dataset, dataset_dir=dataset_dir),
iou_thresholds=iou_thresholds,
)
|
def train(
train_loader: DataLoader,
model: nn.Module,
criterion_cls: nn.Module,
criterion_bound: nn.Module,
lambda_bound_loss: float,
optimizer: optim.Optimizer,
device: str,
) -> float:
losses = AverageMeter("Loss", ":.4e")
# switch training mode
model.train()
for sample in train_loader:
x = sample["feature"]
t = sample["label"]
b = sample["boundary"]
mask = sample["mask"]
x = x.to(device)
t = t.to(device)
b = b.to(device)
mask = mask.to(device)
batch_size = x.shape[0]
# compute output and loss
output_cls, output_bound = model(x, mask)
loss = 0.0
if isinstance(output_cls, list):
n = len(output_cls)
for out in output_cls:
loss += criterion_cls(out, t, x) / n
else:
loss += criterion_cls(output_cls, t, x)
if isinstance(output_bound, list):
n = len(output_bound)
for out in output_bound:
loss += lambda_bound_loss * criterion_bound(out, b, mask) / n
else:
loss += lambda_bound_loss * criterion_bound(output_bound, b, mask)
# record loss
losses.update(loss.item(), batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return losses.avg
def validate(
val_loader: DataLoader,
model: nn.Module,
criterion_cls: nn.Module,
criterion_bound: nn.Module,
lambda_bound_loss: float,
device: str,
dataset: str,
dataset_dir: str,
iou_thresholds: Tuple[float],
boundary_th: float,
tolerance: int,
refinement_method: Optional[str] = None
) -> Tuple[float, float, float, float, float, float, float, float, str]:
losses = AverageMeter("Loss", ":.4e")
postprocessor = PostProcessor(refinement_method, boundary_th)
scores_cls = ScoreMeter(
id2class_map=get_id2class_map(dataset, dataset_dir=dataset_dir),
iou_thresholds=iou_thresholds,
) | scores_bound = BoundaryScoreMeter( | 2 | 2023-12-12 02:27:15+00:00 | 8k |
bolna-ai/bolna | bolna/agent_manager/task_manager.py | [
{
"identifier": "BaseManager",
"path": "bolna/agent_manager/base_manager.py",
"snippet": "class BaseManager:\n def __init__(self):\n self.agent = \"bolna-agent\""
},
{
"identifier": "create_ws_data_packet",
"path": "bolna/helpers/utils.py",
"snippet": "def create_ws_data_packet... | import asyncio
import traceback
import time
import json
from .base_manager import BaseManager
from bolna.agent_types import *
from bolna.providers import *
from bolna.helpers.utils import create_ws_data_packet, is_valid_md5, get_raw_audio_bytes_from_base64, \
get_required_input_types, format_messages, get_prompt_responses, update_prompt_with_context, get_md5_hash, clean_json_string, yield_chunks_from_memory
from bolna.helpers.logger_config import configure_logger | 4,273 | await self.tools["output"].handle_interruption()
self.sequence_ids = set() #Remove all the sequence ids so subsequent won't be processed
if self.llm_task is not None:
self.llm_task.cancel()
self.llm_task = None
self.was_long_pause = True
# if len(self.synthesizer_tasks) > 0:
# for synth_task in self.synthesizer_tasks:
# synth_task.cancel()
# self.synthesizer_tasks = []
########################
# Transcriber task
########################
async def _handle_transcriber_output(self, next_task, transcriber_message, meta_info):
if next_task == "llm":
meta_info["origin"] = "transcriber"
self.llm_task = asyncio.create_task(
self._run_llm_task(create_ws_data_packet(transcriber_message, meta_info)))
elif next_task == "synthesizer":
self.synthesizer_tasks.append(asyncio.create_task(
self._synthesize(create_ws_data_packet(transcriber_message, meta_info))))
else:
logger.info(f"Need to separate out output task")
async def _listen_transcriber(self):
transcriber_message = ""
start_time = None
try:
if self.stream:
async for message in self.tools["transcriber"].transcribe():
if message['data'] == "transcriber_connection_closed":
self.transcriber_duration += message['meta_info']["transcriber_duration"]
logger.info("transcriber connection closed")
return
self._set_call_details(message)
meta_info = message["meta_info"]
sequence = await self.process_transcriber_request(meta_info)
if message['data'] == "TRANSCRIBER_BEGIN":
logger.info("starting transcriber stream")
start_time = time.time()
await self.tools["output"].handle_interruption()
if self.llm_task is not None:
logger.info("Cancelling LLM Task as it's on")
self.llm_task.cancel()
self.llm_task = None
self.was_long_pause = True
if len(self.synthesizer_tasks) > 0:
logger.info("Cancelling Synthesizer tasks")
for synth_task in self.synthesizer_tasks:
synth_task.cancel()
self.synthesizer_tasks = []
continue
elif message['data'] == "TRANSCRIBER_END":
logger.info("transcriber stream and preparing the next step")
next_task = self._get_next_step(sequence, "transcriber")
logger.info(f'got the next task {next_task}')
if self.was_long_pause:
logger.info(
f"Seems like there was a long pause {self.history[-1]['content']} , {transcriber_message}")
message = self.history[-1]['content'] + " " + transcriber_message
self.history = self.history[:-1]
self.was_long_pause = False
logger.info(f'invoking next_task {next_task} with transcriber_message: {transcriber_message}')
await self._handle_transcriber_output(next_task, transcriber_message, meta_info)
transcriber_message = ""
continue
else:
logger.info("processed text from transcriber: {}".format(message['data']))
transcriber_message += message['data']
else:
logger.info("Not a streaming conversation. Hence getting a full blown transcript")
async for message in self.tools["transcriber"].transcribe():
logger.info(f"message from transcriber {message}")
sequence = message["meta_info"]["sequence"]
next_task = self._get_next_step(sequence, "transcriber")
self.transcriber_duration += message["meta_info"]["transcriber_duration"] if "transcriber_duration" in message["meta_info"] else 0
await self._handle_transcriber_output(next_task, message['data'], message["meta_info"])
except Exception as e:
traceback.print_exc()
logger.error(f"Error in transcriber {e}")
async def __listen_synthesizer(self):
try:
if self.stream and self.synthesizer_provider != "polly":
logger.info("Opening websocket connection to synthesizer")
await self.tools["synthesizer"].open_connection()
while True:
logger.info("Listening to synthesizer")
async for message in self.tools["synthesizer"].generate():
if not self.conversation_ended and message["meta_info"]["sequence_id"] in self.sequence_ids:
await self.tools["output"].handle(message)
if "end_of_synthesizer_stream" in message["meta_info"] and message["meta_info"]["end_of_synthesizer_stream"]:
logger.info(f"Got End of stream and hence removing from sequence ids {self.sequence_ids} {message['meta_info']['sequence_id']}")
self.sequence_ids.remove(message["meta_info"]["sequence_id"])
await asyncio.sleep(1)
except Exception as e:
logger.error(f"Error in synthesizer {e}")
async def _synthesize(self, message):
meta_info = message["meta_info"]
text = message["data"]
meta_info["type"] = "audio"
try:
if meta_info["is_md5_hash"]:
logger.info('sending preprocessed audio response to {}'.format(
self.task_config["tools_config"]["output"]["provider"]))
|
logger = configure_logger(__name__)
class TaskManager(BaseManager):
def __init__(self, assistant_name, task_id, task, ws, input_parameters=None, context_data=None, user_id=None,
assistant_id=None, run_id=None, connected_through_dashboard=False, cache = None):
super().__init__()
logger.info(f"doing task {task}")
self.task_id = task_id
self.assistant_name = assistant_name
self.tools = {}
self.websocket = ws
self.task_config = task
self.context_data = context_data
self.connected_through_dashboard = connected_through_dashboard
# Set up communication queues between processes
self.audio_queue = asyncio.Queue()
self.llm_queue = asyncio.Queue()
self.synthesizer_queue = asyncio.Queue()
self.pipelines = task['toolchain']['pipelines']
self.textual_chat_agent = False
if task['toolchain']['pipelines'][0] == "llm" and task["tools_config"]["llm_agent"][
"agent_task"] == "conversation":
self.textual_chat_agent = False
self.start_time = time.time()
# Assistant persistance stuff
self.user_id = user_id
self.assistant_id = assistant_id
self.run_id = run_id
self.mark_set = set()
self.conversation_ended = False
# Prompts
self.prompts, self.system_prompt = {}, {}
self.input_parameters = input_parameters
self.queues = {
"transcriber": self.audio_queue,
"llm": self.llm_queue,
"synthesizer": self.synthesizer_queue
}
if task_id == 0:
if self.task_config["tools_config"]["input"]["provider"] in SUPPORTED_INPUT_HANDLERS.keys():
logger.info(f"Connected through dashboard {connected_through_dashboard}")
if connected_through_dashboard:
# If connected through dashboard get basic dashboard class
input_handler_class = SUPPORTED_INPUT_HANDLERS.get("default")
else:
input_handler_class = SUPPORTED_INPUT_HANDLERS.get(
self.task_config["tools_config"]["input"]["provider"])
self.tools["input"] = input_handler_class(self.queues, self.websocket, get_required_input_types(task),
self.mark_set, self.connected_through_dashboard)
else:
raise "Other input handlers not supported yet"
if self.task_config["tools_config"]["output"] is None:
logger.info("Not setting up any output handler as it is none")
elif self.task_config["tools_config"]["output"]["provider"] in SUPPORTED_OUTPUT_HANDLERS.keys():
output_handler_class = SUPPORTED_OUTPUT_HANDLERS.get(self.task_config["tools_config"]["output"]["provider"])
if self.task_config["tools_config"]["output"]["provider"] == "twilio":
logger.info(f"Making sure that the sampling rate for output handler is 8000")
self.task_config['tools_config']['synthesizer']['provider_config']['sampling_rate'] = 8000
self.task_config['tools_config']['synthesizer']['audio_format'] = 'pcm'
self.tools["output"] = output_handler_class(self.websocket, self.mark_set)
else:
raise "Other input handlers not supported yet"
# Current conversation state
self.current_request_id = None
self.previous_request_id = None
self.llm_rejected_request_ids = set()
self.llm_processed_request_ids = set()
# Agent stuff
self.history = []
self.label_flow = []
# Setup IO SERVICE, TRANSCRIBER, LLM, SYNTHESIZER
self.llm_task = None
self.synthesizer_tasks = []
# state of conversation
self.was_long_pause = False
# Call conversations
self.call_sid = None
self.stream_sid = None
# metering
self.transcriber_duration = 0
self.synthesizer_characters = 0
self.ended_by_assistant = False
self.extracted_data = None
self.summarized_data = None
#self.stream = not connected_through_dashboard and "synthesizer" in self.task_config["tools_config"] and self.task_config["tools_config"]["synthesizer"]["stream"]
self.stream = not connected_through_dashboard #Currently we are allowing only realtime conversation based usecases. Hence it'll always be true unless connected through dashboard
self.is_local = False
# Memory
self.cache = cache
logger.info("task initialization completed")
# Sequence id for interruption
self.curr_sequence_id = 0
self.sequence_ids = set()
async def load_prompt(self, assistant_name, task_id, is_local):
logger.info("prompt and config setup started")
self.is_local = is_local
prompt_responses = await get_prompt_responses(assistant_name, assistant_id=self.assistant_id,
user_id=self.user_id, local=self.is_local)
self.prompts = prompt_responses["task_{}".format(task_id + 1)]
if "system_prompt" in self.prompts:
# This isn't a graph based agent
enriched_prompt = self.prompts["system_prompt"]
if self.context_data is not None:
enriched_prompt = update_prompt_with_context(self.prompts["system_prompt"], self.context_data)
self.system_prompt = {
'role': "system",
'content': enriched_prompt
}
else:
self.system_prompt = {
'role': "system",
'content': ""
}
self.history = [self.system_prompt]
llm_config = {
"streaming_model": self.task_config["tools_config"]["llm_agent"]["streaming_model"],
"classification_model": self.task_config["tools_config"]["llm_agent"]["classification_model"]
}
# setting transcriber
if self.task_config["tools_config"]["transcriber"] is not None:
provider = "playground" if self.connected_through_dashboard else self.task_config["tools_config"]["input"][
"provider"]
self.task_config["tools_config"]["transcriber"]["input_queue"] = self.audio_queue
if self.task_config["tools_config"]["transcriber"]["model"] in SUPPORTED_TRANSCRIBER_MODELS.keys():
if self.connected_through_dashboard:
self.task_config["tools_config"]["transcriber"]["stream"] = False
transcriber_class = SUPPORTED_TRANSCRIBER_MODELS.get(
self.task_config["tools_config"]["transcriber"]["model"])
self.tools["transcriber"] = transcriber_class(provider, **self.task_config["tools_config"]["transcriber"])
# setting synthesizer
logger.info(f"Synthesizer config: {self.task_config['tools_config']['synthesizer']}")
if self.task_config["tools_config"]["synthesizer"] is not None:
self.synthesizer_provider = self.task_config["tools_config"]["synthesizer"].pop("provider")
synthesizer_class = SUPPORTED_SYNTHESIZER_MODELS.get(self.synthesizer_provider)
provider_config = self.task_config["tools_config"]["synthesizer"].pop("provider_config")
if self.connected_through_dashboard:
self.task_config["tools_config"]["synthesizer"]["audio_format"] = "mp3" # Hard code mp3 if we're connected through dashboard
self.task_config["tools_config"]["synthesizer"]["stream"] = False #Hardcode stream to be False as we don't want to get blocked by a __listen_synthesizer co-routine
self.tools["synthesizer"] = synthesizer_class(**self.task_config["tools_config"]["synthesizer"], **provider_config)
llm_config["max_tokens"] = self.task_config["tools_config"]["synthesizer"].get('max_tokens')
llm_config["buffer_size"] = self.task_config["tools_config"]["synthesizer"].get('buffer_size')
# setting llm
if self.task_config["tools_config"]["llm_agent"]["family"] in SUPPORTED_LLM_MODELS.keys():
llm_class = SUPPORTED_LLM_MODELS.get(self.task_config["tools_config"]["llm_agent"]["family"])
llm = llm_class(**llm_config)
else:
raise Exception(f'LLM {self.task_config["tools_config"]["llm_agent"]["family"]} not supported')
if self.task_config["task_type"] == "conversation":
if self.task_config["tools_config"]["llm_agent"]["agent_flow_type"] == "streaming":
self.tools["llm_agent"] = StreamingContextualAgent(llm)
elif self.task_config["tools_config"]["llm_agent"]["agent_flow_type"] in ("preprocessed", "formulaic"):
preprocessed = self.task_config["tools_config"]["llm_agent"]["agent_flow_type"] == "preprocessed"
logger.info(f"LLM TYPE {type(llm)}")
self.tools["llm_agent"] = GraphBasedConversationAgent(llm, context_data=self.context_data,
prompts=self.prompts, preprocessed=preprocessed)
elif self.task_config["task_type"] == "extraction":
logger.info("Setting up extraction agent")
self.tools["llm_agent"] = ExtractionContextualAgent(llm, prompt=self.system_prompt)
self.extracted_data = None
elif self.task_config["task_type"] == "summarization":
logger.info("Setting up summarization agent")
self.tools["llm_agent"] = SummarizationContextualAgent(llm, prompt=self.system_prompt)
self.summarized_data = None
logger.info("prompt and config setup completed")
########################
# LLM task
########################
async def _handle_llm_output(self, next_step, text_chunk, should_bypass_synth, meta_info):
logger.info("received text from LLM for output processing: {}".format(text_chunk))
if next_step == "synthesizer" and not should_bypass_synth:
task = asyncio.gather(self._synthesize(create_ws_data_packet(text_chunk, meta_info)))
self.synthesizer_tasks.append(asyncio.ensure_future(task))
elif self.tools["output"] is not None:
await self.tools["output"].handle(create_ws_data_packet(text_chunk, meta_info))
def _get_next_step(self, sequence, origin):
try:
return next((self.pipelines[sequence][i + 1] for i in range(len(self.pipelines[sequence]) - 1) if
self.pipelines[sequence][i] == origin), "output")
except Exception as e:
logger.error(f"Error getting next step: {e}")
def _set_call_details(self, message):
if self.call_sid is not None and self.stream_sid is not None and "call_sid" not in message['meta_info'] and "stream_sid" not in message['meta_info']:
return
if "call_sid" in message['meta_info']:
self.call_sid = message['meta_info']["call_sid"]
if "stream_sid" in message:
self.stream_sid = message['meta_info']["stream_sid"]
async def _process_followup_task(self, message, sequence, meta_info):
message = format_messages(self.input_parameters["messages"]) # Remove the initial system prompt
self.history.append({
'role': 'user',
'content': message
})
json_data = await self.tools["llm_agent"].generate(self.history)
if "summary" in json_data:
logger.info(f'Summary {json_data["summary"]}')
self.summarized_data = json_data["summary"]
else:
json_data = clean_json_string(json_data)
logger.info(f"After replacing {json_data}")
json_data = json.loads(json_data)
self.extracted_data = json_data
logger.info("Done")
async def _process_conversation_preprocessed_task(self, message, sequence, meta_info):
if self.task_config["tools_config"]["llm_agent"]['agent_flow_type'] == "preprocessed":
llm_response = ""
self.history.append({
'role': 'user',
'content': message['data']
})
start_time = time.time()
async for text_chunk in self.tools['llm_agent'].generate(self.history, stream=True, synthesize=True,
label_flow=self.label_flow):
if text_chunk == "<end_of_conversation>":
logger.info("Got end of conversation. I'm stopping now")
self.conversation_ended = True
await asyncio.sleep(5) #Make sure that the message is passed over and complete before cutting the handler
await self.tools["input"].stop_handler()
logger.info("Stopped input handler")
if "transcriber" in self.tools and not self.connected_through_dashboard:
logger.info("Stopping transcriber")
await self.tools["transcriber"].toggle_connection()
await asyncio.sleep(5) # Making sure whatever message was passed is over
return
logger.info(f"Text chunk {text_chunk}")
if is_valid_md5(text_chunk):
self.synthesizer_tasks.append(asyncio.create_task(
self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=True))))
else:
self.synthesizer_tasks.append(asyncio.create_task(
self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=False))))
async def _process_conversation_formulaic_task(self, message, sequence, meta_info):
self.history.append({
'role': 'user',
'content': message['data']
})
start_time = time.time()
llm_response = ""
logger.info("Agent flow is formulaic and hence moving smoothly")
async for text_chunk in self.tools['llm_agent'].generate(self.history, stream=True, synthesize=True):
if is_valid_md5(text_chunk):
self.synthesizer_tasks.append(asyncio.create_task(
self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=True))))
else:
# TODO Make it more modular
llm_response += " " +text_chunk
next_step = self._get_next_step(sequence, "llm")
if next_step == "synthesizer":
task = asyncio.gather(self._synthesize(create_ws_data_packet(text_chunk, meta_info)))
self.synthesizer_tasks.append(asyncio.ensure_future(task))
else:
logger.info(f"Sending output text {sequence}")
await self.tools["output"].handle(create_ws_data_packet(text_chunk, meta_info))
self.synthesizer_tasks.append(asyncio.create_task(
self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=False))))
async def _process_conversation_task(self, message, sequence, meta_info):
next_step = None
logger.info("agent flow is not preprocessed")
llm_response = ""
self.history.append({
'role': 'user',
'content': message['data']
})
start_time = time.time()
should_bypass_synth = 'bypass_synth' in meta_info and meta_info['bypass_synth'] == True
next_step = self._get_next_step(sequence, "llm")
curr_sequence_id = self.curr_sequence_id + 1
meta_info["sequence_id"] = curr_sequence_id
cache_response = self.cache.get(get_md5_hash(message['data'])) if self.cache is not None else None
if cache_response is not None:
logger.info("It was a cache hit and hence simply returning")
await self._handle_llm_output(next_step, cache_response, should_bypass_synth, meta_info)
else:
async for llm_message in self.tools['llm_agent'].generate(self.history, synthesize=True):
text_chunk, end_of_llm_stream = llm_message
logger.info(f"###### time to get the first chunk {time.time() - start_time} {text_chunk}")
llm_response += " " + text_chunk
if end_of_llm_stream:
meta_info["end_of_llm_stream"] = True
if self.stream:
await self._handle_llm_output(next_step, text_chunk, should_bypass_synth, meta_info)
if not self.stream:
meta_info["end_of_llm_stream"]= True
await self._handle_llm_output(next_step, llm_response, should_bypass_synth, meta_info)
#add to cache
# if self.cache is not None:
# self.cache.set(get_md5_hash(message['data']), llm_response)
if self.current_request_id in self.llm_rejected_request_ids:
logger.info("User spoke while LLM was generating response")
else:
self.history.append({"role": "assistant", "content": llm_response})
# TODO : Write a better check for completion prompt
#answer = await self.tools["llm_agent"].check_for_completion(self.history)
answer = False
if answer:
logger.info("Got end of conversation. I'm stopping now")
self.conversation_ended = True
self.ended_by_assistant = True
await self.tools["input"].stop_handler()
logger.info("Stopped input handler")
if "transcriber" in self.tools and not self.connected_through_dashboard:
logger.info("Stopping transcriber")
await self.tools["transcriber"].toggle_connection()
await asyncio.sleep(5) # Making sure whatever message was passed is over
return
self.llm_processed_request_ids.add(self.current_request_id)
llm_response = ""
def _extract_sequence_and_meta(self, message):
sequence, meta_info = None, None
if isinstance(message, dict) and "meta_info" in message:
self._set_call_details(message)
sequence = message["meta_info"]["sequence"]
meta_info = message["meta_info"]
return sequence, meta_info
def _is_extraction_task(self):
return self.task_config["task_type"] == "extraction"
def _is_summarization_task(self):
return self.task_config["task_type"] == "summarization"
def _is_conversation_task(self):
return self.task_config["task_type"] == "conversation"
def _is_preprocessed_flow(self):
return self.task_config["tools_config"]["llm_agent"]['agent_flow_type'] == "preprocessed"
def _is_formulaic_flow(self):
return self.task_config["tools_config"]["llm_agent"]['agent_flow_type'] == "formulaic"
# This is used only in the case it's a text based chatbot
async def _listen_llm_input_queue(self):
logger.info(
f"Starting listening to LLM queue as either Connected to dashboard = {self.connected_through_dashboard} or it's a textual chat agent {self.textual_chat_agent}")
while True:
try:
ws_data_packet = await self.queues["llm"].get()
logger.info(f"ws_data_packet {ws_data_packet}")
bos_packet = create_ws_data_packet("<beginning_of_stream>", ws_data_packet['meta_info'])
await self.tools["output"].handle(bos_packet)
await self._run_llm_task(
ws_data_packet) # In case s3 is down and it's an audio processing job, this might produce blank message on the frontend of playground.
eos_packet = create_ws_data_packet("<end_of_stream>", ws_data_packet['meta_info'])
await self.tools["output"].handle(eos_packet)
except Exception as e:
traceback.print_exc()
logger.error(f"Something went wrong with LLM queue {e}")
break
async def _run_llm_task(self, message):
logger.info("running llm based agent")
sequence, meta_info = self._extract_sequence_and_meta(message)
try:
if self._is_extraction_task() or self._is_summarization_task():
await self._process_followup_task(message, sequence, meta_info)
elif self._is_conversation_task():
if self._is_preprocessed_flow():
await self._process_conversation_preprocessed_task(message, sequence, meta_info)
elif self._is_formulaic_flow():
await self._process_conversation_formulaic_task(message, sequence, meta_info)
else:
await self._process_conversation_task(message, sequence, meta_info)
else:
logger.error("unsupported task type: {}".format(self.task_config["task_type"]))
self.llm_task = None
except Exception as e:
traceback.print_exc()
logger.error(f"Something went wrong in llm: {e}")
async def process_transcriber_request(self, meta_info):
if not self.current_request_id or self.current_request_id != meta_info["request_id"]:
self.previous_request_id, self.current_request_id = self.current_request_id, meta_info["request_id"]
sequence = meta_info["sequence"]
# check if previous request id is not in transmitted request id
if self.previous_request_id is None:
is_first_message = True
elif self.previous_request_id not in self.llm_processed_request_ids:
self.llm_rejected_request_ids.add(self.previous_request_id)
else:
skip_append_to_data = False
return sequence
async def process_interruption(self):
await self.tools["output"].handle_interruption()
self.sequence_ids = set() #Remove all the sequence ids so subsequent won't be processed
if self.llm_task is not None:
self.llm_task.cancel()
self.llm_task = None
self.was_long_pause = True
# if len(self.synthesizer_tasks) > 0:
# for synth_task in self.synthesizer_tasks:
# synth_task.cancel()
# self.synthesizer_tasks = []
########################
# Transcriber task
########################
async def _handle_transcriber_output(self, next_task, transcriber_message, meta_info):
if next_task == "llm":
meta_info["origin"] = "transcriber"
self.llm_task = asyncio.create_task(
self._run_llm_task(create_ws_data_packet(transcriber_message, meta_info)))
elif next_task == "synthesizer":
self.synthesizer_tasks.append(asyncio.create_task(
self._synthesize(create_ws_data_packet(transcriber_message, meta_info))))
else:
logger.info(f"Need to separate out output task")
async def _listen_transcriber(self):
transcriber_message = ""
start_time = None
try:
if self.stream:
async for message in self.tools["transcriber"].transcribe():
if message['data'] == "transcriber_connection_closed":
self.transcriber_duration += message['meta_info']["transcriber_duration"]
logger.info("transcriber connection closed")
return
self._set_call_details(message)
meta_info = message["meta_info"]
sequence = await self.process_transcriber_request(meta_info)
if message['data'] == "TRANSCRIBER_BEGIN":
logger.info("starting transcriber stream")
start_time = time.time()
await self.tools["output"].handle_interruption()
if self.llm_task is not None:
logger.info("Cancelling LLM Task as it's on")
self.llm_task.cancel()
self.llm_task = None
self.was_long_pause = True
if len(self.synthesizer_tasks) > 0:
logger.info("Cancelling Synthesizer tasks")
for synth_task in self.synthesizer_tasks:
synth_task.cancel()
self.synthesizer_tasks = []
continue
elif message['data'] == "TRANSCRIBER_END":
logger.info("transcriber stream and preparing the next step")
next_task = self._get_next_step(sequence, "transcriber")
logger.info(f'got the next task {next_task}')
if self.was_long_pause:
logger.info(
f"Seems like there was a long pause {self.history[-1]['content']} , {transcriber_message}")
message = self.history[-1]['content'] + " " + transcriber_message
self.history = self.history[:-1]
self.was_long_pause = False
logger.info(f'invoking next_task {next_task} with transcriber_message: {transcriber_message}')
await self._handle_transcriber_output(next_task, transcriber_message, meta_info)
transcriber_message = ""
continue
else:
logger.info("processed text from transcriber: {}".format(message['data']))
transcriber_message += message['data']
else:
logger.info("Not a streaming conversation. Hence getting a full blown transcript")
async for message in self.tools["transcriber"].transcribe():
logger.info(f"message from transcriber {message}")
sequence = message["meta_info"]["sequence"]
next_task = self._get_next_step(sequence, "transcriber")
self.transcriber_duration += message["meta_info"]["transcriber_duration"] if "transcriber_duration" in message["meta_info"] else 0
await self._handle_transcriber_output(next_task, message['data'], message["meta_info"])
except Exception as e:
traceback.print_exc()
logger.error(f"Error in transcriber {e}")
async def __listen_synthesizer(self):
try:
if self.stream and self.synthesizer_provider != "polly":
logger.info("Opening websocket connection to synthesizer")
await self.tools["synthesizer"].open_connection()
while True:
logger.info("Listening to synthesizer")
async for message in self.tools["synthesizer"].generate():
if not self.conversation_ended and message["meta_info"]["sequence_id"] in self.sequence_ids:
await self.tools["output"].handle(message)
if "end_of_synthesizer_stream" in message["meta_info"] and message["meta_info"]["end_of_synthesizer_stream"]:
logger.info(f"Got End of stream and hence removing from sequence ids {self.sequence_ids} {message['meta_info']['sequence_id']}")
self.sequence_ids.remove(message["meta_info"]["sequence_id"])
await asyncio.sleep(1)
except Exception as e:
logger.error(f"Error in synthesizer {e}")
async def _synthesize(self, message):
meta_info = message["meta_info"]
text = message["data"]
meta_info["type"] = "audio"
try:
if meta_info["is_md5_hash"]:
logger.info('sending preprocessed audio response to {}'.format(
self.task_config["tools_config"]["output"]["provider"])) | audio_chunk = await get_raw_audio_bytes_from_base64(self.assistant_name, text, | 3 | 2023-12-13 09:07:35+00:00 | 8k |
relari-ai/continuous-eval | tests/retrieval_metrics_test.py | [
{
"identifier": "LLMFactory",
"path": "continuous_eval/llm_factory.py",
"snippet": "class LLMFactory(LLMInterface):\n def __init__(self, model):\n super().__init__()\n self.model = model\n if model in [\"gpt-3.5-turbo-1106\", \"gpt-3.5-turbo-16k\", \"gpt-4-1106-preview\"]:\n ... | import pytest
from continuous_eval.llm_factory import LLMFactory
from continuous_eval.metrics import (
ExactSentenceMatch,
LLMBasedContextCoverage,
LLMBasedContextPrecision,
PrecisionRecallF1,
RankedRetrievalMetrics,
RougeChunkMatch,
RougeSentenceMatch,
)
from tests.helpers import example_datum
from tests.helpers.utils import all_close, in_zero_one | 6,792 |
def test_precision_recall_exact_chunk_match():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
expected_results = [
{"context_precision": 0.0, "context_recall": 0.0, "context_f1": 0.0},
{"context_precision": 1.0, "context_recall": 1.0, "context_f1": 1.0},
]
metric = PrecisionRecallF1(RougeChunkMatch(threshold=0.7))
assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results))
def test_precision_recall_exact_sentence_match():
data = [example_datum.ROMEO_AND_JULIET]
expected_results = [{"context_precision": 1.0, "context_recall": 1.0, "context_f1": 1.0}]
metric = PrecisionRecallF1(RougeSentenceMatch(threshold=0.8))
assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results))
def test_precision_recall_rouge_sentence_match():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.IMPLICATIONS_GLOBAL_WARMING]
expected_results = [
{"context_precision": 0.0, "context_recall": 0.0, "context_f1": 0.0},
{"context_precision": 0.09090909090909091, "context_recall": 0.5, "context_f1": 0.15384615384615385},
]
metric = PrecisionRecallF1(RougeSentenceMatch())
assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results))
def test_ranked_retrieval_exact_chunk_match():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
expected_results = [
{"average_precision": 0, "reciprocal_rank": 0, "ndcg": 0.0},
{"average_precision": 1.0, "reciprocal_rank": 1.0, "ndcg": 1.0},
]
metric = RankedRetrievalMetrics(RougeChunkMatch())
assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results))
def test_ranked_retrieval_exact_sentence_match():
with pytest.raises(AssertionError):
RankedRetrievalMetrics(ExactSentenceMatch())
def test_llm_based_context_precision():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
metric = LLMBasedContextPrecision()
assert all(in_zero_one(metric.calculate(**datum)) for datum in data)
def test_llm_based_context_coverage_openai():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
|
def test_precision_recall_exact_chunk_match():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
expected_results = [
{"context_precision": 0.0, "context_recall": 0.0, "context_f1": 0.0},
{"context_precision": 1.0, "context_recall": 1.0, "context_f1": 1.0},
]
metric = PrecisionRecallF1(RougeChunkMatch(threshold=0.7))
assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results))
def test_precision_recall_exact_sentence_match():
data = [example_datum.ROMEO_AND_JULIET]
expected_results = [{"context_precision": 1.0, "context_recall": 1.0, "context_f1": 1.0}]
metric = PrecisionRecallF1(RougeSentenceMatch(threshold=0.8))
assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results))
def test_precision_recall_rouge_sentence_match():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.IMPLICATIONS_GLOBAL_WARMING]
expected_results = [
{"context_precision": 0.0, "context_recall": 0.0, "context_f1": 0.0},
{"context_precision": 0.09090909090909091, "context_recall": 0.5, "context_f1": 0.15384615384615385},
]
metric = PrecisionRecallF1(RougeSentenceMatch())
assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results))
def test_ranked_retrieval_exact_chunk_match():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
expected_results = [
{"average_precision": 0, "reciprocal_rank": 0, "ndcg": 0.0},
{"average_precision": 1.0, "reciprocal_rank": 1.0, "ndcg": 1.0},
]
metric = RankedRetrievalMetrics(RougeChunkMatch())
assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results))
def test_ranked_retrieval_exact_sentence_match():
with pytest.raises(AssertionError):
RankedRetrievalMetrics(ExactSentenceMatch())
def test_llm_based_context_precision():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
metric = LLMBasedContextPrecision()
assert all(in_zero_one(metric.calculate(**datum)) for datum in data)
def test_llm_based_context_coverage_openai():
data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
| metric = LLMBasedContextCoverage(model=LLMFactory("gpt-3.5-turbo-1106")) | 0 | 2023-12-08 21:30:39+00:00 | 8k |
Seunggu0305/VLCounter | tools/models/VLCounter.py | [
{
"identifier": "VPTCLIPVisionTransformer",
"path": "tools/models/ViT_Encoder.py",
"snippet": "class VPTCLIPVisionTransformer(nn.Module):\n def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[6,7,8,11], pretrained=No... | import math
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .ViT_Encoder import VPTCLIPVisionTransformer as vpt
from .ViT_Encoder_add import SPTCLIPVisionTransformer as spt
from .Text_Encoder import CLIPTextEncoder
from timm.models.layers import trunc_normal_ | 6,434 |
def trunc_normal_init(module: nn.Module,
mean: float = 0,
std: float = 1,
a: float = -2,
b: float = 2,
bias: float = 0) -> None:
if hasattr(module, 'weight') and module.weight is not None:
trunc_normal_(module.weight, mean, std, a, b) # type: ignore
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias) # type: ignore
def constant_init(module, val, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.constant_(module.weight, val)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
class UpConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel, padding=0, flag=True):
super(UpConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel, padding=padding)
if flag:
self.gn = nn.GroupNorm(8, out_channels)
self.gelu = nn.GELU()
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
self.flag = flag
def forward(self, trg):
trg = self.conv(trg)
if self.flag:
trg = self.up(self.gelu(self.gn(trg)))
return trg
class Counter(nn.Module):
def __init__(self, args):
super(Counter,self).__init__()
self.v = args.v
self.enc = args.enc
embed_dims = 512
proj_dims = 64
self.t_proj = nn.Linear(embed_dims, proj_dims)
self.v_proj = nn.Linear(embed_dims, proj_dims)
self.proj = nn.Sequential(
nn.Conv2d(768, proj_dims, 1),
nn.GroupNorm(8, proj_dims),
nn.GELU(),
nn.UpsamplingBilinear2d(scale_factor=2)
)
self.proj1 = nn.Sequential(
nn.Conv2d(768, proj_dims, 1),
nn.GroupNorm(8, proj_dims),
nn.GELU(),
nn.UpsamplingBilinear2d(scale_factor=4)
)
self.proj2 = nn.Sequential(
nn.Conv2d(768, proj_dims, 1),
nn.GroupNorm(8, proj_dims),
nn.GELU(),
nn.UpsamplingBilinear2d(scale_factor=8)
)
self.decoder = nn.ModuleList([
UpConv(proj_dims+1, proj_dims, 3, 1),
UpConv(proj_dims, proj_dims, 3,1),
UpConv(proj_dims, proj_dims, 3, 1),
UpConv(proj_dims, proj_dims, 3,1),
UpConv(proj_dims, 1, 1, flag=False)
])
self.attn_weight = nn.Parameter(torch.ones(1, 1, 24, 24))
self.attn_bias = nn.Parameter(torch.zeros(1, 1, 24, 24))
self.init_weights()
if args.enc == "spt":
self.v_enc = spt(pretrained=args.MODEL.pretrain+'ViT-B-16.pt', num_tokens=args.num_tokens, patch_size=args.patch_size)
self.v_enc.init_weights()
elif args.enc == "vpt":
|
def trunc_normal_init(module: nn.Module,
mean: float = 0,
std: float = 1,
a: float = -2,
b: float = 2,
bias: float = 0) -> None:
if hasattr(module, 'weight') and module.weight is not None:
trunc_normal_(module.weight, mean, std, a, b) # type: ignore
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias) # type: ignore
def constant_init(module, val, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.constant_(module.weight, val)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
class UpConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel, padding=0, flag=True):
super(UpConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel, padding=padding)
if flag:
self.gn = nn.GroupNorm(8, out_channels)
self.gelu = nn.GELU()
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
self.flag = flag
def forward(self, trg):
trg = self.conv(trg)
if self.flag:
trg = self.up(self.gelu(self.gn(trg)))
return trg
class Counter(nn.Module):
def __init__(self, args):
super(Counter,self).__init__()
self.v = args.v
self.enc = args.enc
embed_dims = 512
proj_dims = 64
self.t_proj = nn.Linear(embed_dims, proj_dims)
self.v_proj = nn.Linear(embed_dims, proj_dims)
self.proj = nn.Sequential(
nn.Conv2d(768, proj_dims, 1),
nn.GroupNorm(8, proj_dims),
nn.GELU(),
nn.UpsamplingBilinear2d(scale_factor=2)
)
self.proj1 = nn.Sequential(
nn.Conv2d(768, proj_dims, 1),
nn.GroupNorm(8, proj_dims),
nn.GELU(),
nn.UpsamplingBilinear2d(scale_factor=4)
)
self.proj2 = nn.Sequential(
nn.Conv2d(768, proj_dims, 1),
nn.GroupNorm(8, proj_dims),
nn.GELU(),
nn.UpsamplingBilinear2d(scale_factor=8)
)
self.decoder = nn.ModuleList([
UpConv(proj_dims+1, proj_dims, 3, 1),
UpConv(proj_dims, proj_dims, 3,1),
UpConv(proj_dims, proj_dims, 3, 1),
UpConv(proj_dims, proj_dims, 3,1),
UpConv(proj_dims, 1, 1, flag=False)
])
self.attn_weight = nn.Parameter(torch.ones(1, 1, 24, 24))
self.attn_bias = nn.Parameter(torch.zeros(1, 1, 24, 24))
self.init_weights()
if args.enc == "spt":
self.v_enc = spt(pretrained=args.MODEL.pretrain+'ViT-B-16.pt', num_tokens=args.num_tokens, patch_size=args.patch_size)
self.v_enc.init_weights()
elif args.enc == "vpt": | self.v_enc = vpt(pretrained=args.MODEL.pretrain+'ViT-B-16.pt') | 1 | 2023-12-13 08:00:28+00:00 | 8k |
qitan/devops-backend-lite | apps/dashboard/views.py | [
{
"identifier": "Search",
"path": "common/utils/ElasticSearchAPI.py",
"snippet": "class Search(BaseSearch):\n def __init__(self, prefix=False, **kwargs):\n if kwargs.get('index', None) and prefix:\n if isinstance(kwargs['index'], string_types):\n kwargs['index'] = f\"... | import random
import operator
import logging
from functools import reduce
from jira import Project
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import action
from django.db.models import Q, Count, Sum, Avg
from django.db.models.query import QuerySet
from django.apps import apps
from django.db.models.functions import ExtractWeek, ExtractYear, ExtractDay, ExtractMonth
from elasticsearch_dsl import Q as EQ
from common.utils.ElasticSearchAPI import Search
from dbapp.model.model_cmdb import Product, MicroApp
from common.ext_fun import get_datadict, get_time_range
from common.extends.viewsets import CustomModelViewSet
from common.variables import CMDB_RELATED_TYPE, DASHBOARD_CONFIG, DASHBOARD_TIME_FORMAT_T, DASHBOARD_TIME_FORMAT_T_ES
from dbapp.model.model_dashboard import DashBoard
from dashboard.serializers import DashBoardSerializers
from dbapp.model.model_deploy import BuildJob | 4,831 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Author : Charles Lai
@Contact : qqing_lai@hotmail.com
@Time : 2021/12/27 17:47
@FileName: views.py
@Blog : https://imaojia.com
'''
logger = logging.getLogger(__name__)
class LiveCheck(APIView):
"""
探针检测
"""
permission_classes = []
def get(self, request, format=None):
return Response('PONG')
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Author : Charles Lai
@Contact : qqing_lai@hotmail.com
@Time : 2021/12/27 17:47
@FileName: views.py
@Blog : https://imaojia.com
'''
logger = logging.getLogger(__name__)
class LiveCheck(APIView):
"""
探针检测
"""
permission_classes = []
def get(self, request, format=None):
return Response('PONG')
| class DashBoardViewSet(CustomModelViewSet): | 5 | 2023-12-13 03:09:32+00:00 | 8k |
abing7k/redroid-script | redroid.py | [
{
"identifier": "Gapps",
"path": "stuffs/gapps.py",
"snippet": "class Gapps(General):\n dl_links = {\n \"x86_64\": [\"https://cfhcable.dl.sourceforge.net/project/opengapps/x86_64/20220503/open_gapps-x86_64-10.0-pico-20220503.zip\", \"5fb186bfb7bed8925290f79247bec4cf\"],\n \"x86\... | import argparse
import tools.helper as helper
import subprocess
from stuffs.gapps import Gapps
from stuffs.magisk import Magisk
from stuffs.ndk import Ndk
from stuffs.widevine import Widevine | 4,324 | #!/usr/bin/env python3
def main():
dockerfile = ""
tags = []
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--android-version',
dest='android',
help='Specify the Android version to build',
default='11.0.0',
choices=['13.0.0', '12.0.0', '12.0.0_64only', '11.0.0', '10.0.0', '9.0.0', '8.1.0'])
parser.add_argument('-g', '--install-gapps',
dest='gapps',
help='Install OpenGapps to ReDroid',
action='store_true')
parser.add_argument('-n', '--install-ndk-translation',
dest='ndk',
help='Install libndk translation files',
action='store_true')
parser.add_argument('-m', '--install-magisk', dest='magisk',
help='Install Magisk ( Bootless )',
action='store_true')
parser.add_argument('-w', '--install-widevine', dest='widevine',
help='Integrate Widevine DRM (L3)',
action='store_true')
parser.add_argument('-c', '--container',
dest='container',
default='docker',
help='Specify container type',
choices=['docker', 'podman'])
args = parser.parse_args()
dockerfile = dockerfile + \
"FROM redroid/redroid:{}-latest\n".format(
args.android)
tags.append(args.android)
if args.gapps:
Gapps().install()
dockerfile = dockerfile + "COPY gapps /\n"
tags.append("gapps")
if args.ndk:
if args.android in ["11.0.0", "12.0.0", "12.0.0_64only"]:
arch = helper.host()[0]
if arch == "x86" or arch == "x86_64":
| #!/usr/bin/env python3
def main():
dockerfile = ""
tags = []
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--android-version',
dest='android',
help='Specify the Android version to build',
default='11.0.0',
choices=['13.0.0', '12.0.0', '12.0.0_64only', '11.0.0', '10.0.0', '9.0.0', '8.1.0'])
parser.add_argument('-g', '--install-gapps',
dest='gapps',
help='Install OpenGapps to ReDroid',
action='store_true')
parser.add_argument('-n', '--install-ndk-translation',
dest='ndk',
help='Install libndk translation files',
action='store_true')
parser.add_argument('-m', '--install-magisk', dest='magisk',
help='Install Magisk ( Bootless )',
action='store_true')
parser.add_argument('-w', '--install-widevine', dest='widevine',
help='Integrate Widevine DRM (L3)',
action='store_true')
parser.add_argument('-c', '--container',
dest='container',
default='docker',
help='Specify container type',
choices=['docker', 'podman'])
args = parser.parse_args()
dockerfile = dockerfile + \
"FROM redroid/redroid:{}-latest\n".format(
args.android)
tags.append(args.android)
if args.gapps:
Gapps().install()
dockerfile = dockerfile + "COPY gapps /\n"
tags.append("gapps")
if args.ndk:
if args.android in ["11.0.0", "12.0.0", "12.0.0_64only"]:
arch = helper.host()[0]
if arch == "x86" or arch == "x86_64": | Ndk().install() | 2 | 2023-12-06 09:03:05+00:00 | 8k |
zvict/papr | models/model.py | [
{
"identifier": "normalize_vector",
"path": "models/utils.py",
"snippet": "def normalize_vector(x, eps=0.):\n # assert(x.shape[-1] == 3)\n return x / (torch.norm(x, dim=-1, keepdim=True) + eps)"
},
{
"identifier": "create_learning_rate_fn",
"path": "models/utils.py",
"snippet": "de... | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import os
import numpy as np
from .utils import normalize_vector, create_learning_rate_fn, add_points_knn, activation_func
from .mlp import get_mapping_mlp
from .tx import get_transformer
from .renderer import get_generator | 4,447 |
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class PAPR(nn.Module):
def __init__(self, args, device='cuda'):
super(PAPR, self).__init__()
self.args = args
self.eps = args.eps
self.device = device
self.use_amp = args.use_amp
self.amp_dtype = torch.float16 if args.amp_dtype == 'float16' else torch.bfloat16
self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)
point_opt = args.geoms.points
pc_feat_opt = args.geoms.point_feats
bkg_feat_opt = args.geoms.background
self.register_buffer('select_k', torch.tensor(
point_opt.select_k, device=device, dtype=torch.int32))
self.coord_scale = args.dataset.coord_scale
if point_opt.load_path:
if point_opt.load_path.endswith('.pth') or point_opt.load_path.endswith('.pt'):
points = torch.load(point_opt.load_path, map_location='cpu')
points = np.asarray(points).astype(np.float32)
np.random.shuffle(points)
points = points[:args.max_num_pts, :]
points = torch.from_numpy(points).float()
print("Loaded points from {}, shape: {}, dtype {}".format(point_opt.load_path, points.shape, points.dtype))
print("Loaded points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max())
else:
# Initialize point positions
pt_init_center = [i * self.coord_scale for i in point_opt.init_center]
pt_init_scale = [i * self.coord_scale for i in point_opt.init_scale]
if point_opt.init_type == 'sphere': # initial points on a sphere
points = self._sphere_pc(pt_init_center, point_opt.num, pt_init_scale)
elif point_opt.init_type == 'cube': # initial points in a cube
points = self._cube_normal_pc(pt_init_center, point_opt.num, pt_init_scale)
else:
raise NotImplementedError("Point init type [{:s}] is not found".format(point_opt.init_type))
print("Scratch points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max())
self.points = torch.nn.Parameter(points, requires_grad=True)
# Initialize point influence scores
self.points_influ_scores = torch.nn.Parameter(torch.ones(
points.shape[0], 1, device=device) * point_opt.influ_init_val, requires_grad=True)
# Initialize mapping MLP, only if fine-tuning with IMLE for the exposure control
self.mapping_mlp = None
if args.models.mapping_mlp.use:
self.mapping_mlp = get_mapping_mlp(
args.models, use_amp=self.use_amp, amp_dtype=self.amp_dtype)
# Initialize UNet
if args.models.use_renderer:
tx_opt = args.models.transformer
feat_dim = tx_opt.embed.d_ff_out if tx_opt.embed.share_embed else tx_opt.embed.value.d_ff_out
self.renderer = get_generator(args.models.renderer.generator, in_c=feat_dim,
out_c=3, use_amp=self.use_amp, amp_dtype=self.amp_dtype)
print("Renderer: ", count_parameters(self.renderer))
else:
assert (args.models.transformer.embed.share_embed and args.models.transformer.embed.d_ff_out == 3) or \
(not args.models.transformer.embed.share_embed and args.models.transformer.embed.value.d_ff_out == 3), \
"Value embedding MLP should have output dim 3 if not using renderer"
# Initialize background score and features
if bkg_feat_opt.init_type == 'random':
bkg_feat_init_func = torch.rand
elif bkg_feat_opt.init_type == 'zeros':
bkg_feat_init_func = torch.zeros
elif bkg_feat_opt.init_type == 'ones':
bkg_feat_init_func = torch.ones
else:
raise NotImplementedError(
"Background init type [{:s}] is not found".format(bkg_feat_opt.init_type))
feat_dim = 3
self.bkg_feats = nn.Parameter(bkg_feat_init_func(bkg_feat_opt.seq_len, feat_dim, device=device) * bkg_feat_opt.init_scale, requires_grad=bkg_feat_opt.learnable)
self.bkg_score = torch.tensor(bkg_feat_opt.constant, device=device, dtype=torch.float32).reshape(1)
# Initialize point features
self.use_pc_feats = pc_feat_opt.use_ink or pc_feat_opt.use_inq or pc_feat_opt.use_inv
if self.use_pc_feats:
self.pc_feats = nn.Parameter(torch.randn(points.shape[0], pc_feat_opt.dim), requires_grad=True)
print("Point features: ", self.pc_feats.shape, self.pc_feats.min(), self.pc_feats.max(), self.pc_feats.mean(), self.pc_feats.std())
v_extra_dim = 0
k_extra_dim = 0
q_extra_dim = 0
if pc_feat_opt.use_inv:
v_extra_dim = self.pc_feats.shape[-1]
print("Using v_extra_dim: ", v_extra_dim)
if pc_feat_opt.use_ink:
k_extra_dim = self.pc_feats.shape[-1]
print("Using k_extra_dim: ", k_extra_dim)
if pc_feat_opt.use_inq:
q_extra_dim = self.pc_feats.shape[-1]
print("Using q_extra_dim: ", q_extra_dim)
self.last_act = activation_func(args.models.last_act)
# Initialize proximity attention layer(s)
|
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class PAPR(nn.Module):
def __init__(self, args, device='cuda'):
super(PAPR, self).__init__()
self.args = args
self.eps = args.eps
self.device = device
self.use_amp = args.use_amp
self.amp_dtype = torch.float16 if args.amp_dtype == 'float16' else torch.bfloat16
self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)
point_opt = args.geoms.points
pc_feat_opt = args.geoms.point_feats
bkg_feat_opt = args.geoms.background
self.register_buffer('select_k', torch.tensor(
point_opt.select_k, device=device, dtype=torch.int32))
self.coord_scale = args.dataset.coord_scale
if point_opt.load_path:
if point_opt.load_path.endswith('.pth') or point_opt.load_path.endswith('.pt'):
points = torch.load(point_opt.load_path, map_location='cpu')
points = np.asarray(points).astype(np.float32)
np.random.shuffle(points)
points = points[:args.max_num_pts, :]
points = torch.from_numpy(points).float()
print("Loaded points from {}, shape: {}, dtype {}".format(point_opt.load_path, points.shape, points.dtype))
print("Loaded points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max())
else:
# Initialize point positions
pt_init_center = [i * self.coord_scale for i in point_opt.init_center]
pt_init_scale = [i * self.coord_scale for i in point_opt.init_scale]
if point_opt.init_type == 'sphere': # initial points on a sphere
points = self._sphere_pc(pt_init_center, point_opt.num, pt_init_scale)
elif point_opt.init_type == 'cube': # initial points in a cube
points = self._cube_normal_pc(pt_init_center, point_opt.num, pt_init_scale)
else:
raise NotImplementedError("Point init type [{:s}] is not found".format(point_opt.init_type))
print("Scratch points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max())
self.points = torch.nn.Parameter(points, requires_grad=True)
# Initialize point influence scores
self.points_influ_scores = torch.nn.Parameter(torch.ones(
points.shape[0], 1, device=device) * point_opt.influ_init_val, requires_grad=True)
# Initialize mapping MLP, only if fine-tuning with IMLE for the exposure control
self.mapping_mlp = None
if args.models.mapping_mlp.use:
self.mapping_mlp = get_mapping_mlp(
args.models, use_amp=self.use_amp, amp_dtype=self.amp_dtype)
# Initialize UNet
if args.models.use_renderer:
tx_opt = args.models.transformer
feat_dim = tx_opt.embed.d_ff_out if tx_opt.embed.share_embed else tx_opt.embed.value.d_ff_out
self.renderer = get_generator(args.models.renderer.generator, in_c=feat_dim,
out_c=3, use_amp=self.use_amp, amp_dtype=self.amp_dtype)
print("Renderer: ", count_parameters(self.renderer))
else:
assert (args.models.transformer.embed.share_embed and args.models.transformer.embed.d_ff_out == 3) or \
(not args.models.transformer.embed.share_embed and args.models.transformer.embed.value.d_ff_out == 3), \
"Value embedding MLP should have output dim 3 if not using renderer"
# Initialize background score and features
if bkg_feat_opt.init_type == 'random':
bkg_feat_init_func = torch.rand
elif bkg_feat_opt.init_type == 'zeros':
bkg_feat_init_func = torch.zeros
elif bkg_feat_opt.init_type == 'ones':
bkg_feat_init_func = torch.ones
else:
raise NotImplementedError(
"Background init type [{:s}] is not found".format(bkg_feat_opt.init_type))
feat_dim = 3
self.bkg_feats = nn.Parameter(bkg_feat_init_func(bkg_feat_opt.seq_len, feat_dim, device=device) * bkg_feat_opt.init_scale, requires_grad=bkg_feat_opt.learnable)
self.bkg_score = torch.tensor(bkg_feat_opt.constant, device=device, dtype=torch.float32).reshape(1)
# Initialize point features
self.use_pc_feats = pc_feat_opt.use_ink or pc_feat_opt.use_inq or pc_feat_opt.use_inv
if self.use_pc_feats:
self.pc_feats = nn.Parameter(torch.randn(points.shape[0], pc_feat_opt.dim), requires_grad=True)
print("Point features: ", self.pc_feats.shape, self.pc_feats.min(), self.pc_feats.max(), self.pc_feats.mean(), self.pc_feats.std())
v_extra_dim = 0
k_extra_dim = 0
q_extra_dim = 0
if pc_feat_opt.use_inv:
v_extra_dim = self.pc_feats.shape[-1]
print("Using v_extra_dim: ", v_extra_dim)
if pc_feat_opt.use_ink:
k_extra_dim = self.pc_feats.shape[-1]
print("Using k_extra_dim: ", k_extra_dim)
if pc_feat_opt.use_inq:
q_extra_dim = self.pc_feats.shape[-1]
print("Using q_extra_dim: ", q_extra_dim)
self.last_act = activation_func(args.models.last_act)
# Initialize proximity attention layer(s) | transformer = get_transformer(args.models.transformer, | 5 | 2023-12-08 19:51:42+00:00 | 8k |
AdaCheng/EgoThink | models/instruct_blip/models/blip2_models/blip2.py | [
{
"identifier": "dist_utils",
"path": "models/instruct_blip/common/dist_utils.py",
"snippet": "def setup_for_distributed(is_master):\n def print(*args, **kwargs):\ndef is_dist_avail_and_initialized():\ndef get_world_size():\ndef get_rank():\ndef is_main_process():\ndef init_distributed_mode(args):\nd... | import contextlib
import logging
import os
import time
import datetime
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
import spacy
from ...common import dist_utils as dist_utils
from ...common.dist_utils import download_cached_file
from ...common.utils import is_url
from ...common.logger import MetricLogger
from ..base_model import BaseModel
from ..blip2_models.Qformer import BertConfig, BertLMHeadModel
from ..eva_vit import create_eva_vit_g
from ..clip_vit import create_clip_vit_L
from transformers import BertTokenizer | 4,090 | """
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
| """
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
| class Blip2Base(BaseModel): | 4 | 2023-12-05 14:17:17+00:00 | 8k |
3dlg-hcvc/cage | systems/base.py | [
{
"identifier": "SaverMixin",
"path": "utils/savermixins.py",
"snippet": "class SaverMixin():\n def set_save_dir(self, stage):\n self.hparams.save_dir = os.path.join(self.logger.log_dir, 'images', stage) \n os.makedirs(self.hparams.save_dir, exist_ok=True)\n\n @property\n def save... | import torch
import models
import numpy as np
import lightning.pytorch as pl
from diffusers import DDPMScheduler
from utils.savermixins import SaverMixin
from utils.refs import label_ref, joint_ref
from utils.plot import viz_graph, make_grid, add_text
from utils.render import rescale_axis, draw_boxes_axiss_anim, get_bbox_mesh_pair, get_axis_mesh | 5,940 | aabb_min = self.convert_format(x[:, 3:6])
center = (aabb_max + aabb_min) / 2.
size = (aabb_max - aabb_min).clip(min=1e-3)
j_type = torch.mean(x[:, 6:12], dim=1)
j_type = self.convert_format((j_type+0.5) * 5).clip(min=1., max=5.).round()
axis_d = self.convert_format(x[:, 12:15])
axis_d = axis_d / (np.linalg.norm(axis_d, axis=1, keepdims=True) + np.finfo(float).eps)
axis_o = self.convert_format(x[:, 15:18])
j_range = (x[:, 18:20] + x[:, 20:22] + x[:, 22:24]) / 3
j_range = self.convert_format(j_range).clip(min=-1., max=1.)
j_range[:, 0] = j_range[:, 0] * 360
j_range[:, 1] = j_range[:, 1]
label = torch.mean(x[:, 24:30], dim=1)
label = self.convert_format((label+0.8) * 5).clip(min=0., max=7.).round()
return {
'center': center,
'size': size,
'type': j_type,
'axis_d': axis_d,
'axis_o': axis_o,
'range': j_range,
'label': label
}
def convert_json_graph_only(self, c, idx):
out = {'diffuse_tree': []}
n_nodes = c['n_nodes'][idx].item()
par = c['parents'][idx].cpu().numpy().tolist()
adj = c['adj'][idx].cpu().numpy()
np.fill_diagonal(adj, 0)
for i in range(n_nodes):
node = {'id': i}
node['parent'] = int(par[i])
node['children'] = [intchild for child in np.where(adj[i] == 1)[0] if child != par[i]]
out['diffuse_tree'].append(node)
return out
def convert_json(self, x, c, idx):
n_nodes = c['n_nodes'][idx].item()
par = c['parents'][idx].cpu().numpy().tolist()
adj = c['adj'][idx].cpu().numpy()
np.fill_diagonal(adj, 0)
# convert the data to original range
data = self.convert_data_range(x)
# convert to json format
out = {'diffuse_tree': []}
out['meta'] = {
'obj_cat': c['obj_cat'][idx],
'tree_hash': c['tree_hash'][idx]
}
for i in range(n_nodes):
node = {'id': i}
node['name'] = label_ref['bwd'][int(data['label'][i].item())]
node['parent'] = int(par[i])
node['children'] = [int(child) for child in np.where(adj[i] == 1)[0] if child != par[i]]
node['aabb'] = {}
node['aabb']['center'] = data['center'][i].tolist()
node['aabb']['size'] = data['size'][i].tolist()
node['joint'] = {}
node['joint']['type'] = joint_ref['bwd'][int(data['type'][i].item())]
if node['joint']['type'] == 'fixed':
node['joint']['range'] = [0., 0.]
elif node['joint']['type'] == 'revolute':
node['joint']['range'] = [0., float(data['range'][i][0])]
elif node['joint']['type'] == 'continuous':
node['joint']['range'] = [0., 360.]
elif node['joint']['type'] == 'prismatic' or node['joint']['type'] == 'screw':
node['joint']['range'] = [0., float(data['range'][i][1])]
node['joint']['axis'] = {}
# relocate the axis to visualize well
axis_o, axis_d = rescale_axis(int(data['type'][i].item()), data['axis_d'][i], data['axis_o'][i], data['center'][i])
node['joint']['axis']['direction'] = axis_d
node['joint']['axis']['origin'] = axis_o
out['diffuse_tree'].append(node)
return out
# ------------------------------- visualizations ------------------------------- #
def prepare_meshes(self, info_dict):
'''
Function to prepare the bbox and axis meshes for visualization
Args:
- info_dict (dict): output json containing the graph information
'''
tree = info_dict['diffuse_tree']
bbox_0, bbox_1, axiss, labels, jtypes = [], [], [], [], []
root_id = 0
# get root id
for node in tree:
if node['parent'] == -1:
root_id = node['id']
for node in tree:
# retrieve info
box_cen = np.array(node['aabb']['center'])
box_size = np.array(node['aabb']['size'])
jrange = node['joint']['range']
jtype = node['joint']['type']
axis_d = np.array(node['joint']['axis']['direction'])
axis_o = np.array(node['joint']['axis']['origin'])
label = label_ref['fwd'][node['name']]
jtype_id = joint_ref['fwd'][node['joint']['type']]
# construct meshes for bbox
if node['id'] == root_id or node['parent'] == root_id: # no transform
bb_0, bb_1 = get_bbox_mesh_pair(box_cen, box_size, jtype=jtype_id, jrange=jrange, axis_d=axis_d, axis_o=axis_o)
else:
parent_id = node['parent']
jrange_p = tree[parent_id]['joint']['range']
jtype_p = tree[parent_id]['joint']['type']
jtype_p_id = joint_ref['fwd'][jtype_p]
axis_d_p = np.array(tree[parent_id]['joint']['axis']['direction'])
axis_o_p = np.array(tree[parent_id]['joint']['axis']['origin'])
bb_0, bb_1 = get_bbox_mesh_pair(box_cen, box_size, jtype=jtype_p_id, jrange=jrange_p, axis_d=axis_d_p, axis_o=axis_o_p)
# construct mesh for axis (the axis is not supporting transform for now)
|
class BaseSystem(pl.LightningModule, SaverMixin):
def __init__(self, hparams):
super().__init__()
self.hparams.update(hparams)
self.model = models.make(hparams.model.name, hparams.model)
self.scheduler = DDPMScheduler(**self.hparams.scheduler.config)
self.save_hyperparameters()
def setup(self, stage: str):
self.set_save_dir(stage) # config the logger dir for images
def configure_optimizers(self):
raise NotImplementedError
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def predict_step(self, batch, batch_idx, dataloader_idx=None):
raise NotImplementedError
# ------------------------------- data converters ------------------------------- #
def convert_data_range(self, x):
x = x.reshape(-1, 30) # (K, 30)
aabb_max = self.convert_format(x[:, 0:3])
aabb_min = self.convert_format(x[:, 3:6])
center = (aabb_max + aabb_min) / 2.
size = (aabb_max - aabb_min).clip(min=1e-3)
j_type = torch.mean(x[:, 6:12], dim=1)
j_type = self.convert_format((j_type+0.5) * 5).clip(min=1., max=5.).round()
axis_d = self.convert_format(x[:, 12:15])
axis_d = axis_d / (np.linalg.norm(axis_d, axis=1, keepdims=True) + np.finfo(float).eps)
axis_o = self.convert_format(x[:, 15:18])
j_range = (x[:, 18:20] + x[:, 20:22] + x[:, 22:24]) / 3
j_range = self.convert_format(j_range).clip(min=-1., max=1.)
j_range[:, 0] = j_range[:, 0] * 360
j_range[:, 1] = j_range[:, 1]
label = torch.mean(x[:, 24:30], dim=1)
label = self.convert_format((label+0.8) * 5).clip(min=0., max=7.).round()
return {
'center': center,
'size': size,
'type': j_type,
'axis_d': axis_d,
'axis_o': axis_o,
'range': j_range,
'label': label
}
def convert_json_graph_only(self, c, idx):
out = {'diffuse_tree': []}
n_nodes = c['n_nodes'][idx].item()
par = c['parents'][idx].cpu().numpy().tolist()
adj = c['adj'][idx].cpu().numpy()
np.fill_diagonal(adj, 0)
for i in range(n_nodes):
node = {'id': i}
node['parent'] = int(par[i])
node['children'] = [intchild for child in np.where(adj[i] == 1)[0] if child != par[i]]
out['diffuse_tree'].append(node)
return out
def convert_json(self, x, c, idx):
n_nodes = c['n_nodes'][idx].item()
par = c['parents'][idx].cpu().numpy().tolist()
adj = c['adj'][idx].cpu().numpy()
np.fill_diagonal(adj, 0)
# convert the data to original range
data = self.convert_data_range(x)
# convert to json format
out = {'diffuse_tree': []}
out['meta'] = {
'obj_cat': c['obj_cat'][idx],
'tree_hash': c['tree_hash'][idx]
}
for i in range(n_nodes):
node = {'id': i}
node['name'] = label_ref['bwd'][int(data['label'][i].item())]
node['parent'] = int(par[i])
node['children'] = [int(child) for child in np.where(adj[i] == 1)[0] if child != par[i]]
node['aabb'] = {}
node['aabb']['center'] = data['center'][i].tolist()
node['aabb']['size'] = data['size'][i].tolist()
node['joint'] = {}
node['joint']['type'] = joint_ref['bwd'][int(data['type'][i].item())]
if node['joint']['type'] == 'fixed':
node['joint']['range'] = [0., 0.]
elif node['joint']['type'] == 'revolute':
node['joint']['range'] = [0., float(data['range'][i][0])]
elif node['joint']['type'] == 'continuous':
node['joint']['range'] = [0., 360.]
elif node['joint']['type'] == 'prismatic' or node['joint']['type'] == 'screw':
node['joint']['range'] = [0., float(data['range'][i][1])]
node['joint']['axis'] = {}
# relocate the axis to visualize well
axis_o, axis_d = rescale_axis(int(data['type'][i].item()), data['axis_d'][i], data['axis_o'][i], data['center'][i])
node['joint']['axis']['direction'] = axis_d
node['joint']['axis']['origin'] = axis_o
out['diffuse_tree'].append(node)
return out
# ------------------------------- visualizations ------------------------------- #
def prepare_meshes(self, info_dict):
'''
Function to prepare the bbox and axis meshes for visualization
Args:
- info_dict (dict): output json containing the graph information
'''
tree = info_dict['diffuse_tree']
bbox_0, bbox_1, axiss, labels, jtypes = [], [], [], [], []
root_id = 0
# get root id
for node in tree:
if node['parent'] == -1:
root_id = node['id']
for node in tree:
# retrieve info
box_cen = np.array(node['aabb']['center'])
box_size = np.array(node['aabb']['size'])
jrange = node['joint']['range']
jtype = node['joint']['type']
axis_d = np.array(node['joint']['axis']['direction'])
axis_o = np.array(node['joint']['axis']['origin'])
label = label_ref['fwd'][node['name']]
jtype_id = joint_ref['fwd'][node['joint']['type']]
# construct meshes for bbox
if node['id'] == root_id or node['parent'] == root_id: # no transform
bb_0, bb_1 = get_bbox_mesh_pair(box_cen, box_size, jtype=jtype_id, jrange=jrange, axis_d=axis_d, axis_o=axis_o)
else:
parent_id = node['parent']
jrange_p = tree[parent_id]['joint']['range']
jtype_p = tree[parent_id]['joint']['type']
jtype_p_id = joint_ref['fwd'][jtype_p]
axis_d_p = np.array(tree[parent_id]['joint']['axis']['direction'])
axis_o_p = np.array(tree[parent_id]['joint']['axis']['origin'])
bb_0, bb_1 = get_bbox_mesh_pair(box_cen, box_size, jtype=jtype_p_id, jrange=jrange_p, axis_d=axis_d_p, axis_o=axis_o_p)
# construct mesh for axis (the axis is not supporting transform for now) | axis = get_axis_mesh(axis_d, axis_o, box_cen, jtype) | 8 | 2023-12-06 23:08:41+00:00 | 8k |
duxiaodan/intrinsic-lora | augunet_diode_pseudo_depth.py | [
{
"identifier": "plot_depth_map",
"path": "diode/diode.py",
"snippet": "def plot_depth_map(dm, validity_mask):\n validity_mask = validity_mask > 0\n MIN_DEPTH = 0.5\n MAX_DEPTH = min(300, np.percentile(dm, 99))\n dm = np.clip(dm, MIN_DEPTH, MAX_DEPTH)\n\n dm = (dm - np.min(dm)) / np.ptp(d... | import argparse
import logging
import math
import os
import os.path as osp
import random
import shutil
import wandb
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import torch.utils.checkpoint
import transformers
import diffusers
import copy
import json
import datetime
import matplotlib
import wandb
import xformers
import bitsandbytes as bnb
from pathlib import Path
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from torch.utils.data import Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel, DPMSolverMultistepScheduler
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
from PIL.ImageOps import exif_transpose
from diode.diode import (
plot_depth_map,
check_and_tuplize_tokens,
enumerate_paths,
_VALID_SPLITS,
_VALID_SCENE_TYPES
)
from torchvision.transforms.functional import pil_to_tensor
from torchvision.transforms.functional import to_pil_image
from rescale_cfg_pipeline_forward import new_call | 6,771 | ).images[0]
image = to_pil_image(pil_to_tensor(image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8))
val_test_images1.append(image)
if test_batches[1] is not None:
image = pipeline.new_call(
prompt_embeds = pipeline.text_encoder(test_batch2['input_ids'][ii:ii+1])[0],
# latents=test_batch2['noises'][ii].unsqueeze(0),
image = Image.fromarray(tensor2np(test_batch2['original_pixel_values'][ii])),
image_guidance_scale = 1.,
guidance_scale = 3.0,
generator=generator,
num_inference_steps = 25,
#Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf
guidance_rescale = 0.7,
# output_type = 'np',
).images[0]
image = to_pil_image(pil_to_tensor(image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8))
val_test_images2.append(image)
val_train_image = pipeline.new_call(
prompt_embeds=pipeline.text_encoder(train_batch['input_ids'][ii:ii+1])[0],
# latents=train_batch['noises'][ii].unsqueeze(0),
image = Image.fromarray(tensor2np(train_batch['original_pixel_values'][ii])),
image_guidance_scale = 1.,
guidance_scale = 3.0,
generator = generator,
num_inference_steps = 25,
#Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf
guidance_rescale = 0.7,
# output_type = 'np',
).images[0]
val_train_image = to_pil_image(pil_to_tensor(val_train_image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8))
val_train_images.append(val_train_image)
concat_test_images1 = []
concat_test_images2 = []
concat_train_images = []
for gt, im_1, im_2, im_3 in zip(test_batch1['gt_values'],test_batch1['original_pixel_values'],test_batch1['pixel_values'],val_test_images1):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images1.append(output_img)
for gt, im_1, im_2, im_3 in zip(test_batch2['gt_values'],test_batch2['original_pixel_values'],test_batch2['pixel_values'],val_test_images2):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images2.append(output_img)
for gt, im_1, im_2, im_3 in zip(train_batch['gt_values'],train_batch['original_pixel_values'],train_batch['pixel_values'],val_train_images):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_train_images.append(output_img)
for tracker in accelerator.trackers:
if tracker.name == "wandb":
tracker.log(
{
"validation: training images": [
wandb.Image(image, )
for i, image in enumerate(concat_train_images)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 1": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images1)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 2": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images2)
],
},
step=global_step
)
del pipeline
torch.cuda.empty_cache()
return
class PSEUDODepthDataset(Dataset):
def __init__(
self,
data_root,
pseudo_root,
tokenizer,
splits,
scene_types,
size=512,
center_crop=True,
num_train_imgs=None,
tokenizer_max_length=None,
empty_prompt = False,
unified_prompt = None,
):
self.data_root = Path(data_root)
self.pseudo_root = Path(pseudo_root)
self.splits = check_and_tuplize_tokens(
splits, _VALID_SPLITS
)
self.scene_types = check_and_tuplize_tokens(
scene_types, _VALID_SCENE_TYPES
)
meta_fname = self.data_root.parent / 'diode_meta.json'
with open(meta_fname, 'r') as f:
self.meta = json.load(f)
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.tokenizer_max_length = tokenizer_max_length
self.num_train_imgs = num_train_imgs
self.empty_prompt = empty_prompt
self.unified_prompt = unified_prompt
if not self.data_root.exists():
raise ValueError("Instance images root doesn't exists.")
imgs = []
for split in self.splits:
for scene_type in self.scene_types:
| # coding=utf-8
# Intrinsic-LoRA
"""Intrinsic-LoRA AugUNet model for depth training"""
#Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf
logger = get_logger(__name__, log_level="INFO")
def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None):
    """Write a model card (README.md) plus example images for a LoRA fine-tune.

    Args:
        repo_id: Target hub repository id, embedded in the card title.
        images: Optional iterable of PIL images; each is saved into
            ``repo_folder`` as ``image_{i}.png``. ``None`` is treated as empty.
        base_model: Identifier of the base model the LoRA weights adapt.
        dataset_name: Name of the fine-tuning dataset mentioned in the card.
        repo_folder: Local directory where ``README.md`` and images are written.
    """
    img_str = ""
    # Guard against images=None (the declared default); the previous code
    # crashed in enumerate(None).
    for i, image in enumerate(images or []):
        image.save(os.path.join(repo_folder, f"image_{i}.png"))
        img_str += "\n"
    # YAML front matter consumed by the Hugging Face Hub model-card parser.
    yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- lora
inference: true
---
"""
    model_card = f"""
# LoRA text2image fine-tuning - {repo_id}
These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n
{img_str}
"""
    with open(os.path.join(repo_folder, "README.md"), "w") as f:
        f.write(yaml + model_card)
def colorize(
    value,
    cmap='inferno_r',
    invalid_val=-99,
    invalid_mask=None,
    background_color=(128, 128, 128, 255),
    gamma_corrected=False,
    value_transform=None,
    vmin=None,
    vmax=None,
):
    """Turn a depth/score map into a color (or grayscale) image for logging.

    The map is normalized by its 99th percentile over valid pixels, then
    either rendered as an inverted grayscale image (``cmap='singlechannel'``)
    or passed through a matplotlib colormap.

    NOTE(review): the ``vmin``/``vmax`` parameters are accepted but never
    used -- ``vmax`` is always overwritten by the percentile below; confirm
    whether callers rely on passing them.
    """
    if isinstance(value, torch.Tensor):
        value = value.detach().cpu().numpy()
    value = value.squeeze()
    if invalid_mask is None:
        # Derive the invalid mask from the sentinel value.
        invalid_mask = value == invalid_val
        mask = np.logical_not(invalid_mask)
    else:
        # Caller-provided mask: 1 marks valid pixels, 0 marks invalid ones.
        # Both are converted to index tuples via np.where.
        mask = np.where(invalid_mask==1)
        invalid_mask = np.where(invalid_mask==0)
    # Robust normalization: scale by the 99th percentile of valid pixels.
    vmax = np.percentile(value[mask],99)
    if vmax!=0.:
        value = value/vmax # vmin..vmax
    else:
        value = value * 0.
    # squeeze last dim if it exists
    # grey out the invalid values
    if cmap=='singlechannel':
        # Inverted grayscale: replicate the map across 3 channels,
        # zero out invalid pixels, clip to [0, 1] and scale to uint8.
        img = 1-np.stack([value]*3,axis=-1)
        img[invalid_mask] = 0
        return Image.fromarray(((img.clip(max=1.))*255.).astype(np.uint8))
    else:
        # Colormap path: NaN-out invalid pixels so the cmapper skips them,
        # then paint them with the background color afterwards.
        value[invalid_mask] = np.nan
        cmapper = matplotlib.cm.get_cmap(cmap)
        if value_transform:
            value = value_transform(value)
        value = cmapper(value, bytes=True) # (nxmx4)
        img = value[...]
        img[invalid_mask] = background_color
        if gamma_corrected:
            # gamma correction
            img = img / 255
            img = np.power(img, 2.2)
            img = img * 255
            img = img.astype(np.uint8)
        return img
def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
    """Tokenize *prompt* padded/truncated to a fixed length.

    Uses ``tokenizer_max_length`` when given, otherwise the tokenizer's own
    ``model_max_length``. Returns the tokenizer output as PyTorch tensors.
    """
    max_length = (
        tokenizer_max_length
        if tokenizer_max_length is not None
        else tokenizer.model_max_length
    )
    return tokenizer(
        prompt,
        truncation=True,
        padding="max_length",
        max_length=max_length,
        return_tensors="pt",
    )
def tensor2np(tensor):
    """Convert a CHW tensor in [-1, 1] to an HWC uint8 numpy image."""
    hwc = tensor.cpu().permute(1, 2, 0).numpy()
    rescaled = (hwc * 0.5 + 0.5) * 255
    return rescaled.astype(np.uint8)
def listPILToTensor(listPILs):
    """Stack a list of PIL images into one normalized tensor batch.

    Each image is resized/center-cropped to the side length of the first
    image, converted to a tensor and normalized to [-1, 1].
    """
    side = listPILs[0].size[0]
    to_tensor = transforms.Compose(
        [
            transforms.Resize(side, interpolation=transforms.InterpolationMode.BILINEAR),
            transforms.CenterCrop(side),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )
    tensors = [to_tensor(img) for img in listPILs]
    return torch.stack(tensors)
def visualization_routine(gt, im_1, im_2, im_3):
    """Build one side-by-side PIL panel: input | ground truth | target | prediction.

    ``gt``, ``im_1`` and ``im_2`` are CHW tensors in [-1, 1]; ``im_3`` is
    already a PIL image.
    """
    panels = (
        tensor2np(im_1),
        tensor2np(gt),
        tensor2np(im_2),
        np.array(im_3),
    )
    return Image.fromarray(np.hstack(panels))
@torch.inference_mode()
def log_validation(
    text_encoder,
    tokenizer,
    unet,
    vae,
    args,
    accelerator,
    zero_snr_betas,
    test_batches,
    train_batch,
    weight_dtype,
    epoch,
    global_step
):
    """Run validation inference and log image comparison grids to wandb.

    Builds an instruct-pix2pix pipeline around the current (unwrapped) UNet
    and text encoder, swaps in a DPM-Solver scheduler configured for
    v-prediction with the provided zero-terminal-SNR betas, generates 4
    samples for each of the two test batches and the train batch, and logs
    side-by-side panels (input | gt | target | prediction) to every wandb
    tracker.

    Args:
        text_encoder / unet: current training modules (unwrapped before use).
        tokenizer / vae: unused here; kept for a uniform call signature.
        args: needs ``args.revision`` and ``args.seed``.
        accelerator: provides the device, trackers and ``unwrap_model``.
        zero_snr_betas: beta schedule rescaled for a zero terminal SNR.
        test_batches: two-element sequence of batch dicts (entries may be
            None); each batch is assumed to hold at least 4 samples under
            'input_ids', 'original_pixel_values', 'pixel_values' and
            'gt_values' -- TODO confirm against the data collator.
        train_batch: batch dict with the same keys.
        epoch: unused; kept for symmetry with the caller.
        global_step: step value used for wandb logging.
    """
    pipeline = DiffusionPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix",
        unet=accelerator.unwrap_model(unet),
        text_encoder=accelerator.unwrap_model(text_encoder),
        revision=args.revision,
        torch_dtype=weight_dtype,
        safety_checker=None,
    )
    # Some schedulers expose a learned variance; the DPM-Solver replacement
    # below only supports fixed variance, so downgrade it when present.
    scheduler_args = {}
    if "variance_type" in pipeline.scheduler.config:
        variance_type = pipeline.scheduler.config.variance_type
        if variance_type in ["learned", "learned_range"]:
            variance_type = "fixed_small"
        scheduler_args["variance_type"] = variance_type
    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
        pipeline.scheduler.config, **scheduler_args,
        #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf
        prediction_type='v_prediction',
        trained_betas=zero_snr_betas,
    )
    # Sanity-check the zero-SNR setup: v-prediction plus a final
    # alpha_cumprod of exactly 0 (pure noise at the last timestep).
    assert pipeline.scheduler.prediction_type == "v_prediction"
    assert pipeline.scheduler.alphas_cumprod[-1] == 0.
    pipeline = pipeline.to(accelerator.device)
    pipeline.set_progress_bar_config(disable=True)
    # Bind the module-level `new_call` as a method so it can be used in
    # place of the pipeline's standard __call__.
    pipeline.new_call = new_call.__get__(pipeline, pipeline.__class__)
    val_test_images1 = []
    val_test_images2 = []
    val_train_images = []
    test_batch1 = test_batches[0]
    test_batch2 = test_batches[1]
    # Generate 4 validation samples per split.
    for ii in range(4):
        with torch.no_grad():
            if test_batches[0] is not None:
                # NOTE(review): `generator` is only created inside this branch
                # but reused by the two calls below; a None first test batch
                # would raise NameError -- confirm upstream always passes one.
                generator = torch.Generator(device=accelerator.device).manual_seed(args.seed+100) if args.seed else None
                image = pipeline.new_call(
                    prompt_embeds = pipeline.text_encoder(test_batch1['input_ids'][ii:ii+1])[0],
                    image = Image.fromarray(tensor2np(test_batch1['original_pixel_values'][ii])),
                    image_guidance_scale = 1.,
                    guidance_scale = 3.0,
                    generator=generator,
                    num_inference_steps = 25,
                    #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf
                    guidance_rescale = 0.7,
                    # output_type = 'np',
                ).images[0]
                # Collapse the RGB prediction to grayscale (channel mean) and
                # replicate back to 3 channels for display.
                image = to_pil_image(pil_to_tensor(image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8))
                val_test_images1.append(image)
            if test_batches[1] is not None:
                image = pipeline.new_call(
                    prompt_embeds = pipeline.text_encoder(test_batch2['input_ids'][ii:ii+1])[0],
                    # latents=test_batch2['noises'][ii].unsqueeze(0),
                    image = Image.fromarray(tensor2np(test_batch2['original_pixel_values'][ii])),
                    image_guidance_scale = 1.,
                    guidance_scale = 3.0,
                    generator=generator,
                    num_inference_steps = 25,
                    #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf
                    guidance_rescale = 0.7,
                    # output_type = 'np',
                ).images[0]
                image = to_pil_image(pil_to_tensor(image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8))
                val_test_images2.append(image)
            val_train_image = pipeline.new_call(
                prompt_embeds=pipeline.text_encoder(train_batch['input_ids'][ii:ii+1])[0],
                # latents=train_batch['noises'][ii].unsqueeze(0),
                image = Image.fromarray(tensor2np(train_batch['original_pixel_values'][ii])),
                image_guidance_scale = 1.,
                guidance_scale = 3.0,
                generator = generator,
                num_inference_steps = 25,
                #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf
                guidance_rescale = 0.7,
                # output_type = 'np',
            ).images[0]
            val_train_image = to_pil_image(pil_to_tensor(val_train_image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8))
            val_train_images.append(val_train_image)
    # Assemble input/gt/target/prediction panels for each split.
    concat_test_images1 = []
    concat_test_images2 = []
    concat_train_images = []
    for gt, im_1, im_2, im_3 in zip(test_batch1['gt_values'],test_batch1['original_pixel_values'],test_batch1['pixel_values'],val_test_images1):
        output_img = visualization_routine(gt,im_1,im_2,im_3)
        concat_test_images1.append(output_img)
    for gt, im_1, im_2, im_3 in zip(test_batch2['gt_values'],test_batch2['original_pixel_values'],test_batch2['pixel_values'],val_test_images2):
        output_img = visualization_routine(gt,im_1,im_2,im_3)
        concat_test_images2.append(output_img)
    for gt, im_1, im_2, im_3 in zip(train_batch['gt_values'],train_batch['original_pixel_values'],train_batch['pixel_values'],val_train_images):
        output_img = visualization_routine(gt,im_1,im_2,im_3)
        concat_train_images.append(output_img)
    # Only wandb trackers receive the image logs.
    for tracker in accelerator.trackers:
        if tracker.name == "wandb":
            tracker.log(
                {
                    "validation: training images": [
                        wandb.Image(image, )
                        for i, image in enumerate(concat_train_images)
                    ],
                },
                step=global_step
            )
            tracker.log(
                {
                    "validation: test images 1": [
                        wandb.Image(image, )
                        for i, image in enumerate(concat_test_images1)
                    ],
                },
                step=global_step
            )
            tracker.log(
                {
                    "validation: test images 2": [
                        wandb.Image(image, )
                        for i, image in enumerate(concat_test_images2)
                    ],
                },
                step=global_step
            )
    # Free the temporary pipeline's GPU memory before training resumes.
    del pipeline
    torch.cuda.empty_cache()
    return
class PSEUDODepthDataset(Dataset):
def __init__(
self,
data_root,
pseudo_root,
tokenizer,
splits,
scene_types,
size=512,
center_crop=True,
num_train_imgs=None,
tokenizer_max_length=None,
empty_prompt = False,
unified_prompt = None,
):
self.data_root = Path(data_root)
self.pseudo_root = Path(pseudo_root)
self.splits = check_and_tuplize_tokens(
splits, _VALID_SPLITS
)
self.scene_types = check_and_tuplize_tokens(
scene_types, _VALID_SCENE_TYPES
)
meta_fname = self.data_root.parent / 'diode_meta.json'
with open(meta_fname, 'r') as f:
self.meta = json.load(f)
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.tokenizer_max_length = tokenizer_max_length
self.num_train_imgs = num_train_imgs
self.empty_prompt = empty_prompt
self.unified_prompt = unified_prompt
if not self.data_root.exists():
raise ValueError("Instance images root doesn't exists.")
imgs = []
for split in self.splits:
for scene_type in self.scene_types: | _curr = enumerate_paths(self.meta[split][scene_type]) | 2 | 2023-12-08 16:34:44+00:00 | 8k |
modelscope/llmuses | llmuses/run_ms.py | [
{
"identifier": "DATASET_ID",
"path": "llmuses/benchmarks/ceval/ceval_adapter.py",
"snippet": "DATASET_ID = 'modelscope/ceval-exam'"
},
{
"identifier": "DATASET_ID",
"path": "llmuses/benchmarks/mmlu/mmlu_adapter.py",
"snippet": "DATASET_ID = 'modelscope/mmlu'"
},
{
"identifier": ... | import argparse
import torch
from llmuses.benchmarks.ceval import DATASET_ID as CEVAL_EXAM
from llmuses.benchmarks.mmlu import DATASET_ID as MMLU
from llmuses.benchmarks.hellaswag import DATASET_ID as HELLA_SWAG
from llmuses.benchmarks.arc import DATASET_ID as ARC
from llmuses.benchmarks.truthful_qa import DATASET_ID as TRUTHFUL_QA
from llmuses.constants import DEFAULT_ROOT_CACHE_DIR
from llmuses.evaluator import Evaluator
from llmuses.models.model_adapter import MultiChoiceModelAdapter, ContinuationLogitsModelAdapter
from llmuses.utils.logger import get_logger
from llmuses.models.dummy_chat_model import DummyChatModel
from llmuses.benchmarks.ceval import CEVALAdapter
from llmuses.benchmarks.mmlu import MMLUAdapter
from llmuses.benchmarks.arc import ARCAdapter
from llmuses.benchmarks.hellaswag import HellaSwagAdapter
from llmuses.benchmarks.truthful_qa import TruthfulQaAdapter | 6,987 | # Copyright (c) Alibaba, Inc. and its affiliates.
# flake8: noqa
logger = get_logger()
# TODO: add more precision
MODEL_PRECISION_MAP = {'fp16': torch.float16, 'fp32': torch.float32, 'bf16': torch.bfloat16}
"""
Run evaluation process for ModelScope Leaderboard.
"""
def parse_args():
parser = argparse.ArgumentParser(description='Run evaluation on a model')
parser.add_argument('--model', help='Model id from modelscope or huggingface.', required=True)
parser.add_argument('--revision', help='Model revision.', required=False, default=None)
parser.add_argument('--precision', help='Model precision.', default='bf16')
parser.add_argument('--work-dir', help='root work cache dir.', default=None)
parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs')
| # Copyright (c) Alibaba, Inc. and its affiliates.
# flake8: noqa
logger = get_logger()
# TODO: add more precision
MODEL_PRECISION_MAP = {'fp16': torch.float16, 'fp32': torch.float32, 'bf16': torch.bfloat16}
"""
Run evaluation process for ModelScope Leaderboard.
"""
def parse_args():
parser = argparse.ArgumentParser(description='Run evaluation on a model')
parser.add_argument('--model', help='Model id from modelscope or huggingface.', required=True)
parser.add_argument('--revision', help='Model revision.', required=False, default=None)
parser.add_argument('--precision', help='Model precision.', default='bf16')
parser.add_argument('--work-dir', help='root work cache dir.', default=None)
parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs') | parser.add_argument('--datasets-dir', help='Datasets dir.', default=DEFAULT_ROOT_CACHE_DIR) | 5 | 2023-12-07 06:10:49+00:00 | 8k |
AsuradaYuci/TF-CLIP | processor/processor_clipreid_stage2.py | [
{
"identifier": "AverageMeter",
"path": "utils/meter.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self)... | import logging
import os
import time
import torch
import torch.nn as nn
import torch.distributed as dist
import collections
import time
from utils.meter import AverageMeter
from utils.metrics import R1_mAP_eval
from utils.iotools import save_checkpoint
from torch.cuda import amp
from torch.nn import functional as F
from loss.supcontrast import SupConLoss
from loss.softmax_loss import CrossEntropyLabelSmooth
from datetime import timedelta | 3,633 | optimizer_center.zero_grad()
img = img.to(device)
target = vid.to(device)
if cfg.MODEL.SIE_CAMERA:
target_cam = target_cam.to(device)
else:
target_cam = None
if cfg.MODEL.SIE_VIEW:
target_view = target_view.to(device)
else:
target_view = None
with amp.autocast(enabled=True):
B, T, C, H, W = img.shape # B=64, T=4.C=3 H=256,W=128
score, feat, logits1 = model(x = img, cam_label=target_cam, view_label=target_view, text_features2=cluster_features)
score1 = score[0:3]
score2 = score[3]
if (n_iter + 1) % log_period == 0:
loss1 = loss_fn(score1, feat, target, target_cam, logits1, isprint=True)
else:
loss1 = loss_fn(score1, feat, target, target_cam, logits1)
targetX = target.unsqueeze(1) # 12,1 => [94 94 10 10 15 15 16 16 75 75 39 39]
targetX = targetX.expand(B, T)
# 12,8 => [ [94...94][94...94][10...10][10...10] ... [39...39] [39...39]]
targetX = targetX.contiguous()
targetX = targetX.view(B * T,
-1) # 96 => [94...94 10...10 15...15 16...16 75...75 39...39]
targetX = targetX.squeeze(1)
loss_frame = xent_frame(score2, targetX)
loss = loss1 + loss_frame / T
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
if 'center' in cfg.MODEL.METRIC_LOSS_TYPE:
for param in center_criterion.parameters():
param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT)
scaler.step(optimizer_center)
scaler.update()
acc1 = (logits1.max(1)[1] == target).float().mean()
acc_id1 = (score[0].max(1)[1] == target).float().mean()
acc_id2 = (score[3].max(1)[1] == targetX).float().mean()
loss_meter.update(loss.item(), img.shape[0])
acc_meter.update(acc1, 1)
acc_meter_id1.update(acc_id1, 1)
acc_meter_id2.update(acc_id2, 1)
torch.cuda.synchronize()
if (n_iter + 1) % log_period == 0:
logger.info(
"Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc_clip: {:.3f}, Acc_id1: {:.3f}, Acc_id2: {:.3f}, Base Lr: {:.2e}"
.format(epoch, (n_iter + 1), len(train_loader_stage2),
loss_meter.avg, acc_meter.avg, acc_meter_id1.avg, acc_meter_id2.avg, scheduler.get_lr()[0]))
scheduler.step()
end_time = time.time()
time_per_batch = (end_time - start_time) / (n_iter + 1)
if cfg.MODEL.DIST_TRAIN:
pass
else:
logger.info("Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]"
.format(epoch, time_per_batch, train_loader_stage2.batch_size / time_per_batch))
if epoch % eval_period == 0:
if cfg.MODEL.DIST_TRAIN:
if dist.get_rank() == 0:
model.eval()
for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader):
with torch.no_grad():
img = img.to(device)
if cfg.MODEL.SIE_CAMERA:
camids = camids.to(device)
else:
camids = None
if cfg.MODEL.SIE_VIEW:
target_view = target_view.to(device)
else:
target_view = None
feat = model(img, cam_label=camids, view_label=target_view)
evaluator.update((feat, vid, camid))
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results - Epoch: {}".format(epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10, 20]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
torch.cuda.empty_cache()
else:
model.eval()
for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader):
with torch.no_grad():
img = img.to(device)
if cfg.MODEL.SIE_CAMERA:
camids = camids.to(device)
else:
camids = None
if cfg.MODEL.SIE_VIEW:
target_view = target_view.to(device)
else:
target_view = None
feat = model(img, cam_label=camids, view_label=target_view)
evaluator.update((feat, vid, camid))
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results - Epoch: {}".format(epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10, 20]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
torch.cuda.empty_cache()
prec1 = cmc[0] + mAP
is_best = prec1 > best_performance
best_performance = max(prec1, best_performance)
if is_best:
best_epoch = epoch
|
def do_train_stage2(cfg,
model,
center_criterion,
train_loader_stage1,
train_loader_stage2,
val_loader,
optimizer,
optimizer_center,
scheduler,
loss_fn,
num_query, local_rank,num_classes):
log_period = cfg.SOLVER.STAGE2.LOG_PERIOD
eval_period = cfg.SOLVER.STAGE2.EVAL_PERIOD
device = "cuda"
epochs = cfg.SOLVER.STAGE2.MAX_EPOCHS
logger = logging.getLogger("TFCLIP.train")
logger.info('start training')
_LOCAL_PROCESS_GROUP = None
if device:
model.to(local_rank)
if torch.cuda.device_count() > 1 and cfg.MODEL.DIST_TRAIN:
print('Using {} GPUs for training'.format(torch.cuda.device_count()))
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], find_unused_parameters=True)
loss_meter = AverageMeter()
acc_meter = AverageMeter()
acc_meter_id1 = AverageMeter()
acc_meter_id2 = AverageMeter()
evaluator = R1_mAP_eval(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
scaler = amp.GradScaler()
xent_frame = CrossEntropyLabelSmooth(num_classes=num_classes)
@torch.no_grad()
def generate_cluster_features(labels, features):
centers = collections.defaultdict(list)
for i, label in enumerate(labels):
if label == -1:
continue
centers[labels[i]].append(features[i])
centers = [
torch.stack(centers[idx], dim=0).mean(0) for idx in sorted(centers.keys())
]
centers = torch.stack(centers, dim=0)
return centers
# train
all_start_time = time.monotonic()
####### 1.CLIP-Memory module ####################
print("=> Automatically generating CLIP-Memory (might take a while, have a coffe)")
image_features = []
labels = []
with torch.no_grad():
for n_iter, (img, vid, target_cam, target_view) in enumerate(train_loader_stage1):
img = img.to(device) # torch.Size([64, 4, 3, 256, 128])
target = vid.to(device) # torch.Size([64])
if len(img.size()) == 6:
# method = 'dense'
b, n, s, c, h, w = img.size()
assert (b == 1)
img = img.view(b * n, s, c, h, w) # torch.Size([5, 8, 3, 256, 128])
with amp.autocast(enabled=True):
image_feature = model(img, get_image = True)
image_feature = image_feature.view(-1, image_feature.size(1))
image_feature = torch.mean(image_feature, 0, keepdim=True) # 1,512
for i, img_feat in zip(target, image_feature):
labels.append(i)
image_features.append(img_feat.cpu())
else:
with amp.autocast(enabled=True):
image_feature = model(img, get_image = True)
for i, img_feat in zip(target, image_feature):
labels.append(i)
image_features.append(img_feat.cpu())
labels_list = torch.stack(labels, dim=0).cuda() # N torch.Size([8256])
image_features_list = torch.stack(image_features, dim=0).cuda() # torch.Size([8256, 512])
cluster_features = generate_cluster_features(labels_list.cpu().numpy(), image_features_list).detach()
best_performance = 0.0
best_epoch = 1
for epoch in range(1, epochs + 1):
start_time = time.time()
loss_meter.reset()
acc_meter.reset()
acc_meter_id1.reset()
acc_meter_id2.reset()
evaluator.reset()
model.train()
for n_iter, (img, vid, target_cam, target_view) in enumerate(train_loader_stage2):
optimizer.zero_grad()
optimizer_center.zero_grad()
img = img.to(device)
target = vid.to(device)
if cfg.MODEL.SIE_CAMERA:
target_cam = target_cam.to(device)
else:
target_cam = None
if cfg.MODEL.SIE_VIEW:
target_view = target_view.to(device)
else:
target_view = None
with amp.autocast(enabled=True):
B, T, C, H, W = img.shape # B=64, T=4.C=3 H=256,W=128
score, feat, logits1 = model(x = img, cam_label=target_cam, view_label=target_view, text_features2=cluster_features)
score1 = score[0:3]
score2 = score[3]
if (n_iter + 1) % log_period == 0:
loss1 = loss_fn(score1, feat, target, target_cam, logits1, isprint=True)
else:
loss1 = loss_fn(score1, feat, target, target_cam, logits1)
targetX = target.unsqueeze(1) # 12,1 => [94 94 10 10 15 15 16 16 75 75 39 39]
targetX = targetX.expand(B, T)
# 12,8 => [ [94...94][94...94][10...10][10...10] ... [39...39] [39...39]]
targetX = targetX.contiguous()
targetX = targetX.view(B * T,
-1) # 96 => [94...94 10...10 15...15 16...16 75...75 39...39]
targetX = targetX.squeeze(1)
loss_frame = xent_frame(score2, targetX)
loss = loss1 + loss_frame / T
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
if 'center' in cfg.MODEL.METRIC_LOSS_TYPE:
for param in center_criterion.parameters():
param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT)
scaler.step(optimizer_center)
scaler.update()
acc1 = (logits1.max(1)[1] == target).float().mean()
acc_id1 = (score[0].max(1)[1] == target).float().mean()
acc_id2 = (score[3].max(1)[1] == targetX).float().mean()
loss_meter.update(loss.item(), img.shape[0])
acc_meter.update(acc1, 1)
acc_meter_id1.update(acc_id1, 1)
acc_meter_id2.update(acc_id2, 1)
torch.cuda.synchronize()
if (n_iter + 1) % log_period == 0:
logger.info(
"Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc_clip: {:.3f}, Acc_id1: {:.3f}, Acc_id2: {:.3f}, Base Lr: {:.2e}"
.format(epoch, (n_iter + 1), len(train_loader_stage2),
loss_meter.avg, acc_meter.avg, acc_meter_id1.avg, acc_meter_id2.avg, scheduler.get_lr()[0]))
scheduler.step()
end_time = time.time()
time_per_batch = (end_time - start_time) / (n_iter + 1)
if cfg.MODEL.DIST_TRAIN:
pass
else:
logger.info("Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]"
.format(epoch, time_per_batch, train_loader_stage2.batch_size / time_per_batch))
if epoch % eval_period == 0:
if cfg.MODEL.DIST_TRAIN:
if dist.get_rank() == 0:
model.eval()
for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader):
with torch.no_grad():
img = img.to(device)
if cfg.MODEL.SIE_CAMERA:
camids = camids.to(device)
else:
camids = None
if cfg.MODEL.SIE_VIEW:
target_view = target_view.to(device)
else:
target_view = None
feat = model(img, cam_label=camids, view_label=target_view)
evaluator.update((feat, vid, camid))
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results - Epoch: {}".format(epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10, 20]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
torch.cuda.empty_cache()
else:
model.eval()
for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader):
with torch.no_grad():
img = img.to(device)
if cfg.MODEL.SIE_CAMERA:
camids = camids.to(device)
else:
camids = None
if cfg.MODEL.SIE_VIEW:
target_view = target_view.to(device)
else:
target_view = None
feat = model(img, cam_label=camids, view_label=target_view)
evaluator.update((feat, vid, camid))
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results - Epoch: {}".format(epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10, 20]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
torch.cuda.empty_cache()
prec1 = cmc[0] + mAP
is_best = prec1 > best_performance
best_performance = max(prec1, best_performance)
if is_best:
best_epoch = epoch | save_checkpoint(model.state_dict(), is_best, os.path.join(cfg.OUTPUT_DIR, 'checkpoint_ep.pth.tar')) | 2 | 2023-12-11 04:03:46+00:00 | 8k |
nexB/dejacode | dje/filters.py | [
{
"identifier": "Dataspace",
"path": "dje/models.py",
"snippet": "class Dataspace(models.Model):\n \"\"\"\n The Dataspace is a way to keep data for each organization data\n separated and still store them in the same database, schema or table.\n Therefore the Dataspace is part of the primary ... | import datetime
import json
import operator
import uuid
import django_filters
from functools import reduce
from django.contrib import messages
from django.contrib.admin import filters
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.search import SearchQuery
from django.contrib.postgres.search import SearchRank
from django.contrib.postgres.search import SearchVector
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models import Case
from django.db.models import IntegerField
from django.db.models import Q
from django.db.models import Value
from django.db.models import When
from django.forms import widgets
from django.forms.fields import MultipleChoiceField
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from dje.models import Dataspace
from dje.models import History
from dje.models import is_dataspace_related
from dje.models import is_secured
from dje.utils import database_re_escape
from dje.utils import extract_name_version
from dje.utils import get_uuids_list_sorted
from dje.utils import remove_field_from_query_dict | 6,158 | super().__init__(*args, **kwargs)
def filter(self, qs, value):
    """Filter the queryset on "name:version" pairs.

    Each entry of *value* is parsed with ``extract_name_version``; entries
    that do not parse are silently ignored. All parsed pairs are OR-ed
    together into a single lookup on the configured name/version fields.
    """
    value = value or ()  # Make sure we have an iterable
    # Even though not a noop, no point filtering if empty
    if not value:
        return qs
    q = Q()
    # set() de-duplicates repeated "name:version" entries.
    for v in set(value):
        try:
            name, version = extract_name_version(v)
        except SyntaxError:
            # Malformed entry: skip rather than fail the whole filter.
            pass
        else:
            q |= Q(**{self.name_field_name: name, self.version_field_name: version})
    if self.distinct:
        return self.get_method(qs)(q).distinct()
    return self.get_method(qs)(q)
class BooleanChoiceFilter(django_filters.ChoiceFilter):
    """ChoiceFilter exposing a boolean field as Yes/No/All choices."""

    def __init__(self, *args, **kwargs):
        # Keep any caller-provided values, falling back to the defaults.
        kwargs.setdefault("empty_label", "All")
        kwargs.setdefault(
            "choices",
            (
                ("yes", _("Yes")),
                ("no", _("No")),
            ),
        )
        super().__init__(*args, **kwargs)

    def filter(self, qs, value):
        mapping = {"yes": True, "no": False}
        if value not in mapping:
            # Unknown/empty value: leave the queryset untouched.
            return qs
        return qs.filter(**{self.field_name: mapping[value]}).distinct()
class ChoicesOnlyListFilterMixin:
    """Remove the 'All' choice from SimpleListFilter.choices()"""

    def choices(self, cl):
        # Yield one entry per declared lookup; unlike the stock
        # implementation, no leading "All" entry is produced.
        for lookup_value, lookup_title in self.lookup_choices:
            query_string = cl.get_query_string(
                {self.parameter_name: lookup_value},
                [],
            )
            yield {
                "selected": str(self.value()) == str(lookup_value),
                "query_string": query_string,
                "display": lookup_title,
            }
class BaseDataspaceLookupsFilter(filters.SimpleListFilter):
    """Admin list filter whose choices are the Dataspaces visible to the user.

    Reference-dataspace users can pick any Dataspace; everyone else only
    sees their own plus (when defined) the reference one.
    """

    def lookups(self, request, model_admin):
        current_dataspace = request.user.dataspace
        reference_dataspace = Dataspace.objects.get_reference()
        if current_dataspace == reference_dataspace:
            candidates = Dataspace.objects.all()
        elif reference_dataspace:
            candidates = [current_dataspace, reference_dataspace]
        else:
            candidates = [current_dataspace]
        return [(candidate.id, candidate.name) for candidate in candidates]
class DataspaceFilter(ChoicesOnlyListFilterMixin, BaseDataspaceLookupsFilter):
    """
    Scope the ChangeList results by a Dataspace.
    Default is the current User Dataspace.
    Anyone can look into reference Dataspace.
    Only Reference User can look into other Dataspaces.
    """

    title = _("dataspace")
    parameter_name = "dataspace__id__exact"

    def lookups(self, request, model_admin):
        """Set the lookup value for the current user dataspace choice to None."""
        user_dataspace_name = request.user.dataspace.name
        return [
            (None if name == user_dataspace_name else pk, name)
            for pk, name in super().lookups(request, model_admin)
        ]

    def queryset(self, request, queryset):
        # No explicit selection: fall back to the user's own dataspace.
        if not self.value():
            return queryset.scope(request.user.dataspace)
        return queryset.scope_by_id(self.value())
class MissingInFilter(BaseDataspaceLookupsFilter):
"""
Filter by objects missing in the given dataspace, compared with the
current `DataspaceFilter.parameter_name` or user dataspace.
Both values for reference and target Dataspace are validated against the
self.lookup_choices to make sure the user has the proper access permissions.
This filter is only available to superusers, this is enforced in
DataspacedAdmin.get_list_filter()
"""
title = _("missing in")
parameter_name = "missing_in"
def queryset(self, request, queryset):
if not self.value():
return
valid_choices = [str(choice) for choice, _ in self.lookup_choices]
if str(self.value()) not in valid_choices:
raise IncorrectLookupParameters()
| #
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#
IS_FILTER_LOOKUP_VAR = "_filter_lookup"
class FilterSetUtilsMixin:
def is_active(self):
"""Return True if any of the filter is active, except the 'sort' filter."""
return bool(
[field_name for field_name in self.form.changed_data if field_name not in ["sort"]]
)
def get_query_no_sort(self):
return remove_field_from_query_dict(self.data, "sort")
def get_filters_breadcrumbs(self):
return [
{
"label": self.filters[field_name].label,
"value": value,
"remove_url": remove_field_from_query_dict(self.data, field_name, value),
}
for field_name in self.form.changed_data
for value in self.data.getlist(field_name)
]
class DataspacedFilterSet(FilterSetUtilsMixin, django_filters.FilterSet):
    """FilterSet scoped to a mandatory Dataspace.

    Every related-object filter is limited to the given dataspace, and the
    fields listed in ``related_only`` have their choices restricted to the
    values actually present in the currently filtered queryset (facet-like
    behavior).
    """

    # Field names whose choices are narrowed to the values used by the
    # current result set (see `apply_related_only`).
    related_only = []

    def __init__(self, *args, **kwargs):
        try:
            self.dataspace = kwargs.pop("dataspace")
        except KeyError:
            raise AttributeError("A dataspace needs to be provided to this FilterSet.")
        # `dynamic_qs` toggles the facet-like narrowing; it is disabled when
        # this class re-instantiates itself in `get_parent_qs_for_related_only`
        # to avoid infinite recursion.
        self.dynamic_qs = kwargs.pop("dynamic_qs", True)
        self.parent_qs_cache = {}
        super().__init__(*args, **kwargs)
        for field_name, filter_ in self.filters.items():
            # Dataspace scoping for FKs on DataspaceRelated models.
            if hasattr(filter_, "queryset") and is_dataspace_related(filter_.queryset.model):
                filter_.queryset = filter_.queryset.scope(self.dataspace)
            if field_name in self.related_only:
                self.apply_related_only(field_name, filter_)
        # The `usage_policy` filter, when present, is further scoped to the
        # policies declared for this FilterSet's model.
        usage_policy = self.filters.get("usage_policy")
        if usage_policy:
            model_name = self._meta.model._meta.model_name
            usage_policy.queryset = usage_policy.queryset.filter(content_type__model=model_name)

    def apply_related_only(self, field_name, filter_):
        """
        Limit the filter choices to the values used on the parent queryset.
        This logic emulate a facets logic.
        See also `django.contrib.admin.filters.RelatedOnlyFieldListFilter`.
        """
        parent_qs = self.get_parent_qs_for_related_only(field_name)
        is_related_field = hasattr(filter_, "queryset")
        if is_related_field:  # FK type fields
            filter_.queryset = filter_.queryset.distinct().filter(
                pk__in=parent_qs.values_list(f"{field_name}__pk", flat=True)
            )
        else:  # Choices type fields
            choices_qs = (
                parent_qs.order_by(field_name).distinct().values_list(field_name, flat=True)
            )
            filter_.extra["choices"] = [
                choice for choice in filter_.extra["choices"] if choice[0] in choices_qs
            ]

    def get_parent_qs_for_related_only(self, field_name):
        """
        Return the parent QuerySet with active filters applied
        except for the given `field_name`.

        The model default manager is used in place of self.queryset
        since it does not contain the annotations and select/prefetch_related
        that are not needed for that dynamic filtering.
        """
        parent_qs = self._meta.model._default_manager.scope(self.dataspace)
        if not self.dynamic_qs:
            return parent_qs
        data = self.data.copy()
        # `sort` is only used for ordering and does not apply here.
        # Removing it from the queryset improves the performances.
        fields_to_remove = [
            "sort",
            field_name,
        ]
        for name in fields_to_remove:
            data.pop(name, None)
        if not data:
            return parent_qs
        # Cache per remaining-filters combination; the serialized query dict
        # acts as the cache key.
        cache_key = json.dumps(data, sort_keys=True)
        cached_qs = self.parent_qs_cache.get(cache_key, None)
        if cached_qs:
            return cached_qs
        # Re-run this FilterSet without the excluded field; `dynamic_qs=False`
        # prevents recursive narrowing.
        filterset = self.__class__(
            data=data,
            dataspace=self.dataspace,
            queryset=parent_qs,
            dynamic_qs=False,
        )
        self.parent_qs_cache[cache_key] = filterset.qs
        return filterset.qs
class SearchFilter(django_filters.CharFilter):
    """Case-insensitive containment search across several fields.

    Each whitespace-separated term must match at least one of the
    ``search_fields`` (terms are AND-ed, fields are OR-ed).
    """

    def __init__(self, search_fields, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.search_fields = search_fields

    def filter(self, qs, value):
        for term in value.split():
            conditions = (
                models.Q(**{f"{field}__icontains": term}) for field in self.search_fields
            )
            qs = qs.filter(reduce(operator.or_, conditions))
        return qs
class SearchRankFilter(SearchFilter):
    """
    Search on multiple fields using django.contrib.postgres.search module capabilities.
    For better performance, all given `search_fields` should be indexed (db_index=True).
    """

    def __init__(self, min_rank=0.01, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Results ranked below this threshold are dropped.
        self.min_rank = min_rank

    def filter(self, qs, value):
        if not value:
            return qs
        ranked_qs = (
            qs.annotate(
                rank=SearchRank(SearchVector(*self.search_fields), SearchQuery(value))
            )
            .filter(rank__gte=self.min_rank)
            .order_by("-rank", *qs.model._meta.ordering)
        )
        return ranked_qs.distinct() if self.distinct else ranked_qs
class MatchOrderedSearchFilter(SearchRankFilter):
    """
    Start with a case-insensitive containment search on the `name` field,
    ordering based on the match type using annotations.
    If that simple search returns nothing, fall back to the SearchRankFilter
    searching; this allows "name version" type strings to return some results.

    Postgres pattern matching docs available at:
    https://www.postgresql.org/docs/10/static/functions-matching.html#POSIX-CONSTRAINT-ESCAPES-TABLE
    """

    def __init__(self, match_order_fields, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.match_order_fields = match_order_fields

    def get_match_order_lookups(self, lookup_type, value):
        # OR the given lookup across every match-order field.
        or_queries = [
            models.Q(**{f"{field}__{lookup_type}": value}) for field in self.match_order_fields
        ]
        return reduce(operator.or_, or_queries)

    def filter(self, qs, value):
        if not value:
            return qs

        # \y matches only at the beginning or end of a word
        regex_escaped_value = r"\y{}\y".format(database_re_escape(value))

        # All matching patterns are applied case-insensitive
        match_order = Case(
            # 1. Exact match
            When(self.get_match_order_lookups("iexact", value), then=Value(1)),
            # 2. Contains word with boundaries
            When(self.get_match_order_lookups("iregex", regex_escaped_value), then=Value(2)),
            # 3. Contains word
            default=Value(3),  # default `icontains` clause in `.filter()`
            output_field=IntegerField(),
        )

        default_ordering = self.model._meta.ordering
        simple_search_qs = (
            qs.filter(self.get_match_order_lookups("icontains", value))
            .annotate(match_order=match_order)
            .order_by("match_order", *default_ordering)
        )

        if simple_search_qs.exists():
            if self.distinct:
                simple_search_qs = simple_search_qs.distinct()
            return simple_search_qs

        # Nothing matched the containment search: fall back to ranked search.
        return super().filter(qs, value)
class ProgressiveTextSearchFilter(SearchRankFilter):
    """Start with a icontains search before falling back on a ranking search."""

    def filter(self, qs, value):
        if not value:
            return qs

        if len(self.search_fields) != 1:
            raise ImproperlyConfigured(f"Only 1 field supported for {self.__class__}")

        search_field = self.search_fields[0]
        contains_search_qs = qs.filter(**{f"{search_field}__icontains": value})
        # `list()` fully evaluates the QuerySet and populates its result
        # cache, so returning it does not trigger a second database query.
        if list(contains_search_qs):
            return contains_search_qs

        # Fallback: full-text ranked search on the single field.
        vector = SearchVector(search_field)
        query = SearchQuery(value)
        return (
            qs.annotate(rank=SearchRank(vector, query))
            .filter(rank__gte=self.min_rank)
            .order_by("-rank")
        )
class DefaultOrderingFilter(django_filters.OrderingFilter):
    """Append the model Meta default ordering after the user-provided value."""

    def filter(self, qs, value):
        ordered_qs = super().filter(qs, value)
        current = ordered_qs.query.order_by
        if not current:
            return ordered_qs

        # Complete the explicit ordering with any model-default field
        # that is not already part of it.
        missing = tuple(
            field_name
            for field_name in self.model._meta.ordering
            if field_name not in current
        )
        return ordered_qs.order_by(*current, *missing)
class CharMultipleWidget(widgets.TextInput):
    """
    Render as a plain text input while accepting multi-value query strings
    (`?field=a&field=b`), reusing `SelectMultiple.value_from_datadict()`.
    """

    def value_from_datadict(self, data, files, name):
        values = widgets.SelectMultiple().value_from_datadict(data, files, name)
        if values and values != [""]:
            return values
        return ""

    def format_value(self, value):
        """Return a value as it should appear when rendered in a template."""
        return ", ".join(value)
class MultipleCharField(MultipleChoiceField):
    # Free-text multi-value field rendered as a single text input.
    widget = CharMultipleWidget

    def valid_value(self, value):
        # No predefined choices: every submitted value is accepted.
        return True
class MultipleCharFilter(django_filters.MultipleChoiceFilter):
    """Filter on multiple values for a CharField type using `?field=a&field=b` URL syntax."""

    field_class = MultipleCharField
class MultipleUUIDField(MultipleChoiceField):
    # Multi-value field whose entries must each parse as a UUID.
    widget = CharMultipleWidget

    def valid_value(self, value):
        try:
            uuid.UUID(value)
        except ValueError:
            # Not a valid UUID string.
            return False
        return True
class MultipleUUIDFilter(django_filters.MultipleChoiceFilter):
    """Filter on multiple values for an `UUIDField` type using `?field=a&field=b` URL syntax."""

    help_text = "Exact UUID. Multi-value supported."
    field_class = MultipleUUIDField

    def __init__(self, *args, **kwargs):
        # Apply the class-level help_text unless the caller provided one.
        kwargs.setdefault("help_text", self.help_text)
        super().__init__(*args, **kwargs)
class LastModifiedDateFilter(django_filters.DateTimeFilter):
    # DateTime filter hard-wired to a "modified since" (gte) lookup.
    help_text = (
        "Limits to records created or updated since that date. "
        'Supports both "YYYY-MM-DD" date and "YYYY-MM-DD HH:MM" datetime.'
    )

    def __init__(self, *args, **kwargs):
        kwargs.setdefault("help_text", self.help_text)
        # Force the greater-than-or-equal lookup; callers cannot override it.
        kwargs["lookup_expr"] = "gte"
        super().__init__(*args, **kwargs)
class NameVersionFilter(MultipleCharFilter):
    """
    Filter by `name:version` syntax.
    Supports multiple values: `?name_version=Name:Version&name_version=Name:Version`
    """

    help_text = (
        'Exact match on name/version using the syntax "name:version". Multi-value supported.'
    )

    def __init__(self, *args, **kwargs):
        kwargs.setdefault("help_text", self.help_text)
        # Allow reuse on models whose fields are not literally
        # named "name"/"version".
        self.name_field_name = kwargs.pop("name_field_name", "name")
        self.version_field_name = kwargs.pop("version_field_name", "version")
        super().__init__(*args, **kwargs)

    def filter(self, qs, value):
        value = value or ()  # Make sure we have an iterable
        # Even though not a noop, no point filtering if empty
        if not value:
            return qs

        # OR together one exact name/version clause per parseable entry;
        # entries raising SyntaxError in extract_name_version are ignored.
        q = Q()
        for v in set(value):
            try:
                name, version = extract_name_version(v)
            except SyntaxError:
                pass
            else:
                q |= Q(**{self.name_field_name: name, self.version_field_name: version})

        if self.distinct:
            return self.get_method(qs)(q).distinct()
        return self.get_method(qs)(q)
class BooleanChoiceFilter(django_filters.ChoiceFilter):
    """Filter a boolean field through explicit "yes"/"no" choices."""

    def __init__(self, *args, **kwargs):
        kwargs.setdefault("empty_label", "All")
        kwargs.setdefault(
            "choices",
            (
                ("yes", _("Yes")),
                ("no", _("No")),
            ),
        )
        super().__init__(*args, **kwargs)

    def filter(self, qs, value):
        value_map = {"yes": True, "no": False}
        # Anything other than an explicit yes/no leaves the QuerySet as-is.
        if value not in value_map:
            return qs
        return qs.filter(**{self.field_name: value_map[value]}).distinct()
class ChoicesOnlyListFilterMixin:
    """Remove the 'All' choice from SimpleListFilter.choices()"""

    def choices(self, cl):
        # Same dict structure as SimpleListFilter.choices(), minus the
        # leading "All" entry.
        for lookup, title in self.lookup_choices:
            yield {
                "selected": str(self.value()) == str(lookup),
                "query_string": cl.get_query_string(
                    {
                        self.parameter_name: lookup,
                    },
                    [],
                ),
                "display": title,
            }
class BaseDataspaceLookupsFilter(filters.SimpleListFilter):
    # Offers the Dataspaces visible to the current user: all of them for
    # reference-dataspace users, otherwise the user's own plus the
    # reference one (when it exists).
    def lookups(self, request, model_admin):
        user_dataspace = request.user.dataspace
        reference_dataspace = Dataspace.objects.get_reference()

        if user_dataspace == reference_dataspace:
            dataspaces = Dataspace.objects.all()
        else:
            dataspaces = [user_dataspace]
            if reference_dataspace:
                dataspaces.append(reference_dataspace)

        return [(dataspace.id, dataspace.name) for dataspace in dataspaces]
class DataspaceFilter(ChoicesOnlyListFilterMixin, BaseDataspaceLookupsFilter):
    """
    Scope the ChangeList results by a Dataspace.
    Default is the current User Dataspace.
    Anyone can look into reference Dataspace.
    Only Reference User can look into other Dataspaces.
    """

    title = _("dataspace")
    parameter_name = "dataspace__id__exact"

    def lookups(self, request, model_admin):
        """Set the lookup value for the current user dataspace choice to None."""
        lookups = super().lookups(request, model_admin)
        # A None lookup makes the user's own dataspace the default
        # (unselected) choice.
        return [(None if name == request.user.dataspace.name else pk, name) for pk, name in lookups]

    def queryset(self, request, queryset):
        if self.value():
            return queryset.scope_by_id(self.value())
        # No explicit selection: scope to the user's own dataspace.
        return queryset.scope(request.user.dataspace)
class MissingInFilter(BaseDataspaceLookupsFilter):
"""
Filter by objects missing in the given dataspace, compared with the
current `DataspaceFilter.parameter_name` or user dataspace.
Both values for reference and target Dataspace are validated against the
self.lookup_choices to make sure the user has the proper access permissions.
This filter is only available to superusers, this is enforced in
DataspacedAdmin.get_list_filter()
"""
title = _("missing in")
parameter_name = "missing_in"
def queryset(self, request, queryset):
if not self.value():
return
valid_choices = [str(choice) for choice, _ in self.lookup_choices]
if str(self.value()) not in valid_choices:
raise IncorrectLookupParameters()
| return queryset.exclude(uuid__in=get_uuids_list_sorted(self.value(), queryset.model)) | 6 | 2023-12-07 16:57:42+00:00 | 8k |
kylemcdonald/i2i-realtime | solo_app.py | [
{
"identifier": "ThreadedWorker",
"path": "threaded_worker.py",
"snippet": "class ThreadedWorker:\n def __init__(self, has_input=True, has_output=True, mode=\"thread\", debug=False):\n if mode == \"thread\":\n self.ParallelClass = threading.Thread\n self.QueueClass = queu... | import time
import zmq
import sdl2
import sdl2.ext
import numpy as np
import ctypes
import numpy as np
import torch
import torch.nn.functional as F
from threaded_worker import ThreadedWorker
from diffusion_processor import DiffusionProcessor
from settings import Settings
from settings_api import SettingsAPI
from osc_settings_controller import OscSettingsController | 5,707 | if len(msg) == 8294400:
img = torch.from_numpy(unpack_rgb444_image(msg, (1080, 1920)))
elif len(msg) == 4147200:
img = torch.frombuffer(msg, dtype=torch.uint8).view(1080, 1920, 2)
else:
print(f"Unknown image size {len(msg)}")
return
# self.batch.append(img) # on CPU from here
self.batch.append(img.to("cuda")) # on GPU from here
self.settings_batch.append(settings.copy())
n = self.batch_size
if len(self.batch) >= n:
batch = torch.stack(self.batch[:n]) # save the first n elements
if batch.shape[1] == 3:
batch = half_size_batch(batch)
elif batch.shape[-1] == 2:
batch = uyvy_to_rgb_batch(batch)
else:
print("unknown channels")
settings_batch = self.settings_batch[:n]
self.batch = self.batch[n:] # drop the first n elements
self.settings_batch = self.settings_batch[n:]
return batch, settings_batch
    def cleanup(self):
        # Release the ZeroMQ socket and context when the worker stops.
        self.sock.close()
        self.context.term()
class Processor(ThreadedWorker):
    # Runs the diffusion model on incoming image batches and emits frames.
    def __init__(self, settings):
        super().__init__(has_input=True, has_output=True, debug=True)
        self.batch_size = settings.batch_size
        self.settings = settings

    def setup(self):
        # Warmup string encodes the expected batch shape: NxHxWxC.
        warmup = f"{self.batch_size}x540x960x3"
        self.diffusion_processor = DiffusionProcessor(warmup=warmup)
        self.clear_input()  # drop old frames

    def work(self, args):
        images, settings_batch = args
        # cuda_images = torch.FloatTensor(np.array(images)).to("cuda")
        results = self.diffusion_processor.run(
            images=images,
            prompt=self.settings.prompt,
            use_compel=True,
            num_inference_steps=2,
            strength=0.7,
            seed=self.settings.seed)
        # Emit one frame per input; blend with the source frame when the
        # per-frame opacity setting is below 1.
        for frame_settings, image, result in zip(settings_batch, images, results):
            if frame_settings.opacity == 1:
                self.output_queue.put(result)
            else:
                opacity = float(frame_settings.opacity)
                # CHW tensor -> HWC array, cropped to the result height.
                input_image = np.transpose(image.cpu().numpy(), (1, 2, 0))[:result.shape[0]]
                blended = result * opacity + input_image * (1 - opacity)
                self.output_queue.put(blended)
class Display(ThreadedWorker):
    # Displays processed frames through SDL2, synced to vsync.
    def __init__(self, batch_size):
        super().__init__(has_input=True, has_output=False)
        self.fullscreen = True
        self.batch_size = batch_size
        self.width = 960
        self.height = 536
        self.channels = 3
        # Present each frame this many times (one vsync wait per present).
        self.frame_repeat = 2

    def setup(self):
        sdl2.ext.init()
        self.window = sdl2.ext.Window("i2i", size=(self.width, self.height))
        self.renderer = sdl2.ext.Renderer(self.window, flags=sdl2.SDL_RENDERER_ACCELERATED | sdl2.SDL_RENDERER_PRESENTVSYNC)
        self.window.show()
        self.event = sdl2.SDL_Event()
        # Streaming texture updated once per incoming frame.
        self.texture = sdl2.SDL_CreateTexture(self.renderer.sdlrenderer,
                                              sdl2.SDL_PIXELFORMAT_RGB24,
                                              sdl2.SDL_TEXTUREACCESS_STREAMING,
                                              self.width, self.height)
        if self.fullscreen:
            sdl2.SDL_SetWindowFullscreen(self.window.window, sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP)
        self.clear_input()  # drop old frames

    def work(self, frame):
        # Drop frames when we are falling behind the processor.
        while self.input_queue.qsize() > self.batch_size:
            # print("dropping frame")
            frame = self.input_queue.get()

        # Event handling
        while sdl2.SDL_PollEvent(ctypes.byref(self.event)):
            if self.event.type == sdl2.SDL_QUIT:
                self.should_exit = True
            elif self.event.type == sdl2.SDL_KEYDOWN:
                keysym = self.event.key.keysym.sym
                if keysym == sdl2.SDLK_f:
                    # Toggle fullscreen with the "f" key.
                    self.fullscreen = not self.fullscreen
                    mode = sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP if self.fullscreen else 0
                    sdl2.SDL_SetWindowFullscreen(self.window.window, mode)

        # Update texture: float [0,1] frame -> 8-bit RGB.
        image_data = (frame * 255).astype(np.uint8)
        sdl2.SDL_UpdateTexture(self.texture, None, image_data.ctypes.data, self.width * self.channels)

        # Render noise on screen
        sdl2.SDL_RenderClear(self.renderer.sdlrenderer)
        for i in range(self.frame_repeat):
            sdl2.SDL_RenderCopy(self.renderer.sdlrenderer, self.texture, None, None)
            sdl2.SDL_RenderPresent(self.renderer.sdlrenderer)
            # Renderer will now wait for vsync

    def cleanup(self):
        sdl2.SDL_DestroyTexture(self.texture)
        sdl2.ext.quit()
# Global Settings instance shared by the worker classes above.
settings = Settings()
|
def unpack_rgb444_image(buffer, image_shape):
    """
    Unpack big-endian 32-bit words holding 10-bit R, G, B components
    (bits 20-29, 10-19 and 0-9 respectively) into a float32 array of shape
    (3, height, width) scaled to [0, 1).

    `buffer` holds the raw frame bytes; `image_shape` is (height, width).
    """
    # 10-bit component mask. The previous `(2 << 10) - 1` was an 11-bit
    # mask, leaking the padding bit into red and each neighbor channel's
    # low bit into green/blue.
    mask = (1 << 10) - 1
    # Words are big-endian on the wire; byteswap() returns a native-order copy.
    img = np.frombuffer(buffer, dtype=np.uint32).reshape(*image_shape).byteswap()
    red = (img >> 20) & mask
    green = (img >> 10) & mask
    blue = (img) & mask
    unpacked_image = np.stack((red, green, blue)).astype(np.float32) / 1024.
    return unpacked_image
def half_size_batch(batch):
    # Downscale a batch of CHW images by 2x using area (average) interpolation.
    return F.interpolate(batch, scale_factor=0.5, mode='area')
def uyvy_to_rgb_batch(uyvy_images):
    """
    Convert a batch of packed UYVY 4:2:2 frames of shape (B, H, W, 2)
    to RGB tensors of shape (B, 3, H/2, W/2), downsampled 2x and
    clamped to [0, 1].
    """
    # Convert the batch of images to float32
    uyvy_f32 = uyvy_images.to(torch.float32)

    # Handle the Y channel (byte 1 of every pixel)
    y_channel = uyvy_f32[:, :, :, 1].unsqueeze(1)  # Keep the Y channel in its own dimension
    y_channel = F.interpolate(y_channel, scale_factor=0.5, mode='area')

    # Handle the U channel (byte 0 of even-indexed pixels)
    u_channel = uyvy_f32[:, :, 0::2, 0].unsqueeze(1)
    h, w = y_channel.shape[-2], y_channel.shape[-1]  # Extract the new dimensions after Y interpolation
    u_channel = F.interpolate(u_channel, size=(h,w), mode='area')

    # Handle the V channel (byte 0 of odd-indexed pixels)
    v_channel = uyvy_f32[:, :, 1::2, 0].unsqueeze(1)
    v_channel = F.interpolate(v_channel, size=(h,w), mode='area')

    # Normalize channels to [0,1] range
    y_channel /= 255.0
    u_channel /= 255.0
    v_channel /= 255.0

    # Recalculate R, G, B based on Y, U, V; the constants are the standard
    # BT.601-style conversion coefficients with chroma centered at 0.5.
    r = y_channel + 1.402 * (v_channel - 0.5)
    g = y_channel - 0.344136 * (u_channel - 0.5) - 0.714136 * (v_channel - 0.5)
    b = y_channel + 1.772 * (u_channel - 0.5)

    # Stack the channels and clamp the values
    rgb_images = torch.cat((r, g, b), dim=1)  # Concatenate along the color channel dimension
    rgb_images = torch.clamp(rgb_images, 0.0, 1.0)

    return rgb_images
class Receiver(ThreadedWorker):
    # Receives raw frames over ZeroMQ and assembles fixed-size GPU batches.
    def __init__(self, batch_size):
        super().__init__(has_input=False, has_output=True)
        self.batch_size = batch_size

    def setup(self):
        self.context = zmq.Context()
        self.sock = self.context.socket(zmq.SUB)
        address = f"ipc:///tmp/zmq"
        print(f"Connecting to {address}")
        self.sock.connect(address)
        self.sock.setsockopt(zmq.SUBSCRIBE, b"")  # subscribe to all messages
        self.sock.setsockopt(zmq.RCVTIMEO, 100)  # 100 ms receive timeout
        self.sock.setsockopt(zmq.RCVHWM, 1)
        self.sock.setsockopt(zmq.LINGER, 0)
        self.batch = []
        self.settings_batch = []

    def work(self):
        try:
            msg = self.sock.recv(copy=False).bytes
        except zmq.Again:
            # Timed out with no frame available; try again on the next call.
            return

        # 1920*1080*4 bytes: 32-bit words decoded by unpack_rgb444_image.
        if len(msg) == 8294400:
            img = torch.from_numpy(unpack_rgb444_image(msg, (1080, 1920)))
        # 1920*1080*2 bytes: packed UYVY 4:2:2.
        elif len(msg) == 4147200:
            img = torch.frombuffer(msg, dtype=torch.uint8).view(1080, 1920, 2)
        else:
            print(f"Unknown image size {len(msg)}")
            return

        # self.batch.append(img) # on CPU from here
        self.batch.append(img.to("cuda"))  # on GPU from here
        # Snapshot settings so each frame keeps the values it arrived with.
        self.settings_batch.append(settings.copy())

        n = self.batch_size
        if len(self.batch) >= n:
            batch = torch.stack(self.batch[:n])  # save the first n elements
            if batch.shape[1] == 3:
                batch = half_size_batch(batch)
            elif batch.shape[-1] == 2:
                batch = uyvy_to_rgb_batch(batch)
            else:
                print("unknown channels")
            settings_batch = self.settings_batch[:n]
            self.batch = self.batch[n:]  # drop the first n elements
            self.settings_batch = self.settings_batch[n:]
            return batch, settings_batch

    def cleanup(self):
        # Release the ZeroMQ socket and context when the worker stops.
        self.sock.close()
        self.context.term()
class Processor(ThreadedWorker):
    # Runs the diffusion model on incoming image batches and emits frames.
    def __init__(self, settings):
        super().__init__(has_input=True, has_output=True, debug=True)
        self.batch_size = settings.batch_size
        self.settings = settings

    def setup(self):
        # Warmup string encodes the expected batch shape: NxHxWxC.
        warmup = f"{self.batch_size}x540x960x3"
        self.diffusion_processor = DiffusionProcessor(warmup=warmup)
        self.clear_input()  # drop old frames

    def work(self, args):
        images, settings_batch = args
        # cuda_images = torch.FloatTensor(np.array(images)).to("cuda")
        results = self.diffusion_processor.run(
            images=images,
            prompt=self.settings.prompt,
            use_compel=True,
            num_inference_steps=2,
            strength=0.7,
            seed=self.settings.seed)
        # Emit one frame per input; blend with the source frame when the
        # per-frame opacity setting is below 1.
        for frame_settings, image, result in zip(settings_batch, images, results):
            if frame_settings.opacity == 1:
                self.output_queue.put(result)
            else:
                opacity = float(frame_settings.opacity)
                # CHW tensor -> HWC array, cropped to the result height.
                input_image = np.transpose(image.cpu().numpy(), (1, 2, 0))[:result.shape[0]]
                blended = result * opacity + input_image * (1 - opacity)
                self.output_queue.put(blended)
class Display(ThreadedWorker):
    # Displays processed frames through SDL2, synced to vsync.
    def __init__(self, batch_size):
        super().__init__(has_input=True, has_output=False)
        self.fullscreen = True
        self.batch_size = batch_size
        self.width = 960
        self.height = 536
        self.channels = 3
        # Present each frame this many times (one vsync wait per present).
        self.frame_repeat = 2

    def setup(self):
        sdl2.ext.init()
        self.window = sdl2.ext.Window("i2i", size=(self.width, self.height))
        self.renderer = sdl2.ext.Renderer(self.window, flags=sdl2.SDL_RENDERER_ACCELERATED | sdl2.SDL_RENDERER_PRESENTVSYNC)
        self.window.show()
        self.event = sdl2.SDL_Event()
        # Streaming texture updated once per incoming frame.
        self.texture = sdl2.SDL_CreateTexture(self.renderer.sdlrenderer,
                                              sdl2.SDL_PIXELFORMAT_RGB24,
                                              sdl2.SDL_TEXTUREACCESS_STREAMING,
                                              self.width, self.height)
        if self.fullscreen:
            sdl2.SDL_SetWindowFullscreen(self.window.window, sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP)
        self.clear_input()  # drop old frames

    def work(self, frame):
        # Drop frames when we are falling behind the processor.
        while self.input_queue.qsize() > self.batch_size:
            # print("dropping frame")
            frame = self.input_queue.get()

        # Event handling
        while sdl2.SDL_PollEvent(ctypes.byref(self.event)):
            if self.event.type == sdl2.SDL_QUIT:
                self.should_exit = True
            elif self.event.type == sdl2.SDL_KEYDOWN:
                keysym = self.event.key.keysym.sym
                if keysym == sdl2.SDLK_f:
                    # Toggle fullscreen with the "f" key.
                    self.fullscreen = not self.fullscreen
                    mode = sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP if self.fullscreen else 0
                    sdl2.SDL_SetWindowFullscreen(self.window.window, mode)

        # Update texture: float [0,1] frame -> 8-bit RGB.
        image_data = (frame * 255).astype(np.uint8)
        sdl2.SDL_UpdateTexture(self.texture, None, image_data.ctypes.data, self.width * self.channels)

        # Render noise on screen
        sdl2.SDL_RenderClear(self.renderer.sdlrenderer)
        for i in range(self.frame_repeat):
            sdl2.SDL_RenderCopy(self.renderer.sdlrenderer, self.texture, None, None)
            sdl2.SDL_RenderPresent(self.renderer.sdlrenderer)
            # Renderer will now wait for vsync

    def cleanup(self):
        sdl2.SDL_DestroyTexture(self.texture)
        sdl2.ext.quit()
# Global Settings instance shared by the worker classes above.
settings = Settings()
wusize/CLIM | src/open_clip/eva_clip/eva_vit_model.py | [
{
"identifier": "PatchDropout",
"path": "src/open_clip/eva_clip/transformer.py",
"snippet": "class PatchDropout(nn.Module):\n \"\"\"\n https://arxiv.org/abs/2212.00794\n \"\"\"\n\n def __init__(self, prob, exclude_first_token=True):\n super().__init__()\n assert 0 <= prob < 1.\... | import math
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import xformers.ops as xops
from functools import partial
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.layers import drop_path, to_2tuple, trunc_normal_
from .transformer import PatchDropout
from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
from torchvision.ops import roi_align
from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
from torch.utils.checkpoint import checkpoint
from torch.utils.checkpoint import checkpoint
from typing import Sequence | 6,669 | x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
    def forward_without_attn(self, x):
        # Residual update that skips token mixing: only the attention
        # value/output projection path is applied, mirroring the four
        # pre/post-norm x layer-scale (gamma) variants of forward().
        if self.gamma_1 is None:
            if self.postnorm:
                x = x + self.drop_path(self.norm1(self.attn.proj_without_attn(x)))
                x = x + self.drop_path(self.norm2(self.mlp(x)))
            else:
                x = x + self.drop_path(self.attn.proj_without_attn(self.norm1(x)))
                x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            if self.postnorm:
                x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn.proj_without_attn(x)))
                x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))
            else:
                x = x + self.drop_path(self.gamma_1 * self.attn.proj_without_attn(self.norm1(x)))
                x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        # Patch grid (rows, cols), used e.g. for relative position tables.
        self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        # Non-overlapping patchify: conv with kernel == stride == patch size.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x, **kwargs):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        # assert H == self.img_size[0] and W == self.img_size[1], \
        #     f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # (B, C, H, W) -> (B, num_patches, embed_dim)
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
class RelativePositionBias(nn.Module):
    # Learned relative position bias shared across attention layers,
    # with dedicated entries for cls<->token and cls<->cls interactions.
    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        # One entry per possible 2D offset, plus 3 special cls entries.
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token 2 cls & cls to cls

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(window_size[0])
        coords_w = torch.arange(window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        # Flatten the (dy, dx) pair into a single table index.
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        relative_position_index = \
            torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # Last three table rows are reserved for the cls-token cases.
        relative_position_index[0, 0:] = self.num_relative_distance - 3
        relative_position_index[0:, 0] = self.num_relative_distance - 2
        relative_position_index[0, 0] = self.num_relative_distance - 1

        self.register_buffer("relative_position_index", relative_position_index)

    def forward(self):
        relative_position_bias = \
            self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
                self.window_size[0] * self.window_size[1] + 1,
                self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
        return relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
class EVAVisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0.,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False,
use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False,
pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False):
super().__init__()
self.image_size = img_size
self.num_heads = num_heads
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
if rope:
half_head_dim = embed_dim // num_heads // 2
hw_seq_len = img_size // patch_size
| # --------------------------------------------------------
# Adapted from https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
try:
except:
if os.getenv('ENV_TYPE') == 'deepspeed':
try:
except:
else:
try:
except ImportError:
xops = None
print("Please 'pip install xformers'")
class DropPath(nn.Module):
    """Per-sample stochastic depth: randomly zeroes the residual branch.

    Thin module wrapper around the functional `drop_path`; only active
    while in training mode.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f"p={self.drop_prob}"
class Mlp(nn.Module):
    """Transformer feed-forward block: fc1 -> act -> (optional sub-LN) -> fc2."""

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        drop=0.,
        subln=False,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        # Sub-LayerNorm between the two linears when `subln` is enabled.
        self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        # x = self.drop(x)
        # NOTE: dropout after the activation is intentionally disabled to
        # match the original BERT implementation.
        x = self.ffn_ln(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
class SwiGLU(nn.Module):
    """SwiGLU feed-forward block: act(w1(x)) * w2(x) -> (optional sub-LN) -> w3."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.,
                 norm_layer=nn.LayerNorm, subln=False):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features

        # Two parallel projections: gate (w1, activated) and value (w2).
        self.w1 = nn.Linear(in_features, hidden_features)
        self.w2 = nn.Linear(in_features, hidden_features)

        self.act = act_layer()
        # Sub-LayerNorm before the output projection when `subln` is enabled.
        self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
        self.w3 = nn.Linear(hidden_features, out_features)

        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x1 = self.w1(x)
        x2 = self.w2(x)
        hidden = self.act(x1) * x2  # gated activation
        x = self.ffn_ln(hidden)
        x = self.w3(x)
        x = self.drop(x)
        return x
class Attention(nn.Module):
    # Multi-head self-attention with optional relative position bias,
    # rotary embeddings (rope), xformers memory-efficient attention, and
    # sub-LayerNorm (separate q/k/v projections) variants.
    def __init__(
            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
            proj_drop=0., window_size=None, attn_head_dim=None, xattn=False, rope=None, subln=False, norm_layer=nn.LayerNorm):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.subln = subln
        if self.subln:
            # Separate projections; biases (q/v only) are applied manually.
            self.q_proj = nn.Linear(dim, all_head_dim, bias=False)
            self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
            self.v_proj = nn.Linear(dim, all_head_dim, bias=False)
        else:
            self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)

        if qkv_bias:
            # No bias on k, by design (matches BEiT-style attention).
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None

        if window_size:
            # Per-window learned relative position bias (see
            # RelativePositionBias for the index construction).
            self.window_size = window_size
            self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
            # cls to token & token 2 cls & cls to cls

            # get pair-wise relative position index for each token inside the window
            coords_h = torch.arange(window_size[0])
            coords_w = torch.arange(window_size[1])
            coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
            coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
            relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
            relative_coords[:, :, 1] += window_size[1] - 1
            relative_coords[:, :, 0] *= 2 * window_size[1] - 1
            relative_position_index = \
                torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
            relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
            relative_position_index[0, 0:] = self.num_relative_distance - 3
            relative_position_index[0:, 0] = self.num_relative_distance - 2
            relative_position_index[0, 0] = self.num_relative_distance - 1

            self.register_buffer("relative_position_index", relative_position_index)
        else:
            self.window_size = None
            self.relative_position_bias_table = None
            self.relative_position_index = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.inner_attn_ln = norm_layer(all_head_dim) if subln else nn.Identity()
        # self.proj = nn.Linear(all_head_dim, all_head_dim)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop

        self.rope = rope

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        B, N, C = x.shape
        if self.subln:
            q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias)
            k = F.linear(input=x, weight=self.k_proj.weight, bias=None)
            v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias)

            q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)  # B, num_heads, N, C
            k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
            v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
        else:
            qkv_bias = None
            if self.q_bias is not None:
                # k gets a zero bias so the fused projection stays unbiased for k.
                qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))

            qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
            qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)  # 3, B, num_heads, N, C
            q, k, v = qkv[0], qkv[1], qkv[2]

        if self.rope:
            if attn_mask is not None:
                attn_mask = attn_mask.to(q)
            # Rotary embedding is applied to all tokens except the cls token.
            # slightly fast impl
            q_t = q[:, :, 1:, :]
            ro_q_t = self.rope(q_t)
            q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v)

            k_t = k[:, :, 1:, :]
            ro_k_t = self.rope(k_t)
            k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v)

        if self.xattn:
            # xformers path expects (B, N, num_heads, C).
            q = q.permute(0, 2, 1, 3)  # B, num_heads, N, C -> B, N, num_heads, C
            k = k.permute(0, 2, 1, 3)
            v = v.permute(0, 2, 1, 3)

            x = xops.memory_efficient_attention(
                q, k, v,
                p=self.xattn_drop,
                scale=self.scale,
                attn_bias=attn_mask  # to allow masked attention
            )
            x = x.reshape(B, N, -1)
            x = self.inner_attn_ln(x)
            x = self.proj(x)
            x = self.proj_drop(x)
        else:
            # Plain scaled-dot-product attention path.
            q = q * self.scale
            attn = (q @ k.transpose(-2, -1))

            if self.relative_position_bias_table is not None:
                relative_position_bias = \
                    self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
                        self.window_size[0] * self.window_size[1] + 1,
                        self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
                relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
                attn = attn + relative_position_bias.unsqueeze(0).type_as(attn)

            if rel_pos_bias is not None:
                attn = attn + rel_pos_bias.type_as(attn)

            if attn_mask is not None:
                attn_mask = attn_mask.bool()
                attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf"))

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
            x = self.inner_attn_ln(x)
            x = self.proj(x)
            x = self.proj_drop(x)
        return x

    def proj_without_attn(self, x):
        # Value/output projection only (no attention): used by
        # Block.forward_without_attn. Only valid in the subln configuration,
        # since it relies on the separate v_proj weights.
        x = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias)
        # B, num_heads, C
        x = self.inner_attn_ln(x)
        x = self.proj(x)
        x = self.proj_drop(x)

        return x
class Block(nn.Module):
    """Transformer block: (pre- or post-norm) attention and MLP sub-layers
    with optional layer-scale (gamma) and stochastic depth (drop path)."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 window_size=None, attn_head_dim=None, xattn=False, rope=None, postnorm=False,
                 subln=False, naiveswiglu=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim,
            xattn=xattn, rope=rope, subln=subln, norm_layer=norm_layer)
        # NOTE: drop path for stochastic depth; identity when rate is zero
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)
        if naiveswiglu:
            self.mlp = SwiGLU(
                in_features=dim,
                hidden_features=hidden_dim,
                subln=subln,
                norm_layer=norm_layer,
            )
        else:
            self.mlp = Mlp(
                in_features=dim,
                hidden_features=hidden_dim,
                act_layer=act_layer,
                subln=subln,
                drop=drop
            )
        # Layer-scale parameters (LayerScale / CaiT-style) when enabled
        if init_values is not None and init_values > 0:
            self.gamma_1 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True)
        else:
            self.gamma_1 = self.gamma_2 = None
        self.postnorm = postnorm

    def _combine(self, x, attn_fn):
        """Shared residual combiner for the attention and MLP sub-layers."""
        # attention (or projection-only) sub-layer
        if self.postnorm:
            a = self.norm1(attn_fn(x))
        else:
            a = attn_fn(self.norm1(x))
        if self.gamma_1 is not None:
            a = self.gamma_1 * a
        x = x + self.drop_path(a)
        # MLP sub-layer
        if self.postnorm:
            m = self.norm2(self.mlp(x))
        else:
            m = self.mlp(self.norm2(x))
        if self.gamma_2 is not None:
            m = self.gamma_2 * m
        return x + self.drop_path(m)

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        def attn_fn(t):
            return self.attn(t, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)
        return self._combine(x, attn_fn)

    def forward_without_attn(self, x):
        # same residual structure, but using the value-projection-only path
        return self._combine(x, self.attn.proj_without_attn)
class PatchEmbed(nn.Module):
    """Image to Patch Embedding via a strided convolution.

    Splits a (B, C, H, W) image into non-overlapping patches and projects
    each patch to `embed_dim`, returning (B, num_patches, embed_dim).
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid_h = img_size[0] // patch_size[0]
        grid_w = img_size[1] // patch_size[1]
        self.patch_shape = (grid_h, grid_w)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = grid_h * grid_w
        # one conv kernel per patch position: kernel == stride == patch size
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x, **kwargs):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        # (the original size assertion against self.img_size is intentionally disabled)
        patches = self.proj(x)                    # B, embed_dim, H', W'
        return patches.flatten(2).transpose(1, 2)  # B, N, embed_dim
class RelativePositionBias(nn.Module):
    """Learned relative position bias for windowed attention, including a cls token.

    Builds a (num_relative_distance, num_heads) table plus an index buffer mapping
    every (token, token) pair — and the three cls-related cases — to a table row.
    """

    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        wh, ww = window_size
        # (2*Wh-1)*(2*Ww-1) pairwise offsets + 3 special entries for cls<->token/cls
        self.num_relative_distance = (2 * wh - 1) * (2 * ww - 1) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # pair-wise relative position index for each token inside the window
        grid = torch.stack(torch.meshgrid([torch.arange(wh), torch.arange(ww)]))  # 2, Wh, Ww
        flat = torch.flatten(grid, 1)  # 2, Wh*Ww
        rel = flat[:, :, None] - flat[:, None, :]  # 2, Wh*Ww, Wh*Ww
        rel = rel.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        rel[:, :, 0] += wh - 1  # shift offsets to start from 0
        rel[:, :, 1] += ww - 1
        rel[:, :, 0] *= 2 * ww - 1  # row-major flattening of the 2-D offset

        num_tokens = wh * ww + 1  # +1 for the cls token
        index = torch.zeros(size=(num_tokens, num_tokens), dtype=rel.dtype)
        index[1:, 1:] = rel.sum(-1)  # Wh*Ww, Wh*Ww
        index[0, :] = self.num_relative_distance - 3  # cls -> token
        index[:, 0] = self.num_relative_distance - 2  # token -> cls
        index[0, 0] = self.num_relative_distance - 1  # cls -> cls
        self.register_buffer("relative_position_index", index)

    def forward(self):
        num_tokens = self.window_size[0] * self.window_size[1] + 1
        table = self.relative_position_bias_table[self.relative_position_index.view(-1)]
        bias = table.view(num_tokens, num_tokens, -1)  # Wh*Ww,Wh*Ww,nH
        return bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
class EVAVisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0.,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False,
use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False,
pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False):
super().__init__()
self.image_size = img_size
self.num_heads = num_heads
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
if rope:
half_head_dim = embed_dim // num_heads // 2
hw_seq_len = img_size // patch_size | self.rope = VisionRotaryEmbeddingFast( | 2 | 2023-12-09 05:43:08+00:00 | 8k |
moonshot-admin/moonshot | third-party/tqdm-4.66.1/tqdm/std.py | [
{
"identifier": "TMonitor",
"path": "third-party/tqdm-4.66.1/tqdm/_monitor.py",
"snippet": "class TMonitor(Thread):\n \"\"\"\n Monitoring thread for tqdm bars.\n Monitors if tqdm bars are taking too much time to display\n and readjusts miniters automatically if necessary.\n\n Parameters\n... | import sys
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
from datetime import datetime, timedelta
from numbers import Number
from time import time
from warnings import warn
from weakref import WeakSet
from ._monitor import TMonitor
from .utils import (
CallbackIOWrapper, Comparable, DisableOnWriteError, FormatReplace, SimpleTextIOWrapper,
_is_ascii, _screen_shape_wrapper, _supports_unicode, _term_move_up, disp_len, disp_trim,
envwrap)
from threading import RLock
from multiprocessing import RLock
from warnings import catch_warnings, simplefilter
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
from pandas.core.window.rolling import _Rolling_and_Expanding
from pandas.core.window import _Rolling_and_Expanding
from pandas.core.window.expanding import Expanding
from pandas.core.window.rolling import Rolling
from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
from pandas.core.groupby.groupby import GroupBy
from pandas.core.groupby import GroupBy
from pandas.core.groupby.groupby import PanelGroupBy
from pandas.core.groupby import PanelGroupBy
from pandas.core.common import is_builtin_func | 5,983 |
tqdm_kwargs = tqdm_kwargs.copy()
deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tqdm_kwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif (_Rolling_and_Expanding is None or
not isinstance(df, _Rolling_and_Expanding)):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = cls(total=total, **tqdm_kwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
try: # pandas>=1.3.0
except ImportError:
is_builtin_func = df._is_builtin_func
try:
func = is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
try:
return getattr(df, df_function)(wrapper, **kwargs)
finally:
t.close()
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if Rolling is not None and Expanding is not None:
Rolling.progress_apply = inner_generator()
Expanding.progress_apply = inner_generator()
elif _Rolling_and_Expanding is not None:
_Rolling_and_Expanding.progress_apply = inner_generator()
# override defaults via env vars
@envwrap("TQDM_", is_method=True, types={'total': float, 'ncols': int, 'miniters': float,
'position': int, 'nrows': int})
def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,
ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,
ascii=None, disable=False, unit='it', unit_scale=False,
dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,
position=None, postfix=None, unit_divisor=1000, write_bytes=False,
lock_args=None, nrows=None, colour=None, delay=0.0, gui=False,
**kwargs):
"""see tqdm.tqdm for arguments"""
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
| """
Customisable progressbar decorator for iterators.
Includes a default `range` iterator printing to `stderr`.
Usage:
>>> from tqdm import trange, tqdm
>>> for i in trange(10):
... ...
"""
__author__ = "https://github.com/tqdm/tqdm#contributions"
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
    """Raised when an argument passed to tqdm has an unexpected type."""
    pass
class TqdmKeyError(KeyError):
    """Raised when an unknown keyword argument is passed to tqdm."""
    pass
class TqdmWarning(Warning):
    """Base class for all tqdm warnings.

    Used for non-external-code-breaking errors, such as garbled printing.
    When `fp_write` is supplied, the message is written there directly
    instead of being stored as a normal warning message.
    """
    def __init__(self, msg, fp_write=None, *a, **k):
        if fp_write is None:
            super(TqdmWarning, self).__init__(msg, *a, **k)
        else:
            # write "\n<ClassName>: <msg>\n" to the supplied stream-like callable
            fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n')
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
    """beta feature, unstable API and behaviour"""
    # FutureWarning base ensures it is shown by default (not filtered out)
    pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
    """Deprecated tqdm feature."""
    # not suppressed if raised
    pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
    """tqdm monitor errors which do not affect external functionality"""
    pass
def TRLock(*args, **kwargs):
    """threading RLock (returns None if a lock cannot be created)"""
    try:
        return RLock(*args, **kwargs)
    except (ImportError, OSError):  # pragma: no cover
        return None
class TqdmDefaultWriteLock(object):
    """
    Default write lock providing both thread- and multiprocessing-safety.

    Works only on platforms supporting `fork` (so Windows is excluded).
    You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
    before forking in order for the write lock to work.
    On Windows, you need to supply the lock from the parent to the children as
    an argument to joblib or the parallelism lib you use.
    """
    # Global thread lock: no setup required for multithreading.
    # NB: Do not create multiprocessing lock as it sets the multiprocessing
    # context, disallowing `spawn()`/`forkserver()`
    th_lock = TRLock()

    def __init__(self):
        # Create global parallelism locks to avoid racing issues with parallel
        # bars; works only if fork is available (Linux/MacOSX, but not Windows)
        cls = type(self)
        root_lock = cls.th_lock
        if root_lock is not None:
            root_lock.acquire()
        cls.create_mp_lock()
        self.locks = [lk for lk in (cls.mp_lock, cls.th_lock) if lk is not None]
        if root_lock is not None:
            root_lock.release()

    def acquire(self, *a, **k):
        # acquire every lock in registration order
        for lk in self.locks:
            lk.acquire(*a, **k)

    def release(self):
        # release in the inverse order of acquisition
        for lk in reversed(self.locks):
            lk.release()

    def __enter__(self):
        self.acquire()

    def __exit__(self, *exc):
        self.release()

    @classmethod
    def create_mp_lock(cls):
        # lazily create the multiprocessing lock exactly once per class
        if hasattr(cls, 'mp_lock'):
            return
        try:
            cls.mp_lock = RLock()
        except (ImportError, OSError):  # pragma: no cover
            cls.mp_lock = None

    @classmethod
    def create_th_lock(cls):
        # kept for backward compatibility only
        assert hasattr(cls, 'th_lock')
        warn("create_th_lock not needed anymore", TqdmDeprecationWarning, stacklevel=2)
class Bar(object):
    """
    `str.format`-able bar with format specifiers: `[width][type]`

    - `width`
      + unspecified (default): use `self.default_len`
      + `int >= 0`: overrides `self.default_len`
      + `int < 0`: subtract from `self.default_len`
    - `type`
      + `a`: ascii (`charset=self.ASCII` override)
      + `u`: unicode (`charset=self.UTF` override)
      + `b`: blank (`charset="  "` override)
    """
    ASCII = " 123456789#"
    UTF = u" " + u''.join(map(chr, range(0x258F, 0x2587, -1)))
    BLANK = "  "
    COLOUR_RESET = '\x1b[0m'
    COLOUR_RGB = '\x1b[38;2;%d;%d;%dm'
    COLOURS = {'BLACK': '\x1b[30m', 'RED': '\x1b[31m', 'GREEN': '\x1b[32m',
               'YELLOW': '\x1b[33m', 'BLUE': '\x1b[34m', 'MAGENTA': '\x1b[35m',
               'CYAN': '\x1b[36m', 'WHITE': '\x1b[37m'}

    def __init__(self, frac, default_len=10, charset=UTF, colour=None):
        if not 0 <= frac <= 1:
            warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
            frac = min(1, max(0, frac))
        assert default_len > 0
        self.frac = frac
        self.default_len = default_len
        self.charset = charset
        self.colour = colour

    @property
    def colour(self):
        return self._colour

    @colour.setter
    def colour(self, value):
        # accepts a named colour (case-insensitive) or a '#rrggbb' hex string
        if not value:
            self._colour = None
            return
        try:
            upper = value.upper()
            if upper in self.COLOURS:
                self._colour = self.COLOURS[upper]
                return
            if value[0] == '#' and len(value) == 7:
                r, g, b = (int(value[i:i + 2], 16) for i in (1, 3, 5))
                self._colour = self.COLOUR_RGB % (r, g, b)
                return
            raise KeyError
        except (KeyError, AttributeError):
            warn("Unknown colour (%s); valid choices: [hex (#00ff00), %s]" % (
                value, ", ".join(self.COLOURS)),
                TqdmWarning, stacklevel=2)
            self._colour = None

    def __format__(self, format_spec):
        charset = self.charset
        width = self.default_len
        if format_spec:
            kind = format_spec[-1].lower()
            overrides = {'a': self.ASCII, 'u': self.UTF, 'b': self.BLANK}
            if kind in overrides:
                charset = overrides[kind]
                format_spec = format_spec[:-1]
            if format_spec:
                width = int(format_spec)
                if width < 0:
                    width += self.default_len
        nsyms = len(charset) - 1
        full, part = divmod(int(self.frac * width * nsyms), nsyms)

        bar = charset[-1] * full
        if full < width:  # pad with a partial symbol then blanks
            bar = bar + charset[part] + charset[0] * (width - full - 1)
        return (self.colour + bar + self.COLOUR_RESET) if self.colour else bar
class EMA(object):
    """
    Exponential moving average: smoothing to give progressively lower
    weights to older values.

    Parameters
    ----------
    smoothing  : float, optional
        Smoothing factor in range [0, 1], [default: 0.3].
        Increase to give more weight to recent values.
        Ranges from 0 (yields old value) to 1 (yields new value).
    """
    def __init__(self, smoothing=0.3):
        self.alpha = smoothing
        self.last = 0
        self.calls = 0

    def __call__(self, x=None):
        """
        Parameters
        ----------
        x  : float
            New value to include in EMA (omit to just read the current value).
        """
        beta = 1 - self.alpha
        if x is not None:
            # standard EMA update; commutative reordering of the original sum
            self.last = beta * self.last + self.alpha * x
            self.calls += 1
        if not self.calls:
            return self.last
        # bias correction for the zero initialisation (Adam-style debiasing)
        return self.last / (1 - beta ** self.calls)
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int or float, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive number,
e.g. 9e9.
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
If `None`, will leave only if `position` is `0`.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int or float, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` and `nrows` to the
environment (allowing for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s, eta.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int or float, optional
The initial counter value. Useful when restarting a progress
bar [default: 0]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
Whether to write bytes. If (default: False) will write unicode.
lock_args : tuple, optional
Passed to `refresh` for intermediate output
(initialisation, iterating, and updating).
nrows : int, optional
The screen height. If specified, hides nested bars outside this
bound. If unspecified, attempts to use environment height.
The fallback is 20.
colour : str, optional
Bar colour (e.g. 'green', '#00ff00').
delay : float, optional
Don't display until [default: 0] seconds have elapsed.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
_instances = WeakSet()
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
if fp in (sys.stderr, sys.stdout):
getattr(sys.stderr, 'flush', lambda: None)()
getattr(sys.stdout, 'flush', lambda: None)()
def fp_write(s):
fp.write(str(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = disp_len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
    @staticmethod
    def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it',
                     unit_scale=False, rate=None, bar_format=None, postfix=None,
                     unit_divisor=1000, initial=0, colour=None, **extra_kwargs):
        """
        Return a string-based progress bar given some parameters

        Parameters
        ----------
        n  : int or float
            Number of finished iterations.
        total  : int or float
            The expected total number of iterations. If meaningless (None),
            only basic progress statistics are displayed (no ETA).
        elapsed  : float
            Number of seconds passed since start.
        ncols  : int, optional
            The width of the entire output message. If specified,
            dynamically resizes `{bar}` to stay within this bound
            [default: None]. If `0`, will not print any bar (only stats).
            The fallback is `{bar:10}`.
        prefix  : str, optional
            Prefix message (included in total width) [default: ''].
            Use as {desc} in bar_format string.
        ascii  : bool, optional or str, optional
            If not set, use unicode (smooth blocks) to fill the meter
            [default: False]. The fallback is to use ASCII characters
            " 123456789#".
        unit  : str, optional
            The iteration unit [default: 'it'].
        unit_scale  : bool or int or float, optional
            If 1 or True, the number of iterations will be printed with an
            appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
            [default: False]. If any other non-zero number, will scale
            `total` and `n`.
        rate  : float, optional
            Manual override for iteration rate.
            If [default: None], uses n/elapsed.
        bar_format  : str, optional
            Specify a custom bar string formatting. May impact performance.
            [default: '{l_bar}{bar}{r_bar}'], where
            l_bar='{desc}: {percentage:3.0f}%|' and
            r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
              '{rate_fmt}{postfix}]'
            Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
              percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,
              rate, rate_fmt, rate_noinv, rate_noinv_fmt,
              rate_inv, rate_inv_fmt, postfix, unit_divisor,
              remaining, remaining_s, eta.
            Note that a trailing ": " is automatically removed after {desc}
            if the latter is empty.
        postfix  : *, optional
            Similar to `prefix`, but placed at the end
            (e.g. for additional stats).
            Note: postfix is usually a string (not a dict) for this method,
            and will if possible be set to postfix = ', ' + postfix.
            However other types are supported (#382).
        unit_divisor  : float, optional
            [default: 1000], ignored unless `unit_scale` is True.
        initial  : int or float, optional
            The initial counter value [default: 0].
        colour  : str, optional
            Bar colour (e.g. 'green', '#00ff00').

        Returns
        -------
        out  : Formatted meter and stats, ready to display.
        """
        # sanity check: total
        if total and n >= (total + 0.5):  # allow float imprecision (#849)
            total = None

        # apply custom scale if necessary
        if unit_scale and unit_scale not in (True, 1):
            if total:
                total *= unit_scale
            n *= unit_scale
            if rate:
                rate *= unit_scale  # by default rate = self.avg_dn / self.avg_dt
            unit_scale = False

        elapsed_str = tqdm.format_interval(elapsed)

        # if unspecified, attempt to use rate = average speed
        # (we allow manual override since predicting time is an arcane art)
        if rate is None and elapsed:
            rate = (n - initial) / elapsed
        inv_rate = 1 / rate if rate else None
        format_sizeof = tqdm.format_sizeof
        # pre-render both it/s and s/it forms; pick whichever reads better below
        rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
                           '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s'
        rate_inv_fmt = (
            (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate))
            if inv_rate else '?') + 's/' + unit
        rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt

        if unit_scale:
            n_fmt = format_sizeof(n, divisor=unit_divisor)
            total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?'
        else:
            n_fmt = str(n)
            total_fmt = str(total) if total is not None else '?'

        # postfix is usually a string; non-string objects pass through unchanged
        try:
            postfix = ', ' + postfix if postfix else ''
        except TypeError:
            pass

        remaining = (total - n) / rate if rate and total else 0
        remaining_str = tqdm.format_interval(remaining) if rate else '?'
        try:
            # wall-clock ETA; epoch 0 when unknown
            # NOTE(review): datetime.utcfromtimestamp is deprecated in py3.12 — confirm upstream plan
            eta_dt = (datetime.now() + timedelta(seconds=remaining)
                      if rate and total else datetime.utcfromtimestamp(0))
        except OverflowError:
            eta_dt = datetime.max

        # format the stats displayed to the left and right sides of the bar
        if prefix:
            # old prefix setup work around
            bool_prefix_colon_already = (prefix[-2:] == ": ")
            l_bar = prefix if bool_prefix_colon_already else prefix + ": "
        else:
            l_bar = ''

        r_bar = f'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]'

        # Custom bar formatting
        # Populate a dict with all available progress indicators
        format_dict = {
            # slight extension of self.format_dict
            'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt,
            'elapsed': elapsed_str, 'elapsed_s': elapsed,
            'ncols': ncols, 'desc': prefix or '', 'unit': unit,
            'rate': inv_rate if inv_rate and inv_rate > 1 else rate,
            'rate_fmt': rate_fmt, 'rate_noinv': rate,
            'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate,
            'rate_inv_fmt': rate_inv_fmt,
            'postfix': postfix, 'unit_divisor': unit_divisor,
            'colour': colour,
            # plus more useful definitions
            'remaining': remaining_str, 'remaining_s': remaining,
            'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt,
            **extra_kwargs}

        # total is known: we can predict some stats
        if total:
            # fractional and percentage progress
            frac = n / total
            percentage = frac * 100

            l_bar += '{0:3.0f}%|'.format(percentage)

            if ncols == 0:
                # no meter: join left and right stats, dropping the bar pipes
                return l_bar[:-1] + r_bar[1:]

            format_dict.update(l_bar=l_bar)
            if bar_format:
                format_dict.update(percentage=percentage)

                # auto-remove colon for empty `{desc}`
                if not prefix:
                    bar_format = bar_format.replace("{desc}: ", '')
            else:
                bar_format = "{l_bar}{bar}{r_bar}"

            # first pass: render everything except `{bar}` to measure free space
            full_bar = FormatReplace()
            nobar = bar_format.format(bar=full_bar, **format_dict)
            if not full_bar.format_called:
                return nobar  # no `{bar}`; nothing else to do

            # Formatting progress bar space available for bar's display
            full_bar = Bar(frac,
                           max(1, ncols - disp_len(nobar)) if ncols else 10,
                           charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,
                           colour=colour)
            if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
                bar_format = str(bar_format)
            res = bar_format.format(bar=full_bar, **format_dict)
            return disp_trim(res, ncols) if ncols else res

        elif bar_format:
            # user-specified bar_format but no total
            l_bar += '|'
            format_dict.update(l_bar=l_bar, percentage=0)
            full_bar = FormatReplace()
            nobar = bar_format.format(bar=full_bar, **format_dict)
            if not full_bar.format_called:
                return nobar
            # render a blank bar in the space `{bar}` would occupy
            full_bar = Bar(0,
                           max(1, ncols - disp_len(nobar)) if ncols else 10,
                           charset=Bar.BLANK, colour=colour)
            res = bar_format.format(bar=full_bar, **format_dict)
            return disp_trim(res, ncols) if ncols else res
        else:
            # no total: no progressbar, ETA, just progress stats
            return (f'{(prefix + ": ") if prefix else ""}'
                    f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]')
def __new__(cls, *_, **__):
instance = object.__new__(cls)
with cls.get_lock(): # also constructs lock if non-existent
cls._instances.add(instance)
# create monitoring thread
if cls.monitor_interval and (cls.monitor is None
or not cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning, stacklevel=2)
cls.monitor_interval = 0
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = {abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos")}
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition another unfixed bar
to fill the new gap.
This means that by default (where all nested bars are unfixed),
order is not maintained but screen flicker/blank space is minimised.
(tqdm<=4.44.1 moved ALL subsequent unfixed bars up.)
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
last = (instance.nrows or 20) - 1
# find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`)
instances = list(filter(
lambda i: hasattr(i, "pos") and last <= i.pos,
cls._instances))
# set first found to current `pos`
if instances:
inst = min(instances, key=lambda i: i.pos)
inst.clear(nolock=True)
inst.pos = abs(instance.pos)
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
try:
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
finally:
if not nolock:
cls._lock.release()
    @classmethod
    def set_lock(cls, lock):
        """Set the global lock.

        Replaces the class-level lock used to serialise bar bookkeeping
        and output (see `external_write_mode` / `_decr_instances`).

        Parameters
        ----------
        lock : lock-like
            Object exposing ``acquire``/``release`` and usable as a
            context manager.
        """
        cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
@classmethod
def pandas(cls, **tqdm_kwargs):
"""
Registers the current `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be created every time `progress_apply` is called,
and each instance will automatically `close()` upon completion.
Parameters
----------
tqdm_kwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
<https://stackoverflow.com/questions/18603270/\
progress-indicator-during-pandas-operations-python>
"""
try:
with catch_warnings():
simplefilter("ignore", category=FutureWarning)
except ImportError: # pandas>=1.2.0
Panel = None
Rolling, Expanding = None, None
try: # pandas>=1.0.0
except ImportError:
try: # pandas>=0.18.0
except ImportError: # pandas>=1.2.0
try: # pandas>=1.2.0
_Rolling_and_Expanding = Rolling, Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
except ImportError: # pragma: no cover
try: # pandas>=0.23.0
except ImportError:
try: # pandas>=0.23.0
except ImportError: # pragma: no cover
try: # pandas>=0.23.0
except ImportError:
try:
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
tqdm_kwargs = tqdm_kwargs.copy()
deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tqdm_kwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif (_Rolling_and_Expanding is None or
not isinstance(df, _Rolling_and_Expanding)):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = cls(total=total, **tqdm_kwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
try: # pandas>=1.3.0
except ImportError:
is_builtin_func = df._is_builtin_func
try:
func = is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
try:
return getattr(df, df_function)(wrapper, **kwargs)
finally:
t.close()
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if Rolling is not None and Expanding is not None:
Rolling.progress_apply = inner_generator()
Expanding.progress_apply = inner_generator()
elif _Rolling_and_Expanding is not None:
_Rolling_and_Expanding.progress_apply = inner_generator()
# override defaults via env vars
@envwrap("TQDM_", is_method=True, types={'total': float, 'ncols': int, 'miniters': float,
'position': int, 'nrows': int})
def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,
ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,
ascii=None, disable=False, unit='it', unit_scale=False,
dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,
position=None, postfix=None, unit_divisor=1000, write_bytes=False,
lock_args=None, nrows=None, colour=None, delay=0.0, gui=False,
**kwargs):
"""see tqdm.tqdm for arguments"""
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them. | file = SimpleTextIOWrapper( | 5 | 2023-12-14 07:43:03+00:00 | 8k |
LkPrtctrd/BSL-V53 | Heart/Record/ByteStream.py | [
{
"identifier": "ByteStreamHelper",
"path": "Heart/Record/ByteStreamHelper.py",
"snippet": "class ByteStreamHelper:\n def readDataReference(self):\n result = []\n result.append(self.readVInt())\n if not result[0]:\n return None\n result.append(self.readVInt())\n... | import zlib
from Heart.Record.ByteStreamHelper import ByteStreamHelper
from Heart.Record.ChecksumEncoder import ChecksumEncoder
from Heart.Logic.LogicStringUtil import LogicStringUtil
from Heart.Record.Debugger import Debugger
from Heart.Logic.LogicLong import LogicLong | 3,986 | return 5
elif value > -2199023255552:
return 6
elif value > -281474976710656:
return 7
elif value > -36028797018963968:
return 8
elif value > -4611686018427387903:
return 9
else:
return 10
else:
if value < 64:
return 1
elif value < 8192:
return 2
elif value < 1048576:
return 3
elif value < 134217727:
return 4
elif value < 17179869184:
return 5
elif value < 2199023255552:
return 6
elif value < 281474976710656:
return 7
elif value < 36028797018963968:
return 8
elif value < 4611686018427387903:
return 9
else:
return 10
def isAtEnd(self):
return len(self.messagePayload) <= self.offset
@staticmethod
def isByteStream():
return True
@staticmethod
def isCheckSumOnlyMode():
return False
def readBoolean(self):
bitoffset = self.bitoffset
offset = self.offset + (8 - bitoffset >> 3)
self.offset = offset
self.bitoffset = bitoffset + 1 & 7
return (1 << (bitoffset & 31) & self.messagePayload[offset - 1]) != 0
def readByte(self):
self.bitoffset = 0
result = self.messagePayload[self.offset]
self.offset += 1
return result
def readBytes(self, length, max=1000):
self.bitoffset = 0
if (length & 0x80000000) != 0:
if length != -1:
Debugger.warning("Negative readBytes length encountered.")
elif length <= max:
result = self.messagePayload[self.offset:self.offset + length]
self.offset += length
return bytes(result)
else:
Debugger.warning("readBytes too long array, max", max)
return b''
def readBytesLength(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset] << 24)
result += (self.messagePayload[self.offset + 1] << 16)
result += (self.messagePayload[self.offset + 2] << 8)
result += (self.messagePayload[self.offset + 3])
self.offset += 4
return result
def readInt8(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset])
self.offset += 1
return result
def readInt16(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset] << 8)
result += (self.messagePayload[self.offset + 1])
self.offset += 2
return result
def readInt24(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset] << 16)
result += (self.messagePayload[self.offset + 1] << 8)
result += (self.messagePayload[self.offset + 2])
self.offset += 3
return result
def readInt(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset] << 24)
result += (self.messagePayload[self.offset + 1] << 16)
result += (self.messagePayload[self.offset + 2] << 8)
result += (self.messagePayload[self.offset + 3])
self.offset += 4
return result
def readIntLittleEndian(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset])
result += (self.messagePayload[self.offset + 1] << 8)
result += (self.messagePayload[self.offset + 2] << 16)
result += (self.messagePayload[self.offset + 3] << 24)
self.offset += 4
return result
def readLong(self, logicLong=None):
if not logicLong:
|
class ByteStream(ChecksumEncoder):
def __init__(self, messageBuffer, unknown=0):
super().__init__()
self.messagePayload = messageBuffer
self.bitoffset = 0
self.offset = 0
self.length = len(self.messagePayload)
def clear(self, length):
if self.messagePayload:
self.messagePayload = b''
self.bitoffset = 0
self.offset = 0
def destroy(self):
self.messagePayload = None
self.bitoffset = 0
self.offset = 0
self.length = 0
def ensureCapacity(self, length):
offset = self.offset
if len(self.messagePayload) < offset + length:
buffer_copy = self.messagePayload
buf_len = length
self.length = buf_len
self.messagePayload += bytes([0] * buf_len)
def writeHexa(self, data, length):
self.bitoffset = 0
if data:
if data.startswith('0x'):
data = data[2:]
self.messagePayload += bytes.fromhex(''.join(data.split()).replace('-', ''))
self.offset += length
def getBitOffset(self):
return self.bitoffset
def getByteArray(self):
return self.messagePayload
def getCapacityIncrement(self):
return 100
def getDataPointer(self):
return self.messagePayload[self.offset]
def getLength(self):
length = self.length
if self.length <= self.offset:
length = self.offset
return length
def getOffset(self):
return self.offset
@staticmethod
def getVIntSizeInBytes(value):
if value < 0:
if value > -64:
return 1
elif value > -8192:
return 2
elif value > -1048576:
return 3
elif value > -134217727:
return 4
else:
return 5
else:
if value < 64:
return 1
elif value < 8192:
return 2
elif value < 1048576:
return 3
elif value < 134217727:
return 4
else:
return 5
@staticmethod
def getVLongSizeInBytes(value):
if value < 0:
if value > -64:
return 1
elif value > -8192:
return 2
elif value > -1048576:
return 3
elif value > -134217727:
return 4
elif value > -17179869184:
return 5
elif value > -2199023255552:
return 6
elif value > -281474976710656:
return 7
elif value > -36028797018963968:
return 8
elif value > -4611686018427387903:
return 9
else:
return 10
else:
if value < 64:
return 1
elif value < 8192:
return 2
elif value < 1048576:
return 3
elif value < 134217727:
return 4
elif value < 17179869184:
return 5
elif value < 2199023255552:
return 6
elif value < 281474976710656:
return 7
elif value < 36028797018963968:
return 8
elif value < 4611686018427387903:
return 9
else:
return 10
def isAtEnd(self):
return len(self.messagePayload) <= self.offset
@staticmethod
def isByteStream():
return True
@staticmethod
def isCheckSumOnlyMode():
return False
def readBoolean(self):
bitoffset = self.bitoffset
offset = self.offset + (8 - bitoffset >> 3)
self.offset = offset
self.bitoffset = bitoffset + 1 & 7
return (1 << (bitoffset & 31) & self.messagePayload[offset - 1]) != 0
def readByte(self):
self.bitoffset = 0
result = self.messagePayload[self.offset]
self.offset += 1
return result
def readBytes(self, length, max=1000):
self.bitoffset = 0
if (length & 0x80000000) != 0:
if length != -1:
Debugger.warning("Negative readBytes length encountered.")
elif length <= max:
result = self.messagePayload[self.offset:self.offset + length]
self.offset += length
return bytes(result)
else:
Debugger.warning("readBytes too long array, max", max)
return b''
def readBytesLength(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset] << 24)
result += (self.messagePayload[self.offset + 1] << 16)
result += (self.messagePayload[self.offset + 2] << 8)
result += (self.messagePayload[self.offset + 3])
self.offset += 4
return result
def readInt8(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset])
self.offset += 1
return result
def readInt16(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset] << 8)
result += (self.messagePayload[self.offset + 1])
self.offset += 2
return result
def readInt24(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset] << 16)
result += (self.messagePayload[self.offset + 1] << 8)
result += (self.messagePayload[self.offset + 2])
self.offset += 3
return result
def readInt(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset] << 24)
result += (self.messagePayload[self.offset + 1] << 16)
result += (self.messagePayload[self.offset + 2] << 8)
result += (self.messagePayload[self.offset + 3])
self.offset += 4
return result
def readIntLittleEndian(self):
self.bitoffset = 0
result = (self.messagePayload[self.offset])
result += (self.messagePayload[self.offset + 1] << 8)
result += (self.messagePayload[self.offset + 2] << 16)
result += (self.messagePayload[self.offset + 3] << 24)
self.offset += 4
return result
def readLong(self, logicLong=None):
if not logicLong: | logicLong = LogicLong(0, 0) | 4 | 2023-12-14 18:57:56+00:00 | 8k |
sockheadrps/AIODesa | tests/test_Database.py | [
{
"identifier": "Db",
"path": "aiodesa/database.py",
"snippet": "class Db:\n \"\"\"\n Represents a simple SQLite database interface.\n\n Args:\n db_path : str\n The path to the SQLite database file.\n\n\n Example:\n\n .. code-block:: python\n\n class U... | import pytest
import aiosqlite
import secrets
from aiodesa import Db
from aiodesa.utils.table import make_schema
from dataclasses import dataclass, fields
from pathlib import Path | 4,587 |
@pytest.fixture
def db_path():
"""
DB path initializer
"""
return "test.sqlite3"
@pytest.fixture(scope="session", autouse=True)
def name():
"""
DB name initializer
_ is to satisfy SQL convention in case secrets returns a string with a numeric in position 0
"""
return "_" + secrets.token_hex(16)
def delete_test_db(db_path):
"""
For tearing down test
"""
file_name = db_path
parent_folder = Path.cwd()
file_path = parent_folder / file_name
file_path = Path(file_path)
if file_path.exists():
file_path.unlink()
@pytest.fixture
def test_data_fixture(name):
"""
Fixture for testing DB from dataclass
"""
@dataclass
class TestData:
test_column: str | None = None
test_column_two: str | None = None
table_name: str = name
return TestData
@pytest.mark.asyncio
async def test_db_init(db_path):
"""
Tests the creation of the following class attributes
self.db_path = Path(db_path)
self._conn = None
self._create_db()
self._tables = {}
"""
db = Db(db_path)
db_path = Path(db.db_path)
assert db_path.is_file()
assert db._conn == None
assert isinstance(db._tables, dict)
@pytest.mark.asyncio
async def test_read_table_schemas_single_dataclass(test_data_fixture, db_path, name):
"""
Tests creation of table from single data class
"""
single_data_class = test_data_fixture
async with Db(db_path) as db:
await db.read_table_schemas(single_data_class)
assert await db._table_exists(name)
@pytest.mark.asyncio
async def test_read_table_schemas_tuple_of_dataclasses(db_path):
"""
Tests creation of tables from tuple of data classes
"""
table_one_name = "_" + secrets.token_hex(16)
table_two_name = "_" + secrets.token_hex(16)
@dataclass
class Dataclass1:
id: int
name: str
table_name: str = table_one_name
@dataclass
class Dataclass2:
id: int
value: float
table_name: str = table_two_name
async with Db(db_path) as db:
await db.read_table_schemas((Dataclass1, Dataclass2))
assert await db._table_exists(table_one_name)
assert await db._table_exists(table_two_name)
@pytest.mark.asyncio
async def test_table_exists(db_path, test_data_fixture, name):
"""
Tests that the internal method _table_exists returns if tables exist or not
"""
async with Db(db_path) as db:
assert not await db._table_exists("nonexistent_table")
await db.read_table_schemas(test_data_fixture)
assert await db._table_exists(name)
@pytest.mark.asyncio
async def test_create_table(test_data_fixture, db_path):
"""
Tests that _create_table actually creates the table. Test is done with raw sql, not by testing against
internal class methods.
"""
class_fields = fields(test_data_fixture)
db = Db(db_path)
for field in class_fields:
if field.name == "table_name":
| # tests/test_database.py
@pytest.fixture
def db_path():
"""
DB path initializer
"""
return "test.sqlite3"
@pytest.fixture(scope="session", autouse=True)
def name():
"""
DB name initializer
_ is to satisfy SQL convention in case secrets returns a string with a numeric in position 0
"""
return "_" + secrets.token_hex(16)
def delete_test_db(db_path):
"""
For tearing down test
"""
file_name = db_path
parent_folder = Path.cwd()
file_path = parent_folder / file_name
file_path = Path(file_path)
if file_path.exists():
file_path.unlink()
@pytest.fixture
def test_data_fixture(name):
"""
Fixture for testing DB from dataclass
"""
@dataclass
class TestData:
test_column: str | None = None
test_column_two: str | None = None
table_name: str = name
return TestData
@pytest.mark.asyncio
async def test_db_init(db_path):
"""
Tests the creation of the following class attributes
self.db_path = Path(db_path)
self._conn = None
self._create_db()
self._tables = {}
"""
db = Db(db_path)
db_path = Path(db.db_path)
assert db_path.is_file()
assert db._conn == None
assert isinstance(db._tables, dict)
@pytest.mark.asyncio
async def test_read_table_schemas_single_dataclass(test_data_fixture, db_path, name):
"""
Tests creation of table from single data class
"""
single_data_class = test_data_fixture
async with Db(db_path) as db:
await db.read_table_schemas(single_data_class)
assert await db._table_exists(name)
@pytest.mark.asyncio
async def test_read_table_schemas_tuple_of_dataclasses(db_path):
"""
Tests creation of tables from tuple of data classes
"""
table_one_name = "_" + secrets.token_hex(16)
table_two_name = "_" + secrets.token_hex(16)
@dataclass
class Dataclass1:
id: int
name: str
table_name: str = table_one_name
@dataclass
class Dataclass2:
id: int
value: float
table_name: str = table_two_name
async with Db(db_path) as db:
await db.read_table_schemas((Dataclass1, Dataclass2))
assert await db._table_exists(table_one_name)
assert await db._table_exists(table_two_name)
@pytest.mark.asyncio
async def test_table_exists(db_path, test_data_fixture, name):
"""
Tests that the internal method _table_exists returns if tables exist or not
"""
async with Db(db_path) as db:
assert not await db._table_exists("nonexistent_table")
await db.read_table_schemas(test_data_fixture)
assert await db._table_exists(name)
@pytest.mark.asyncio
async def test_create_table(test_data_fixture, db_path):
"""
Tests that _create_table actually creates the table. Test is done with raw sql, not by testing against
internal class methods.
"""
class_fields = fields(test_data_fixture)
db = Db(db_path)
for field in class_fields:
if field.name == "table_name": | schema_ = make_schema(str(field.default), test_data_fixture) | 1 | 2023-12-09 05:52:25+00:00 | 8k |
DavidBellamy/labrador | scripts/preprocessing/pretraining_raw_data_to_labrador_jsonl.py | [
{
"identifier": "mimic4_eCDFer",
"path": "lab_transformers/data/tokenize_tabular_data.py",
"snippet": "class mimic4_eCDFer:\n def __init__(self, ecdf_data: np.lib.npyio.NpzFile) -> None:\n \"\"\"\n Maps an iterable of lab codes and and an iterable of corresponding lab values to their pr... | import json
import os
import os.path as op
import sqlite3
import sys
import numpy as np
import pandas as pd
from itertools import groupby
from typing import Dict, Tuple, Union
from numpy.typing import NDArray
from statsmodels.distributions import ECDF
from tqdm import tqdm
from lab_transformers.data.tokenize_tabular_data import mimic4_eCDFer
from lab_transformers.utils import NpEncoder | 3,926 | test_df = df[
(df.subject_id.isin(test_patients)) & (df.itemid.isin(train_itemids))
]
return {
"train_patients": train_patients,
"val_patients": val_patients,
"test_patients": test_patients,
}, {"train_df": train_df, "val_df": val_df, "test_df": test_df}
def probability_transform_values(
self, splits: Dict[str, pd.DataFrame]
) -> Tuple[pd.DataFrame]:
train_df = splits["train_df"]
val_df = splits["val_df"]
test_df = splits["test_df"]
unique_itemids = train_df.itemid.unique()
compressed_ecdf_data = {}
for itemid in tqdm(unique_itemids, desc="Computing eCDFs"):
lab_values = train_df[
~np.isnan(train_df.valuenum) & (train_df.itemid == itemid)
]["valuenum"].values
if len(lab_values) == 0:
continue
# Calculate the empirical CDF for the current lab test
ecdf = ECDF(lab_values)
# Compress the eCDF to just the unique lab values (and their probabilities)
unique_ixs = []
cum_lengths = 0
for _, g in groupby(ecdf.x):
group = list(g)
cum_lengths += len(group)
unique_ix = cum_lengths - 1
unique_ixs.append(unique_ix)
# Store the resulting compressed eCDF data
compressed_ecdf_data[f"{itemid}_x"] = ecdf.x[unique_ixs]
compressed_ecdf_data[f"{itemid}_y"] = ecdf.y[unique_ixs]
# Save the compressed eCDF values and probabilities
np.savez(op.join(self.output_path, "mimic4_ecdfs.npz"), **compressed_ecdf_data)
# Load the result back and use it to probability transform the validation and test data splits
ecdf_data = np.load(op.join(self.output_path, "mimic4_ecdfs.npz"))
eCDFer = mimic4_eCDFer(ecdf_data)
# Apply the training eCDFer to data splits
train_df["probs"] = eCDFer(train_df["itemid"], train_df["valuenum"])
val_df["probs"] = eCDFer(val_df["itemid"], val_df["valuenum"])
test_df["probs"] = eCDFer(test_df["itemid"], test_df["valuenum"])
return train_df, val_df, test_df
def write_json_lines(
self,
patient_dict: Dict[str, NDArray[np.integer]],
train_df: pd.DataFrame,
val_df: pd.DataFrame,
test_df: pd.DataFrame,
test_number: Union[int, None] = None,
) -> None:
train_patients = patient_dict["train_patients"]
val_patients = patient_dict["val_patients"]
test_patients = patient_dict["test_patients"]
# Create the output paths for the 3 data splits
train_jsonl_file = os.path.join(
self.output_path, f"labrador_train_patients{test_number}.jsonl"
)
val_jsonl_file = os.path.join(
self.output_path, f"labrador_validation_patients{test_number}.jsonl"
)
test_jsonl_file = os.path.join(
self.output_path, f"labrador_test_patients{test_number}.jsonl"
)
# Write the 3 data splits to their respective paths
self.json_lines_writer(train_jsonl_file, train_patients, train_df, "training")
self.json_lines_writer(val_jsonl_file, val_patients, val_df, "validation")
self.json_lines_writer(test_jsonl_file, test_patients, test_df, "testing")
def json_lines_writer(
self,
filepath: str,
patient_list: NDArray[np.integer],
df: pd.DataFrame,
name: str,
) -> None:
# Generate JSON lines and write to train_set.jsonl, val_set.jsonl, and test_set.jsonl at output_path
first_line = True
mode = "w"
# Make an index out of subject_id for faster subsetting of the df
df.set_index("subject_id", inplace=True)
for patient in tqdm(patient_list, desc=f"Writing {name} JSON lines..."):
temp = df.loc[df.index == patient]
# Filter out patients that only have a single lab (no bag to learn context from)
if len(temp) < 2:
continue # skip this patient
# Create individual patient JSON line
patient_jsonl = {
"subject_id": patient,
"lab_codes": [
self.frequency_ranks[code] for code in temp.itemid.values
],
"lab_values": temp.probs.fillna("<NULL>").values.tolist(),
"time_deltas": temp.time_delta.values.tolist(),
"hadm_id": temp.hadm_id.values.tolist(),
"charttime": np.datetime_as_string(temp.charttime, unit="m").tolist(),
}
# Write it to file
with open(filepath, mode=mode, encoding="utf-8") as f:
| """
This script loads the MIMIC-IV labevents.csv data at data_path, and creates a JSON line for each patient that contains:
subject_id, lab_codes (encoded as their frequency ranking), lab_values (after eCDF transformation), time_deltas,
hospital admission ID (hadm_id), and charttime of the lab test,
where time_deltas are the time in days (float) between each lab measurement.
A <NULL> string is used in the place of labs without numeric values/entries.
"""
class MakeJSONlines:
def __init__(
self,
raw_lab_data_file_name: str,
raw_admissions_data_file_name: str,
data_path: str,
output_path: str,
random_seed: int,
train_pct: float,
val_pct: float,
test_pct: float,
min_frequency: int,
) -> None:
self.raw_labfile = raw_lab_data_file_name
self.raw_admissionsfile = raw_admissions_data_file_name
self.data_path = data_path
self.output_path = output_path
self.random_seed = random_seed
self.train_pct = train_pct
self.val_pct = val_pct
self.test_pct = test_pct
self.min_frequency = min_frequency
# Create a controllabe RNG for data splitting
self.rng = np.random.default_rng(self.random_seed)
# Initialize attribute for holding the frequency ranks of the categorical vocabulary
# This is filled in by the compute_frequency_ranks() method
self.frequency_ranks = None
def call(self, test_number: Union[int, None] = None) -> None:
print("Loading raw data...\n")
df, admissions = self.load_data()
print("Filtering low frequency lab tests...\n")
df = self.filter_rare_categorical(df)
print("Merging in hadm_id's for each lab test...\n")
df = self.merge_in_hadm_id_from_admissions(df, admissions)
print("Computing frequency rankings of lab codes...\n")
self.frequency_ranks = self.compute_frequency_ranks(df)
print("Computing time deltas between labs...\n")
df = self.compute_time_delta(df)
print("Splitting data into train, validation, test...\n")
patient_dict, data_dict = self.split_data(df)
print("Transforming lab values into probabilities via the eCDF...\n")
train_df, val_df, test_df = self.probability_transform_values(data_dict)
print("Writing JSON lines to disk...\n")
self.write_json_lines(patient_dict, train_df, val_df, test_df, test_number)
def load_data(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
# Load labevents.csv (requires ~32Gb of memory)
lab_data_path = os.path.join(self.data_path, self.raw_labfile)
labevents = pd.read_csv(
lab_data_path,
dtype={
"labevent_id": int,
"subject_id": int,
"hadm_id": "Int64", # Pandas nullable Int type
"specimen_id": int,
"itemid": int,
"charttime": "string",
"storetime": "string",
"value": object,
"valuenum": float,
"valueuom": "string",
"ref_range_lower": float,
"ref_range_upper": float,
"flag": "string",
"priority": "string",
"comments": "string",
},
)
# Subset to only the columns needed for json lines
columns_needed = ["subject_id", "itemid", "valuenum", "charttime", "hadm_id"]
df = labevents[columns_needed]
# Load admissions.csv (will be used to merge in hadm_id's for each lab test)
admissions_data_path = os.path.join(self.data_path, self.raw_admissionsfile)
admissions = pd.read_csv(admissions_data_path)
admissions["admittime"] = pd.to_datetime(admissions["admittime"])
admissions["dischtime"] = pd.to_datetime(admissions["dischtime"])
admissions = admissions[
["subject_id", "hadm_id", "admittime", "dischtime", "edregtime"]
] # subset the necessary cols
return df, admissions
def filter_rare_categorical(self, raw_lab_data: pd.DataFrame) -> pd.DataFrame:
# Filter out itemid's with insufficient frequency
# Note: the first filter condition selects lab codes that have no numeric values but occur >= MIN_FREQUENCY times
# the second filter condition selects lab codes that have numeric values,
# and both the numeric values and codes occur >= MIN_FREQUENCY times
filtered_lab_data = raw_lab_data.groupby("itemid").filter(
lambda x: (
len(x["itemid"]) >= self.min_frequency
and len(x["valuenum"].dropna()) == 0
)
or (
len(x["valuenum"].dropna()) >= self.min_frequency
and len(x["itemid"]) >= self.min_frequency
)
)
return filtered_lab_data
def merge_in_hadm_id_from_admissions(
self, df: pd.DataFrame, admissions: pd.DataFrame
) -> pd.DataFrame:
# Make the db in memory (requires 90+ Gb of memory with full labevents.csv)
conn = sqlite3.connect(":memory:")
# write the tables
df.to_sql("df", conn, index=False)
admissions.to_sql("admissions", conn, index=False)
qry = """select
df.subject_id,
itemid,
valuenum,
charttime,
df.hadm_id labs_hadm_id,
admissions.hadm_id adm_hadm_id
from df left join admissions
on ((charttime between case
when edregtime is not null then min(edregtime, admittime)
else admittime end
and dischtime)
and admissions.subject_id = df.subject_id)"""
# Perform the SQL merge/join
df = pd.read_sql_query(qry, conn)
# Drop rows where both hadm_id's exist but aren't equal (only ~0.01% of rows have this)
df = df[
~(
(df.labs_hadm_id != df.adm_hadm_id)
& ~(df.labs_hadm_id.isnull())
& ~(df.adm_hadm_id.isnull())
)
]
# Merge the two hadm_id columns together
df["hadm_id"] = df["labs_hadm_id"].fillna(df["adm_hadm_id"])
# Drop the labs_hadm_id and adm_hadm_id columns
df = df.drop(["labs_hadm_id", "adm_hadm_id"], axis=1)
return df
def compute_frequency_ranks(self, raw_lab_data: pd.DataFrame) -> Dict[str, int]:
# Next, we will determine the integer frequency rank of each lab code in the raw data
# compute frequency of each unique lab code
labcode_freqs = dict(raw_lab_data.itemid.value_counts())
# replace frequencies of lab codes with their integer rank (ranks start at 1)
frequency_ranks = {}
for i, (key, value) in enumerate(labcode_freqs.items()):
frequency_ranks[key] = i + 1
# Save the map from MIMIC-IV lab codes to their frequency ranks (useful for getting descriptions of lab codes)
codebook = pd.DataFrame.from_dict(frequency_ranks, orient="index").reset_index()
codebook.columns = ["itemid", "frequency_rank"]
d_labitems = os.path.join(self.data_path, "d_labitems.csv")
labitem_descriptions = pd.read_csv(
d_labitems
) # load descriptions of each lab code
codebook = codebook.merge(
labitem_descriptions, on="itemid"
) # merge the descriptions with the codebook
filename = os.path.join(self.output_path, "labcode_codebook_labrador.csv")
codebook.to_csv(filename, index=False) # save the codebook
return frequency_ranks
def compute_time_delta(self, df: pd.DataFrame) -> pd.DataFrame:
# Convert charttime Pandas datetime (for computing time deltas later)
df["charttime"] = pd.to_datetime(df["charttime"])
# Sort by subject_id and charttime (ascending)
df = df.sort_values(["subject_id", "charttime"], inplace=False)
# calculate time deltas (next time minus previous time)
df["time_delta"] = df.charttime - df.charttime.shift(1)
# correct rows at border between 2 patients (replace with 0)
df.loc[(df.subject_id != df.subject_id.shift(1)), "time_delta"] = pd.Timedelta(
"0 days"
)
# Convert time_delta's to decimal days (e.g. 5.35 days)
df["time_delta"] = df["time_delta"].dt.total_seconds() / (60 * 60 * 24)
return df
def split_data(
self, df: pd.DataFrame
) -> Tuple[Dict[str, NDArray[np.integer]], Dict[str, pd.DataFrame]]:
# Sort patients into train/validation/test sets
patient_list = df.subject_id.unique()
# Shuffle the order of patients
self.rng.shuffle(patient_list)
train_size = int(np.floor(self.train_pct * len(patient_list)))
val_size = int(np.ceil(self.val_pct * len(patient_list)))
test_size = int(len(patient_list) - train_size - val_size)
train_patients = patient_list[:train_size]
val_patients = patient_list[train_size : train_size + val_size]
test_patients = patient_list[train_size + val_size :]
# Split out the training data
train_df = df[df.subject_id.isin(train_patients)]
# Extract the unique itemid's from the training data partition
train_itemids = train_df.itemid.unique()
# Split out the val/test sets if the itemid also exists in the training data
val_df = df[
(df.subject_id.isin(val_patients)) & (df.itemid.isin(train_itemids))
]
test_df = df[
(df.subject_id.isin(test_patients)) & (df.itemid.isin(train_itemids))
]
return {
"train_patients": train_patients,
"val_patients": val_patients,
"test_patients": test_patients,
}, {"train_df": train_df, "val_df": val_df, "test_df": test_df}
    def probability_transform_values(
        self, splits: Dict[str, pd.DataFrame]
    ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
        """Replace raw lab values with empirical-CDF probabilities.

        eCDFs are fit on the *training* split only, compressed to unique
        values, saved to ``mimic4_ecdfs.npz``, then applied to all three
        splits so val/test are transformed with training statistics.
        """
        train_df = splits["train_df"]
        val_df = splits["val_df"]
        test_df = splits["test_df"]
        unique_itemids = train_df.itemid.unique()
        compressed_ecdf_data = {}
        for itemid in tqdm(unique_itemids, desc="Computing eCDFs"):
            # Numeric (non-NaN) training values for this lab test only.
            lab_values = train_df[
                ~np.isnan(train_df.valuenum) & (train_df.itemid == itemid)
            ]["valuenum"].values
            if len(lab_values) == 0:
                # Purely categorical lab code: nothing to fit.
                continue
            # Calculate the empirical CDF for the current lab test
            ecdf = ECDF(lab_values)
            # Compress the eCDF to just the unique lab values (and their probabilities).
            # ecdf.x is sorted, so equal values form contiguous runs; keep the
            # *last* index of each run, where the cumulative probability for
            # that value lives.
            unique_ixs = []
            cum_lengths = 0
            for _, g in groupby(ecdf.x):
                group = list(g)
                cum_lengths += len(group)
                unique_ix = cum_lengths - 1
                unique_ixs.append(unique_ix)
            # Store the resulting compressed eCDF data
            compressed_ecdf_data[f"{itemid}_x"] = ecdf.x[unique_ixs]
            compressed_ecdf_data[f"{itemid}_y"] = ecdf.y[unique_ixs]
        # Save the compressed eCDF values and probabilities
        np.savez(op.join(self.output_path, "mimic4_ecdfs.npz"), **compressed_ecdf_data)
        # Load the result back and use it to probability transform the validation and test data splits
        ecdf_data = np.load(op.join(self.output_path, "mimic4_ecdfs.npz"))
        eCDFer = mimic4_eCDFer(ecdf_data)
        # Apply the training eCDFer to data splits
        # NOTE(review): these column assignments mutate slices produced by
        # split_data and may emit SettingWithCopyWarning — confirm intended.
        train_df["probs"] = eCDFer(train_df["itemid"], train_df["valuenum"])
        val_df["probs"] = eCDFer(val_df["itemid"], val_df["valuenum"])
        test_df["probs"] = eCDFer(test_df["itemid"], test_df["valuenum"])
        return train_df, val_df, test_df
def write_json_lines(
self,
patient_dict: Dict[str, NDArray[np.integer]],
train_df: pd.DataFrame,
val_df: pd.DataFrame,
test_df: pd.DataFrame,
test_number: Union[int, None] = None,
) -> None:
train_patients = patient_dict["train_patients"]
val_patients = patient_dict["val_patients"]
test_patients = patient_dict["test_patients"]
# Create the output paths for the 3 data splits
train_jsonl_file = os.path.join(
self.output_path, f"labrador_train_patients{test_number}.jsonl"
)
val_jsonl_file = os.path.join(
self.output_path, f"labrador_validation_patients{test_number}.jsonl"
)
test_jsonl_file = os.path.join(
self.output_path, f"labrador_test_patients{test_number}.jsonl"
)
# Write the 3 data splits to their respective paths
self.json_lines_writer(train_jsonl_file, train_patients, train_df, "training")
self.json_lines_writer(val_jsonl_file, val_patients, val_df, "validation")
self.json_lines_writer(test_jsonl_file, test_patients, test_df, "testing")
def json_lines_writer(
self,
filepath: str,
patient_list: NDArray[np.integer],
df: pd.DataFrame,
name: str,
) -> None:
# Generate JSON lines and write to train_set.jsonl, val_set.jsonl, and test_set.jsonl at output_path
first_line = True
mode = "w"
# Make an index out of subject_id for faster subsetting of the df
df.set_index("subject_id", inplace=True)
for patient in tqdm(patient_list, desc=f"Writing {name} JSON lines..."):
temp = df.loc[df.index == patient]
# Filter out patients that only have a single lab (no bag to learn context from)
if len(temp) < 2:
continue # skip this patient
# Create individual patient JSON line
patient_jsonl = {
"subject_id": patient,
"lab_codes": [
self.frequency_ranks[code] for code in temp.itemid.values
],
"lab_values": temp.probs.fillna("<NULL>").values.tolist(),
"time_deltas": temp.time_delta.values.tolist(),
"hadm_id": temp.hadm_id.values.tolist(),
"charttime": np.datetime_as_string(temp.charttime, unit="m").tolist(),
}
# Write it to file
with open(filepath, mode=mode, encoding="utf-8") as f: | json_record = json.dumps(patient_jsonl, cls=NpEncoder) | 1 | 2023-12-09 20:40:17+00:00 | 8k |
NLP-Core-Team/RealCode_eval | main.py | [
{
"identifier": "InfillGenerator",
"path": "lm_eval/generators.py",
"snippet": "class InfillGenerator:\n def __init__(self, \n model_path: str,\n num_samples: int,\n prefix_tokens: tp.Union[str, tp.List[int]] = [],\n middle_tokens: tp.Union[str, tp.List[int]] = [],\n ... | import hydra
import torch
import numpy as np
import random
import json
import os
import logging
from lm_eval.generators import InfillGenerator, LMGenerator
from lm_eval.evaluator import Evaluator
from lm_eval.context_parser import TrivialContextParser
from lm_eval.utils import load_dataset
from omegaconf import DictConfig, OmegaConf | 3,770 |
logger = logging.getLogger("RealCode")
logger.setLevel(logging.DEBUG)
def seed_all(seed):
    """Seed python, numpy and torch RNGs for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seeds the *current* CUDA device; silently ignored when CUDA is absent.
    torch.cuda.manual_seed(seed)
@hydra.main(config_path="config", config_name="config")
def main(cfg: DictConfig) -> None:
seed_all(cfg.seed)
print(cfg)
dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit)
logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}")
if 'context_parser' in cfg:
parser = hydra.utils.instantiate(cfg.context_parser)
else:
parser = TrivialContextParser()
dtype_map = {'fp16': torch.float16, 'fp32': torch.float, 'bf16': torch.bfloat16}
if cfg.generator_mode == 'infill':
generator = InfillGenerator(
add_extra_spaces_to_begin=0,
model_path=cfg.model_path,
dtype=dtype_map[cfg.dtype],
num_samples=cfg.num_samples,
prefix_tokens=cfg.prefix_tokens,
middle_tokens=cfg.middle_tokens,
suffix_tokens=cfg.suffix_tokens,
max_context_length=cfg.max_context_length,
generation_params=dict(cfg.generation_params),
eos_sequences=cfg.eos_sequences,
model_kwargs=cfg.model_kwargs if 'model_kwargs' in cfg else {},
context_parser=parser,
left_context_ratio=cfg.left_context_ratio,
add_extra_spaces_to_generation=cfg.tokenizer_fix
)
elif cfg.generator_mode == 'lm':
|
logger = logging.getLogger("RealCode")
logger.setLevel(logging.DEBUG)
def seed_all(seed):
    """Seed every RNG source (python, numpy, torch CPU and all CUDA devices).

    Args:
        seed: Integer seed applied to each library.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seed *all* visible CUDA devices (not just the current one) so multi-GPU
    # runs are reproducible too. Silently ignored when CUDA is unavailable.
    torch.cuda.manual_seed_all(seed)
@hydra.main(config_path="config", config_name="config")
def main(cfg: DictConfig) -> None:
seed_all(cfg.seed)
print(cfg)
dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit)
logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}")
if 'context_parser' in cfg:
parser = hydra.utils.instantiate(cfg.context_parser)
else:
parser = TrivialContextParser()
dtype_map = {'fp16': torch.float16, 'fp32': torch.float, 'bf16': torch.bfloat16}
if cfg.generator_mode == 'infill':
generator = InfillGenerator(
add_extra_spaces_to_begin=0,
model_path=cfg.model_path,
dtype=dtype_map[cfg.dtype],
num_samples=cfg.num_samples,
prefix_tokens=cfg.prefix_tokens,
middle_tokens=cfg.middle_tokens,
suffix_tokens=cfg.suffix_tokens,
max_context_length=cfg.max_context_length,
generation_params=dict(cfg.generation_params),
eos_sequences=cfg.eos_sequences,
model_kwargs=cfg.model_kwargs if 'model_kwargs' in cfg else {},
context_parser=parser,
left_context_ratio=cfg.left_context_ratio,
add_extra_spaces_to_generation=cfg.tokenizer_fix
)
elif cfg.generator_mode == 'lm': | generator = LMGenerator( | 1 | 2023-12-12 12:43:06+00:00 | 8k |
ENDEVSOLS/Long-Trainer | longtrainer/trainer.py | [
{
"identifier": "DocumentLoader",
"path": "longtrainer/loaders.py",
"snippet": "class DocumentLoader:\n def load_csv(self, path):\n \"\"\"\n Load data from a CSV file at the specified path.\n\n Args:\n path (str): The file path to the CSV file.\n\n Returns:\n ... | from longtrainer.loaders import DocumentLoader, TextSplitter
from longtrainer.retrieval import DocRetriever
from longtrainer.bot import ChainBot
from longtrainer.vision_bot import VisionMemory, VisionBot
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from pymongo import MongoClient
import uuid | 3,935 |
class LongTrainer:
def __init__(self, mongo_endpoint='mongodb://localhost:27017/', llm=None,
embedding_model=None,
prompt_template=None, max_token_limit=32000):
"""
Initialize the LongTrainer with optional language learning model, embedding model,
prompt template, maximum token limit, and MongoDB endpoint.
Args:
mongo_endpoint (str): MongoDB connection string.
llm: Language learning model, defaults to ChatOpenAI with GPT-4.
embedding_model: Embedding model for document vectorization, defaults to OpenAIEmbeddings.
prompt_template: Template for generating prompts, defaults to a predefined template.
max_token_limit (int): Maximum token limit for the conversational buffer.
"""
self.llm = llm if llm is not None else ChatOpenAI(model_name='gpt-4-1106-preview')
self.embedding_model = embedding_model if embedding_model is not None else OpenAIEmbeddings()
self.prompt_template = prompt_template if prompt_template is not None else self._default_prompt_template()
self.prompt = PromptTemplate(template=self.prompt_template,
input_variables=["context", "chat_history", "question"])
self.max_token_limit = max_token_limit
self.document_loader = DocumentLoader()
|
class LongTrainer:
def __init__(self, mongo_endpoint='mongodb://localhost:27017/', llm=None,
embedding_model=None,
prompt_template=None, max_token_limit=32000):
"""
Initialize the LongTrainer with optional language learning model, embedding model,
prompt template, maximum token limit, and MongoDB endpoint.
Args:
mongo_endpoint (str): MongoDB connection string.
llm: Language learning model, defaults to ChatOpenAI with GPT-4.
embedding_model: Embedding model for document vectorization, defaults to OpenAIEmbeddings.
prompt_template: Template for generating prompts, defaults to a predefined template.
max_token_limit (int): Maximum token limit for the conversational buffer.
"""
self.llm = llm if llm is not None else ChatOpenAI(model_name='gpt-4-1106-preview')
self.embedding_model = embedding_model if embedding_model is not None else OpenAIEmbeddings()
self.prompt_template = prompt_template if prompt_template is not None else self._default_prompt_template()
self.prompt = PromptTemplate(template=self.prompt_template,
input_variables=["context", "chat_history", "question"])
self.max_token_limit = max_token_limit
self.document_loader = DocumentLoader() | self.text_splitter = TextSplitter(chunk_size=1024, chunk_overlap=100) | 1 | 2023-12-07 16:37:26+00:00 | 8k |
pan-x-c/EE-LLM | tools/retro/main.py | [
{
"identifier": "get_args",
"path": "megatron/global_vars.py",
"snippet": "def get_args():\n \"\"\"Return arguments.\"\"\"\n _ensure_var_is_initialized(_GLOBAL_ARGS, 'args')\n return _GLOBAL_ARGS"
},
{
"identifier": "initialize_megatron",
"path": "megatron/initialize.py",
"snipp... | import json
import os
import torch
from megatron import get_args, initialize_megatron, print_rank_0
from megatron.global_vars import set_retro_args
from tools.retro.db import build_db
from tools.retro.index import add_to_index, build_index, train_index
from tools.retro.query import query_pretraining_neighbors
from tools.retro.utils import get_args_path | 3,944 | # Index args.
group.add_argument("--retro-index-nfeats", "-f", type=int, default=1024,
help="Dimension of Bert embeddings. Bert-large is "
"commonly used, so this value defaults to 1024.")
group.add_argument("--retro-index-type", default="faiss-par-add",
choices=["faiss-base", "faiss-par-add"],
help="A 'faiss-base' index is a simple, un-optimized "
"wrapper around a Faiss index. A 'faiss-par-add' index "
"optimizes the 'add()' method by making it multi-node "
"and multi-process, but with bit-wise equivalent "
"results.")
group.add_argument("--retro-index-str", required=True,
help="Index string used for calling "
"faiss.index_factory(). For example, "
"'IVF262144_HNSW32,Flat' or "
"'OPQ32_256,IVF4194304_HNSW32,PQ32'.")
group.add_argument("--retro-index-ntrain", type=int, required=True,
help="Number of database chunks to use for training "
"the index. This value must be less or equal to the "
"total number of chunks in the database.")
group.add_argument("--retro-index-train-load-fraction",
type=float, default=1.,
help="Fraction of sampled chunks to use for training "
"the index. Useful when our total sampled embeddings "
"use too much memory; lowering the load fraction is "
"less costly than re-embedding a new sampled dataset "
"from scratch.")
group.add_argument("--retro-index-add-load-fraction",
type=float, default=1.,
help="Fraction of database chunks to use for adding to "
"the index. Useful when our total index size would "
"use too much memory; lowering the load fraction is "
"less costly than re-designing our token datasets.")
group.add_argument("--retro-index-no-delete-training-embeddings",
action='store_false',
dest="retro_index_delete_training_embeddings",
help="Skip deleting training embeddings for the search "
"index. Useful for debugging.")
group.add_argument("--retro-index-no-delete-added-codes",
action='store_false',
dest="retro_index_delete_added_codes",
help="Skip deleting added codes for the search "
"index. Useful for debugging.")
# Query args.
group.add_argument("--retro-query-ef-search", type=int, default=256,
help="Index ef-search parameter for HNSW during querying.")
group.add_argument("--retro-query-nprobe", type=int, default=65536,
help="Index nprobe parameter for IVF during querying.")
group.add_argument("--retro-query-num-neighbors-query", type=int, default=200,
help="Number of neighbors to retrieve when calling "
"index.search().")
group.add_argument("--retro-query-num-neighbors-save", type=int, default=20,
help="Number of neighbors to save to disk after "
"the index's returned neighbors. If longer than target "
"value, neighbors truncated; and if shorter than target "
"value, neighbors are padded with -1's.")
# Enforce argument naming convention.
for action in group._group_actions:
prefix = action.dest.split("_")[0]
assert prefix == "retro", \
"Retro args must be prefixed with '--retro-*', for consistent " \
"styling. Please fix '%s'." % ", ".join(action.option_strings)
return parser
def save_args(args):
    '''Save copy of args within retro workdir.'''
    def default_dump(obj):
        # json can't serialize torch dtypes; store their string form instead.
        if isinstance(obj, torch.dtype):
            return str(obj)
        else:
            raise Exception("specialize for <%s>." % type(obj).__name__)
    # Only rank 0 writes the file; the barrier makes every rank wait so the
    # file exists before any rank proceeds.
    if torch.distributed.get_rank() == 0:
        args_path = get_args_path(args.retro_workdir)
        with open(args_path, "w") as f:
            json.dump(vars(args), f, indent=4, default=default_dump)
    torch.distributed.barrier()
if __name__ == "__main__":
# Initalize Megatron.
initialize_megatron(extra_args_provider=add_retro_args)
# Split retro tasks.
args = get_args()
args.retro_tasks = args.retro_tasks.split(",")
# Save/set retro args.
os.makedirs(args.retro_workdir, exist_ok=True)
save_args(args)
set_retro_args(args)
# Select task to run.
for task in args.retro_tasks:
print_rank_0("start '%s'." % task)
# Run all stages.
if task == "build":
build_db()
torch.distributed.barrier()
build_index()
torch.distributed.barrier()
query_pretraining_neighbors()
# DB (i.e., chunk db).
elif task == "db-build":
build_db()
# Index.
elif task == "index-build":
build_index() # calls both train + add.
elif task == "index-train":
| # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Preprocess data for Retro.
Stages (see argument '--retro-tasks'):
- Build chunk database (DB).
- Build index (train, add).
- Query pretraining neighbors.
"""
def add_retro_args(parser):
"""Retro preprocesing arguments.
*Note* : Arguments prefixed with '--retro-gpt-*' or '--retro-bert-*' are
included and named as such to more easily handle managing both models
running at the same time. Megatron is not optimized to run two models at
once, so this naming convention makes it clearer.
"""
group = parser.add_argument_group(title="Retro preprocessing.")
# Basic args.
group.add_argument("--retro-tasks", default="build",
help="Comma-separated list of tasks to run. Run entire "
"preprocesing pipeline by using '--retro-tasks build'. "
"Alternatively, run individual stages with tasks (in "
"this order) 'db-build', 'index-build', or "
"'query-pretraining-neighbors'. For example, "
"'--retro-tasks db-build,index-build,"
"query-pretraining-neighbors' is equivalent to "
"'--retro-tasks build'; or the argument can contain "
"a subset of these tasks. Stages must always be run "
"in the correct order (listed above).")
group.add_argument("--retro-block-size", type=int, default=100000,
help="Number of chunks to process at a time when "
"generating Bert embeddings and querying the search "
"index. Partial results for each block are generally "
"saved to disk in separate files.")
group.add_argument("--retro-doc-block-size", type=int, default=100000,
help="Number of documents to processe at time when "
"processing token datasets into chunk databases. The "
"partial chunk database for each block is saved into "
"a separate file.")
# GPT args.
group.add_argument('--retro-gpt-seed', type=int, default=1234,
help='Random seed used for python, numpy, '
'pytorch, and cuda.')
group.add_argument('--retro-gpt-data-path', nargs='*', required=True,
help='Path to the training dataset. Accepted format:'
'1) a single data path, 2) multiple datasets in the'
'form: dataset1-weight dataset1-path dataset2-weight '
'dataset2-path ... It is used with --split when a '
'single dataset used for all three: train, valid '
'and test. It is exclusive to the other '
'--*-data-path args')
group.add_argument('--retro-gpt-split', type=str, default='969,30,1',
help='Comma-separated list of proportions for training,'
' validation, and test split. For example the split '
'`90,5,5` will use 90%% of data for training, 5%% for '
'validation and 5%% for test.')
group.add_argument('--retro-gpt-mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument("--retro-gpt-eval-interval", type=int, required=True,
help="GPT evaluation interval.")
group.add_argument("--retro-gpt-eval-iters", type=int, required=True,
help="GPT evaluation iterations.")
group.add_argument("--retro-gpt-tokenizer-type", required=True,
help="GPT tokenizer type.")
group.add_argument("--retro-gpt-vocab-file", help="GPT vocab file.")
group.add_argument("--retro-gpt-merge-file", help="GPT merge file.")
group.add_argument("--retro-gpt-tokenizer-model",
help="GPT tokenizer model file.")
group.add_argument("--retro-gpt-seq-length", type=int, required=True,
help="GPT sequence length.")
group.add_argument("--retro-gpt-global-batch-size", type=int, required=True,
help="GPT global batch size.")
group.add_argument("--retro-gpt-chunk-length", type=int, default=64,
help="GPT chunk length.")
# Bert args.
group.add_argument("--retro-bert-vocab-file", required=True,
help="Bert vocab file.")
group.add_argument("--retro-bert-tokenizer-type", required=True,
help="Bert tokenizer type (for when using "
"'--bert-embedder-type megatron').")
group.add_argument("--retro-bert-batch-size", type=int, default=128,
help="Micro-batch size for processing Bert embeddings.")
group.add_argument("--retro-bert-max-chunk-length", type=int, default=256,
help="Maximum sequence length for Bert embeddings. "
"(Named 'chunk' here in reference to these Bert "
"sequences being converted from GPT chunks.)")
# Index args.
group.add_argument("--retro-index-nfeats", "-f", type=int, default=1024,
help="Dimension of Bert embeddings. Bert-large is "
"commonly used, so this value defaults to 1024.")
group.add_argument("--retro-index-type", default="faiss-par-add",
choices=["faiss-base", "faiss-par-add"],
help="A 'faiss-base' index is a simple, un-optimized "
"wrapper around a Faiss index. A 'faiss-par-add' index "
"optimizes the 'add()' method by making it multi-node "
"and multi-process, but with bit-wise equivalent "
"results.")
group.add_argument("--retro-index-str", required=True,
help="Index string used for calling "
"faiss.index_factory(). For example, "
"'IVF262144_HNSW32,Flat' or "
"'OPQ32_256,IVF4194304_HNSW32,PQ32'.")
group.add_argument("--retro-index-ntrain", type=int, required=True,
help="Number of database chunks to use for training "
"the index. This value must be less or equal to the "
"total number of chunks in the database.")
group.add_argument("--retro-index-train-load-fraction",
type=float, default=1.,
help="Fraction of sampled chunks to use for training "
"the index. Useful when our total sampled embeddings "
"use too much memory; lowering the load fraction is "
"less costly than re-embedding a new sampled dataset "
"from scratch.")
group.add_argument("--retro-index-add-load-fraction",
type=float, default=1.,
help="Fraction of database chunks to use for adding to "
"the index. Useful when our total index size would "
"use too much memory; lowering the load fraction is "
"less costly than re-designing our token datasets.")
group.add_argument("--retro-index-no-delete-training-embeddings",
action='store_false',
dest="retro_index_delete_training_embeddings",
help="Skip deleting training embeddings for the search "
"index. Useful for debugging.")
group.add_argument("--retro-index-no-delete-added-codes",
action='store_false',
dest="retro_index_delete_added_codes",
help="Skip deleting added codes for the search "
"index. Useful for debugging.")
# Query args.
group.add_argument("--retro-query-ef-search", type=int, default=256,
help="Index ef-search parameter for HNSW during querying.")
group.add_argument("--retro-query-nprobe", type=int, default=65536,
help="Index nprobe parameter for IVF during querying.")
group.add_argument("--retro-query-num-neighbors-query", type=int, default=200,
help="Number of neighbors to retrieve when calling "
"index.search().")
group.add_argument("--retro-query-num-neighbors-save", type=int, default=20,
help="Number of neighbors to save to disk after "
"the index's returned neighbors. If longer than target "
"value, neighbors truncated; and if shorter than target "
"value, neighbors are padded with -1's.")
# Enforce argument naming convention.
for action in group._group_actions:
prefix = action.dest.split("_")[0]
assert prefix == "retro", \
"Retro args must be prefixed with '--retro-*', for consistent " \
"styling. Please fix '%s'." % ", ".join(action.option_strings)
return parser
def save_args(args):
    """Persist a JSON copy of the Megatron args inside the retro workdir."""

    def default_dump(obj):
        # Torch dtypes are not JSON-serializable; fall back to their repr.
        if not isinstance(obj, torch.dtype):
            raise Exception("specialize for <%s>." % type(obj).__name__)
        return str(obj)

    # Rank 0 writes; everyone then synchronizes so the file is visible
    # to all ranks before continuing.
    if torch.distributed.get_rank() == 0:
        with open(get_args_path(args.retro_workdir), "w") as f:
            json.dump(vars(args), f, indent=4, default=default_dump)
    torch.distributed.barrier()
if __name__ == "__main__":
# Initalize Megatron.
initialize_megatron(extra_args_provider=add_retro_args)
# Split retro tasks.
args = get_args()
args.retro_tasks = args.retro_tasks.split(",")
# Save/set retro args.
os.makedirs(args.retro_workdir, exist_ok=True)
save_args(args)
set_retro_args(args)
# Select task to run.
for task in args.retro_tasks:
print_rank_0("start '%s'." % task)
# Run all stages.
if task == "build":
build_db()
torch.distributed.barrier()
build_index()
torch.distributed.barrier()
query_pretraining_neighbors()
# DB (i.e., chunk db).
elif task == "db-build":
build_db()
# Index.
elif task == "index-build":
build_index() # calls both train + add.
elif task == "index-train": | train_index() # train only | 7 | 2023-12-07 08:29:38+00:00 | 8k |
mitrefireline/simharness | simharness2/analytics/harness_analytics.py | [
{
"identifier": "FireSimulationAnalytics",
"path": "simharness2/analytics/simulation_analytics.py",
"snippet": "class FireSimulationAnalytics(SimulationAnalytics):\n \"\"\"Use `FireSimulationAnalytics` to monitor the `fire_map` within a `FireSimulation`.\n\n Attributes:\n sim: TODO\n ... | import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from functools import partial
from typing import Any, Optional, Dict
from simfire.sim.simulation import FireSimulation
from simharness2.analytics.simulation_analytics import FireSimulationAnalytics
from simharness2.agents import ReactiveAgent | 3,829 | logger = logging.getLogger("ray.rllib")
class RLHarnessAnalytics(ABC):
    """Base class with several built in methods.

    Holds a `FireSimulationAnalytics` for the training simulation and,
    optionally, one for a benchmark (agent-free) simulation. Subclasses
    implement the per-step update hooks below.
    """
    def __init__(
        self,
        *,
        sim: FireSimulation,
        sim_analytics_partial: partial,
        # use_benchmark_sim: bool = False
        benchmark_sim: FireSimulation = None,
    ) -> None:
        """Instantiate the per-simulation analytics objects.

        Args:
            sim: Simulation containing the trainable agent(s).
            sim_analytics_partial: `functools.partial` that builds the
                analytics object for a given simulation.
            benchmark_sim: Optional agent-free simulation to compare against.

        Raises:
            TypeError: Propagated when `sim_analytics_partial` cannot be
                called with the provided keyword arguments.
        """
        # Store objects used to track simulation data within each episode in a run.
        try:
            self.sim_analytics: FireSimulationAnalytics = sim_analytics_partial(sim=sim)
            if benchmark_sim:
                # Benchmark analytics track the unmitigated (no-agent) fire.
                self.benchmark_sim_analytics: FireSimulationAnalytics = (
                    sim_analytics_partial(sim=benchmark_sim, is_benchmark=True)
                )
            else:
                self.benchmark_sim_analytics: FireSimulationAnalytics = None
        except TypeError as e:
            # NOTE(review): re-raised unchanged; a bare `raise` would keep the
            # traceback identically.
            raise e
        # Best episode observed so far across training (None until set).
        self.best_episode_performance: Optional[BestEpisodePerformance] = None
    @abstractmethod
    def update_after_one_simulation_step(self, *, timestep: int) -> None:
        """Update analytics after the simulation advances one step."""
        pass
    @abstractmethod
    def update_after_one_agent_step(
        self,
        *,
        timestep: int,
    ) -> None:
        """Update analytics after the agent(s) take one step."""
        pass
    @abstractmethod
    def update_after_one_harness_step(
        self, sim_run: bool, terminated: bool, reward: float, *, timestep: int
    ) -> None:
        """Update analytics after one full harness (environment) step."""
        pass
    @abstractmethod
    def reset(self):
        """Reset per-episode state before the next episode."""
        pass
class ReactiveHarnessAnalytics(RLHarnessAnalytics):
"""TODO Add description."""
def __init__(
self,
*,
sim: FireSimulation,
sim_analytics_partial: partial,
agent_ids: set,
benchmark_sim: FireSimulation = None,
) -> None:
"""TODO Add summary line.
Arguments:
sim: The underlying `FireSimulation` object that contains the agent (s) that
are being trained. The agent (s) will place mitigation lines, and the
simulation will spread the fire. An episode terminates when the fire is
finished spreading.
sim_analytics_partial: A `functools.partial` object that defines the class
that willbbe used to monitor and track `self.sim`, and
`self.benchmark_sim`, if the optional `benchmark_sim` is provided. The
user is expected to provide the `agent_analytics_partial` keyword
argument, along with a valid value.
agent_ids: TODO
benchmark_sim: A separate `FireSimulation` object, identical to
`sim` (after initialization). No mitigation lines will be placed in this
simulation, as it does not contain any agent (s).
Raises:
TypeError: If `sim_analytics_partial.keywords` does not contain a
`agent_analytics_partial` key with value of type `functools.partial`.
"""
# NOTE: Below is a hacky way to specify agent ids; Fix later
# Inject `agent_ids` into keywords of `agent_analytics_partial`
agent_partial: partial = sim_analytics_partial.keywords["agent_analytics_partial"]
agent_partial.keywords.update({"agent_ids": agent_ids})
sim_analytics_partial.keywords["agent_analytics_partial"] = agent_partial
# Initialize sim_analytics object (s) and best_episode_performance attribute.
super().__init__(
sim=sim,
sim_analytics_partial=sim_analytics_partial,
benchmark_sim=benchmark_sim,
)
# Define attributes that are needed/accessed within `ComprehensiveReward` class.
# TODO: Address where these attributes should be stored, see
# https://gitlab.mitre.org/fireline/reinforcementlearning/simharness2/-/merge_requests/6#note_1504742
if self.benchmark_sim_analytics:
# track the existence of the benchmark sim to generate the comparative (ex. area saved or burn rate reduction) metrics
self.sim_analytics.benchmark_exists = True
# Track the latest episode reward
# TODO is this the reward for the latest timestep or the latest episode?
# FIXME: Decide how and where this attribute is/should be used.
self.latest_reward = 0.0
self.episodes_total = 0
def update_after_one_agent_step(
self,
*,
timestep: int,
| """Base AnalyticsTracker for SimHarness and BaseReward."""
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
)
logger.addHandler(handler)
logger.propagate = False
logger = logging.getLogger("ray.rllib")
class RLHarnessAnalytics(ABC):
"""Base class with several built in methods."""
def __init__(
self,
*,
sim: FireSimulation,
sim_analytics_partial: partial,
# use_benchmark_sim: bool = False
benchmark_sim: FireSimulation = None,
) -> None:
"""TODO: Add docstring."""
# Store objects used to track simulation data within each episode in a run.
try:
self.sim_analytics: FireSimulationAnalytics = sim_analytics_partial(sim=sim)
if benchmark_sim:
self.benchmark_sim_analytics: FireSimulationAnalytics = (
sim_analytics_partial(sim=benchmark_sim, is_benchmark=True)
)
else:
self.benchmark_sim_analytics: FireSimulationAnalytics = None
except TypeError as e:
raise e
self.best_episode_performance: Optional[BestEpisodePerformance] = None
@abstractmethod
def update_after_one_simulation_step(self, *, timestep: int) -> None:
"""See subclass for docstring."""
pass
@abstractmethod
def update_after_one_agent_step(
self,
*,
timestep: int,
) -> None:
"""See subclass for docstring."""
pass
@abstractmethod
def update_after_one_harness_step(
self, sim_run: bool, terminated: bool, reward: float, *, timestep: int
) -> None:
"""See subclass for docstring."""
pass
@abstractmethod
def reset(self):
"""See subclass for docstring."""
pass
class ReactiveHarnessAnalytics(RLHarnessAnalytics):
"""TODO Add description."""
def __init__(
self,
*,
sim: FireSimulation,
sim_analytics_partial: partial,
agent_ids: set,
benchmark_sim: FireSimulation = None,
) -> None:
"""TODO Add summary line.
Arguments:
sim: The underlying `FireSimulation` object that contains the agent (s) that
are being trained. The agent (s) will place mitigation lines, and the
simulation will spread the fire. An episode terminates when the fire is
finished spreading.
sim_analytics_partial: A `functools.partial` object that defines the class
that willbbe used to monitor and track `self.sim`, and
`self.benchmark_sim`, if the optional `benchmark_sim` is provided. The
user is expected to provide the `agent_analytics_partial` keyword
argument, along with a valid value.
agent_ids: TODO
benchmark_sim: A separate `FireSimulation` object, identical to
`sim` (after initialization). No mitigation lines will be placed in this
simulation, as it does not contain any agent (s).
Raises:
TypeError: If `sim_analytics_partial.keywords` does not contain a
`agent_analytics_partial` key with value of type `functools.partial`.
"""
# NOTE: Below is a hacky way to specify agent ids; Fix later
# Inject `agent_ids` into keywords of `agent_analytics_partial`
agent_partial: partial = sim_analytics_partial.keywords["agent_analytics_partial"]
agent_partial.keywords.update({"agent_ids": agent_ids})
sim_analytics_partial.keywords["agent_analytics_partial"] = agent_partial
# Initialize sim_analytics object (s) and best_episode_performance attribute.
super().__init__(
sim=sim,
sim_analytics_partial=sim_analytics_partial,
benchmark_sim=benchmark_sim,
)
# Define attributes that are needed/accessed within `ComprehensiveReward` class.
# TODO: Address where these attributes should be stored, see
# https://gitlab.mitre.org/fireline/reinforcementlearning/simharness2/-/merge_requests/6#note_1504742
if self.benchmark_sim_analytics:
# track the existence of the benchmark sim to generate the comparative (ex. area saved or burn rate reduction) metrics
self.sim_analytics.benchmark_exists = True
# Track the latest episode reward
# TODO is this the reward for the latest timestep or the latest episode?
# FIXME: Decide how and where this attribute is/should be used.
self.latest_reward = 0.0
self.episodes_total = 0
def update_after_one_agent_step(
self,
*,
timestep: int, | agents: Dict[Any, ReactiveAgent], | 1 | 2023-12-08 19:13:31+00:00 | 8k |
racinette/querky | querky/query.py | [
{
"identifier": "logger",
"path": "querky/logger.py",
"snippet": ""
},
{
"identifier": "QueryInitializationError",
"path": "querky/exceptions.py",
"snippet": "class QueryInitializationError(Exception):\n def __init__(self, query: Query, additional_hint: str | None = None) -> None:\n ... | import inspect
import typing
from inspect import Parameter
from os import path
from querky.logger import logger
from querky.exceptions import QueryInitializationError
from querky.helpers import ReprHelper, DictGetAttr
from querky.base_types import TypeKnowledge, QuerySignature
from querky.conn_param_config import ConnParamConfig
from querky.param_mapper import ParamMapper
from querky.attr import attr as _attr_, Attr
from querky.result_shape import Value, Column, Status, All, One, ResultShape
from querky.module_constructor import ModuleConstructor | 4,486 | from __future__ import annotations
if typing.TYPE_CHECKING:
RS = typing.TypeVar('RS', bound='ResultShape')
class Query(typing.Generic[RS]):
defaults: dict[str, typing.Any]
def __init__(
self,
func: typing.Callable,
shape: typing.Callable[[Query], RS],
module: ModuleConstructor,
conn_param_config: ConnParamConfig,
explicit_name: typing.Optional[str],
parent_query: typing.Optional[Query[One | All]],
kwargs: typing.Optional[typing.Dict[str, typing.Any]]
) -> None:
self.parent_query: Query[One | All] | None = parent_query
self.imports = set()
self.kwargs = kwargs or dict()
self.query = func
self.name = explicit_name or func.__name__
self.conn_param_config = conn_param_config
self.sig = inspect.signature(func)
self.template_signature = None
self.module = module
self.module.queries_list.append(self)
self.param_mapper: ParamMapper = self.contract.create_param_mapper(self)
self.sql = self.param_mapper.parametrize_query()
self.default = DictGetAttr(self.param_mapper.defaults)
# side effect: attr gets populated, so we flush it
self.attr_hints: dict[str, Attr] = {
a.name: a
for a in _attr_.__getattrs__()
}
module_filename = self.module.module.__file__
common = path.commonprefix([module.querky.basedir, module_filename])
self.relative_path = module_filename[len(common):]
self.unique_name = f"{self.relative_path}:{self.query.__name__}"
self.local_name = self.get_local_name()
self.query_signature: QuerySignature | None = None
self.conn_type_knowledge: TypeKnowledge | None = None
self.bound_type = None
self.shape: ResultShape = shape(self)
if not isinstance(self.shape, (One, All)) and parent_query:
raise ValueError("Only One and All queries can have a parent query.")
if parent_query and not isinstance(parent_query.shape, (One, All)):
raise ValueError("Parent query must be of either One or All shape.")
logger.debug(
"Query: %s\nSQL: %s",
self.unique_name, self.sql
)
@property
def annotation_generator(self):
return self.querky.annotation_generator
@property
def contract(self):
return self.module.querky.contract
@property
def querky(self):
return self.module.querky
def bind_type(self, t) -> None:
self.bound_type = t
async def execute(self, conn, *args, **kwargs):
params = self.param_mapper.map_params(*args, **kwargs)
return await self.shape.fetch(conn, params)
def execute_sync(self, conn, *args, **kwargs):
params = self.param_mapper.map_params(*args, **kwargs)
return self.shape.fetch_sync(conn, params)
def _after_types_fetched(self):
# типы параметров передадим мапперу
self.param_mapper.assign_type_knowledge(self.query_signature.parameters)
# а типы аттрибутов - результату
self.shape.set_attributes(self.query_signature.attributes)
async def fetch_types(self, db) -> None:
try:
self.query_signature = await self.contract.get_query_signature(db, self)
self._after_types_fetched()
| from __future__ import annotations
if typing.TYPE_CHECKING:
RS = typing.TypeVar('RS', bound='ResultShape')
class Query(typing.Generic[RS]):
defaults: dict[str, typing.Any]
def __init__(
self,
func: typing.Callable,
shape: typing.Callable[[Query], RS],
module: ModuleConstructor,
conn_param_config: ConnParamConfig,
explicit_name: typing.Optional[str],
parent_query: typing.Optional[Query[One | All]],
kwargs: typing.Optional[typing.Dict[str, typing.Any]]
) -> None:
self.parent_query: Query[One | All] | None = parent_query
self.imports = set()
self.kwargs = kwargs or dict()
self.query = func
self.name = explicit_name or func.__name__
self.conn_param_config = conn_param_config
self.sig = inspect.signature(func)
self.template_signature = None
self.module = module
self.module.queries_list.append(self)
self.param_mapper: ParamMapper = self.contract.create_param_mapper(self)
self.sql = self.param_mapper.parametrize_query()
self.default = DictGetAttr(self.param_mapper.defaults)
# side effect: attr gets populated, so we flush it
self.attr_hints: dict[str, Attr] = {
a.name: a
for a in _attr_.__getattrs__()
}
module_filename = self.module.module.__file__
common = path.commonprefix([module.querky.basedir, module_filename])
self.relative_path = module_filename[len(common):]
self.unique_name = f"{self.relative_path}:{self.query.__name__}"
self.local_name = self.get_local_name()
self.query_signature: QuerySignature | None = None
self.conn_type_knowledge: TypeKnowledge | None = None
self.bound_type = None
self.shape: ResultShape = shape(self)
if not isinstance(self.shape, (One, All)) and parent_query:
raise ValueError("Only One and All queries can have a parent query.")
if parent_query and not isinstance(parent_query.shape, (One, All)):
raise ValueError("Parent query must be of either One or All shape.")
logger.debug(
"Query: %s\nSQL: %s",
self.unique_name, self.sql
)
@property
def annotation_generator(self):
return self.querky.annotation_generator
@property
def contract(self):
return self.module.querky.contract
@property
def querky(self):
return self.module.querky
def bind_type(self, t) -> None:
self.bound_type = t
async def execute(self, conn, *args, **kwargs):
params = self.param_mapper.map_params(*args, **kwargs)
return await self.shape.fetch(conn, params)
def execute_sync(self, conn, *args, **kwargs):
params = self.param_mapper.map_params(*args, **kwargs)
return self.shape.fetch_sync(conn, params)
def _after_types_fetched(self):
# типы параметров передадим мапперу
self.param_mapper.assign_type_knowledge(self.query_signature.parameters)
# а типы аттрибутов - результату
self.shape.set_attributes(self.query_signature.attributes)
async def fetch_types(self, db) -> None:
try:
self.query_signature = await self.contract.get_query_signature(db, self)
self._after_types_fetched() | except QueryInitializationError: | 1 | 2023-12-13 15:16:34+00:00 | 8k |
javrtg/C2P | nonmin_pose/constraints/constraint_manager.py | [
{
"identifier": "constraints",
"path": "nonmin_pose/constraints/constraints.py",
"snippet": "def assert_smaller_idxes(param1i, param2i):\n def __init__(self, name: str, block: int, block_ids: List[int]):\n def __init__(\n self,\n params: dict,\n idx_first_el: int,\n idx... | from collections import defaultdict
from typing import Dict, List, Optional, Union
from nonmin_pose.constraints import constraints as cnt
from nonmin_pose.constraints.constraints import Parameter
import numpy as np | 3,909 |
ConstraintConfig = Union[Dict[str, Optional[List[int]]], Dict[str, None]]
class ConstraintManager:
"""Manager of the metadata of constraints (blocks, values, indexes, etc.)."""
CONSTRAINT_CLASSES = {
|
ConstraintConfig = Union[Dict[str, Optional[List[int]]], Dict[str, None]]
class ConstraintManager:
"""Manager of the metadata of constraints (blocks, values, indexes, etc.)."""
CONSTRAINT_CLASSES = { | "manif_def_left": cnt.ManifDefLeft, | 1 | 2023-12-10 18:25:10+00:00 | 8k |
Jack24658735/FedLGT | models/CTran.py | [
{
"identifier": "SelfAttnLayer",
"path": "models/transformer_layers.py",
"snippet": "class SelfAttnLayer(nn.Module):\n def __init__(self, d_model, nhead = 4,dropout=0.1):\n super().__init__()\n self.transformer_layer = TransformerEncoderLayer(d_model, nhead, d_model*1, dropout=dropout, ... | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .transformer_layers import SelfAttnLayer
from .backbone import Backbone, BackboneCLIP
from .utils import custom_replace,weights_init
from .position_enc import PositionEmbeddingSine,positionalencoding2d
from .ml_decoder import MLDecoder | 3,637 |
class CTranModel(nn.Module):
def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None):
super(CTranModel, self).__init__()
self.use_lmt = use_lmt
self.no_x_features = no_x_features # (for no image features)
# ResNet backbone
self.backbone = Backbone()
# self.backbone_c = BackboneCLIP()
hidden = 512 # this should match the backbone output feature size
self.downsample = False
if self.downsample:
self.conv_downsample = torch.nn.Conv2d(hidden,hidden,(1,1))
# Label Embeddings
self.label_input = torch.Tensor(np.arange(num_labels)).view(1,-1).long()
self.label_lt = torch.nn.Embedding(num_labels, hidden, padding_idx=None)
self.clip_label_lt = nn.Embedding.from_pretrained(label_weight, freeze=True, padding_idx=None)
# State Embeddings
self.known_label_lt = nn.Embedding.from_pretrained(state_weight, freeze=True, padding_idx=0)
# self.known_label_lt = torch.nn.Embedding(3, hidden, padding_idx=0)
# Position Embeddings (for image features)
self.use_pos_enc = pos_emb
if self.use_pos_enc:
# self.position_encoding = PositionEmbeddingSine(int(hidden/2), normalize=True)
self.position_encoding = positionalencoding2d(hidden, 18, 18).unsqueeze(0)
# Transformer
|
class CTranModel(nn.Module):
def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None):
super(CTranModel, self).__init__()
self.use_lmt = use_lmt
self.no_x_features = no_x_features # (for no image features)
# ResNet backbone
self.backbone = Backbone()
# self.backbone_c = BackboneCLIP()
hidden = 512 # this should match the backbone output feature size
self.downsample = False
if self.downsample:
self.conv_downsample = torch.nn.Conv2d(hidden,hidden,(1,1))
# Label Embeddings
self.label_input = torch.Tensor(np.arange(num_labels)).view(1,-1).long()
self.label_lt = torch.nn.Embedding(num_labels, hidden, padding_idx=None)
self.clip_label_lt = nn.Embedding.from_pretrained(label_weight, freeze=True, padding_idx=None)
# State Embeddings
self.known_label_lt = nn.Embedding.from_pretrained(state_weight, freeze=True, padding_idx=0)
# self.known_label_lt = torch.nn.Embedding(3, hidden, padding_idx=0)
# Position Embeddings (for image features)
self.use_pos_enc = pos_emb
if self.use_pos_enc:
# self.position_encoding = PositionEmbeddingSine(int(hidden/2), normalize=True)
self.position_encoding = positionalencoding2d(hidden, 18, 18).unsqueeze(0)
# Transformer | self.self_attn_layers = nn.ModuleList([SelfAttnLayer(hidden,heads,dropout) for _ in range(layers)]) | 0 | 2023-12-09 09:16:59+00:00 | 8k |
AgriCodeHub/dairy-django-backend | health/serializers.py | [
{
"identifier": "Cow",
"path": "core/models.py",
"snippet": "class Cow(models.Model):\n \"\"\"\n Represents an individual cow in the dairy farm.\n\n Attributes:\n - `name` (str): The name of the cow.\n - `breed` (CowBreed): The breed of the cow.\n - `date_of_birth` (date): The birthdat... | from rest_framework import serializers
from core.models import Cow
from health.models import (
DiseaseCategory,
WeightRecord,
CullingRecord,
QuarantineRecord,
Pathogen,
Symptoms,
Disease,
Recovery,
Treatment,
) | 6,470 | class Meta:
model = DiseaseCategory
fields = ("name",)
```
"""
class Meta:
model = DiseaseCategory
fields = ("name",)
class SymptomsSerializer(serializers.ModelSerializer):
"""
Serializer for the Symptoms model.
Fields:
- `name`: The name of the symptom.
- `symptom_type`: The type of the symptom.
- `description`: Description of the symptom (nullable).
- `date_observed`: Date when the symptom was observed.
- `severity`: Severity of the symptom.
- `location`: Location of the symptom.
Meta:
- `model`: The Symptoms model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
"""
class Meta:
model = Symptoms
fields = (
"name",
"symptom_type",
"description",
"date_observed",
"severity",
"location",
)
class DiseaseSerializer(serializers.ModelSerializer):
"""
Serializer for the Disease model.
Fields:
- `name`: The name of the disease.
- `pathogen`: The pathogen causing the disease.
- `category`: The category of the disease.
- `date_reported`: Date when the disease was reported.
- `occurrence_date`: Date when the disease occurred.
- `notes`: Additional notes about the disease (nullable).
- `cows`: Cows affected by the disease.
- `symptoms`: Symptoms associated with the disease.
Meta:
- `model`: The Disease model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Note: The `cows` and `symptoms` fields are represented by their primary keys in the serialized data.
"""
class Meta:
model = Disease
fields = (
"name",
"pathogen",
"category",
"date_reported",
"occurrence_date",
"notes",
"cows",
"symptoms",
)
class RecoverySerializer(serializers.ModelSerializer):
"""
Serializer for the Recovery model.
Fields:
- `cow`: The cow recovering from the disease.
- `disease`: The disease from which the cow is recovering.
- `diagnosis_date`: Date when the disease was diagnosed.
- `recovery_date`: Date when the cow recovered (nullable).
Meta:
- `model`: The Recovery model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Note: The `cow` and `disease` fields are represented by their primary keys in the serialized data.
"""
class Meta:
model = Recovery
fields = ("cow", "disease", "diagnosis_date", "recovery_date")
class TreatmentSerializer(serializers.ModelSerializer):
"""
Serializer for the Treatment model.
Fields:
- `disease`: The disease for which the cow is receiving treatment.
- `cow`: The cow undergoing treatment.
- `date_of_treatment`: Date when the treatment was initiated.
- `treatment_method`: Description of the treatment method (max length: 300).
- `notes`: Additional notes about the treatment (nullable).
- `treatment_status`: Status of the treatment.
- `completion_date`: Date when the treatment was completed (nullable).
Meta:
- `model`: The Treatment model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Note: The `disease` and `cow` fields are represented by their primary keys in the serialized data.
"""
class Meta:
|
class WeightRecordSerializer(serializers.ModelSerializer):
"""
Serializer for the WeightRecord model.
Fields:
- `cow`: A primary key related field representing the cow associated with the weight record.
- `weight_in_kgs`: A decimal field representing the weight of the cow in kilograms.
- `date_taken`: A date field representing the date when the weight record was taken.
Meta:
- `model`: The WeightRecord model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Usage:
Use this serializer to convert WeightRecord model instances to JSON representations
and vice versa.
Example:
```
class WeightRecord(models.Model):
cow = models.ForeignKey(Cow, on_delete=models.CASCADE)
weight_in_kgs = models.DecimalField(max_digits=6, decimal_places=2)
date_taken = models.DateField(auto_now_add=True)
class WeightRecordSerializer(serializers.ModelSerializer):
class Meta:
model = WeightRecord
fields = ("cow", "weight_in_kgs", "date_taken")
```
"""
cow = serializers.PrimaryKeyRelatedField(queryset=Cow.objects.all())
class Meta:
model = WeightRecord
fields = ("cow", "weight_in_kgs", "date_taken")
class CullingRecordSerializer(serializers.ModelSerializer):
"""
Serializer for the CullingRecord model.
Fields:
- `cow`: A primary key related field representing the cow associated with the culling record.
- `reason`: A field representing the reason for culling, chosen from predefined choices.
- `notes`: A text field representing additional notes or comments about the culling.
- `date_carried`: A date field representing the date when the culling record was created.
Meta:
- `model`: The CullingRecord model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Usage:
Use this serializer to convert CullingRecord model instances to JSON representations
and vice versa.
Example:
```python
class CullingRecord(models.Model):
cow = models.OneToOneField(Cow, on_delete=models.CASCADE, related_name="culling_record")
reason = models.CharField(max_length=35, choices=CullingReasonChoices.choices)
notes = models.TextField(null=True, max_length=100)
date_carried = models.DateField(auto_now_add=True)
class CullingRecordSerializer(serializers.ModelSerializer):
class Meta:
model = CullingRecord
fields = ("cow", "reason", "notes", "date_carried")
```
"""
cow = serializers.PrimaryKeyRelatedField(queryset=Cow.objects.all())
class Meta:
model = CullingRecord
fields = ("cow", "reason", "notes", "date_carried")
class QuarantineRecordSerializer(serializers.ModelSerializer):
"""
Serializer for the QuarantineRecord model.
Fields:
- `cow`: A primary key related field representing the cow associated with the quarantine record.
- `reason`: A choice field representing the reason for quarantine.
- `start_date`: A date field representing the start date of the quarantine record.
- `end_date`: A date field representing the end date of the quarantine record.
- `notes`: A text field representing optional notes for the quarantine record.
Meta:
- `model`: The QuarantineRecord model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Usage:
Use this serializer to convert QuarantineRecord model instances to JSON representations
and vice versa.
Example:
```
class QuarantineRecord(models.Model):
cow = models.ForeignKey(Cow, on_delete=models.CASCADE, related_name="quarantine_records")
reason = models.CharField(max_length=35, choices=QuarantineReasonChoices.choices)
start_date = models.DateField(auto_now_add=True)
end_date = models.DateField(null=True)
notes = models.TextField(null=True, max_length=100)
class QuarantineRecordSerializer(serializers.ModelSerializer):
class Meta:
model = QuarantineRecord
fields = ("cow", "reason", "start_date", "end_date", "notes")
```
"""
cow = serializers.PrimaryKeyRelatedField(queryset=Cow.objects.all())
class Meta:
model = QuarantineRecord
fields = ("cow", "reason", "start_date", "end_date", "notes")
class PathogenSerializer(serializers.ModelSerializer):
"""
Serializer for the Pathogen model.
Fields:
- `name`: A choice field representing the type of pathogen.
Meta:
- `model`: The Pathogen model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Usage:
Use this serializer to convert Pathogen model instances to JSON representations
and vice versa.
Example:
```
class Pathogen(models.Model):
name = models.CharField(max_length=10, choices=PathogenChoices.choices)
# diagnosis_date = models.DateField(auto_now_add=True)
class PathogenSerializer(serializers.ModelSerializer):
class Meta:
model = Pathogen
fields = ("name",)
```
"""
class Meta:
model = Pathogen
fields = ("name",)
class DiseaseCategorySerializer(serializers.ModelSerializer):
"""
Serializer for the DiseaseCategory model.
Fields:
- `name`: A choice field representing the type of disease.
Meta:
- `model`: The DiseaseCategory model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Usage:
Use this serializer to convert DiseaseCategory model instances to JSON representations
and vice versa.
Example:
```
class DiseaseCategory(models.Model):
name = models.CharField(max_length=15, choices=DiseaseCategoryChoices.choices)
class DiseaseCategorySerializer(serializers.ModelSerializer):
class Meta:
model = DiseaseCategory
fields = ("name",)
```
"""
class Meta:
model = DiseaseCategory
fields = ("name",)
class SymptomsSerializer(serializers.ModelSerializer):
"""
Serializer for the Symptoms model.
Fields:
- `name`: The name of the symptom.
- `symptom_type`: The type of the symptom.
- `description`: Description of the symptom (nullable).
- `date_observed`: Date when the symptom was observed.
- `severity`: Severity of the symptom.
- `location`: Location of the symptom.
Meta:
- `model`: The Symptoms model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
"""
class Meta:
model = Symptoms
fields = (
"name",
"symptom_type",
"description",
"date_observed",
"severity",
"location",
)
class DiseaseSerializer(serializers.ModelSerializer):
"""
Serializer for the Disease model.
Fields:
- `name`: The name of the disease.
- `pathogen`: The pathogen causing the disease.
- `category`: The category of the disease.
- `date_reported`: Date when the disease was reported.
- `occurrence_date`: Date when the disease occurred.
- `notes`: Additional notes about the disease (nullable).
- `cows`: Cows affected by the disease.
- `symptoms`: Symptoms associated with the disease.
Meta:
- `model`: The Disease model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Note: The `cows` and `symptoms` fields are represented by their primary keys in the serialized data.
"""
class Meta:
model = Disease
fields = (
"name",
"pathogen",
"category",
"date_reported",
"occurrence_date",
"notes",
"cows",
"symptoms",
)
class RecoverySerializer(serializers.ModelSerializer):
"""
Serializer for the Recovery model.
Fields:
- `cow`: The cow recovering from the disease.
- `disease`: The disease from which the cow is recovering.
- `diagnosis_date`: Date when the disease was diagnosed.
- `recovery_date`: Date when the cow recovered (nullable).
Meta:
- `model`: The Recovery model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Note: The `cow` and `disease` fields are represented by their primary keys in the serialized data.
"""
class Meta:
model = Recovery
fields = ("cow", "disease", "diagnosis_date", "recovery_date")
class TreatmentSerializer(serializers.ModelSerializer):
"""
Serializer for the Treatment model.
Fields:
- `disease`: The disease for which the cow is receiving treatment.
- `cow`: The cow undergoing treatment.
- `date_of_treatment`: Date when the treatment was initiated.
- `treatment_method`: Description of the treatment method (max length: 300).
- `notes`: Additional notes about the treatment (nullable).
- `treatment_status`: Status of the treatment.
- `completion_date`: Date when the treatment was completed (nullable).
Meta:
- `model`: The Treatment model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Note: The `disease` and `cow` fields are represented by their primary keys in the serialized data.
"""
class Meta: | model = Treatment | 9 | 2023-12-09 06:56:42+00:00 | 8k |
facebookresearch/chat2map-official | main.py | [
{
"identifier": "baseline_registry",
"path": "chat2map/common/baseline_registry.py",
"snippet": "class BaselineRegistry(Registry):\n def register_trainer(cls, to_register=None, *, name: Optional[str] = None):\n def get_trainer(cls, name):\n def register_env(cls, to_register=None, *, name: Optio... | import argparse
import logging
import warnings
import tensorflow as tf
import torch
from chat2map.common.baseline_registry import baseline_registry
from chat2map.config.default import get_config
from habitat_audio import * | 3,998 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
def main():
# command line args
parser = argparse.ArgumentParser()
parser.add_argument(
"--run-type",
choices=["train", "eval"],
default='train',
help="run type of the experiment (train or eval)",
)
parser.add_argument(
"--exp-config",
type=str,
default='baselines/config/pointnav_rgb.yaml',
help="path to config yaml containing info about experiment",
)
parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER,
help="Modify config options from command line",
)
parser.add_argument(
"--model-dir",
default=None,
help="Modify config options from command line",
)
parser.add_argument(
"--eval-interval",
type=int,
default=1,
help="Evaluation interval of checkpoints",
)
parser.add_argument(
"--prev-ckpt-ind",
type=int,
default=-1,
help="Evaluation interval of checkpoints",
)
args = parser.parse_args()
# run exp
config = get_config(args.exp_config, args.opts, args.model_dir, args.run_type)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
def main():
# command line args
parser = argparse.ArgumentParser()
parser.add_argument(
"--run-type",
choices=["train", "eval"],
default='train',
help="run type of the experiment (train or eval)",
)
parser.add_argument(
"--exp-config",
type=str,
default='baselines/config/pointnav_rgb.yaml',
help="path to config yaml containing info about experiment",
)
parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER,
help="Modify config options from command line",
)
parser.add_argument(
"--model-dir",
default=None,
help="Modify config options from command line",
)
parser.add_argument(
"--eval-interval",
type=int,
default=1,
help="Evaluation interval of checkpoints",
)
parser.add_argument(
"--prev-ckpt-ind",
type=int,
default=-1,
help="Evaluation interval of checkpoints",
)
args = parser.parse_args()
# run exp
config = get_config(args.exp_config, args.opts, args.model_dir, args.run_type) | trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME) | 0 | 2023-12-06 01:20:37+00:00 | 8k |
wrongbad/badcad | badcad/badcad.py | [
{
"identifier": "display",
"path": "badcad/utils.py",
"snippet": "def display(thing, \n vscode_fix=True, \n wireframe=False, \n color='#aaaa22', \n smoothing_threshold=-1,\n width=640,\n height=640,\n ):\n if vscode_fix:\n fix_vscode_style()\n \n... | import manifold3d
import numpy as np
from manifold3d import Manifold, CrossSection
from .utils import (
display,
triangle_normals,
polygon_nearest_alignment,
svg2polygons,
text2svg,
PolyPath
) | 4,491 |
def num_vert(self):
return self.cross_section.num_vert()
def offset(self, delta, join_type='miter', miter_limit=2, circular_segments=0):
if join_type == 'round':
join_type = manifold3d.JoinType.Round
elif join_type == 'miter':
join_type = manifold3d.JoinType.Miter
elif join_type == 'square':
join_type = manifold3d.JoinType.Square
else:
raise ValueError(f'{join_type=}')
return Shape(self.cross_section.offset(
delta, join_type, miter_limit, circular_segments
))
def revolve(self, z=360, fn=0):
return Solid(self.cross_section.revolve(
circular_segments=fn,
revolve_degrees=z,
))
def rotate(self, z):
return Shape(self.cross_section.rotate(z))
def scale(self, x=1, y=1):
return Shape(self.cross_section.scale((x, y)))
def simplify(self, eps):
return Shape(self.cross_section.simplify(eps))
def to_polygons(self):
return self.cross_section.to_polygons()
def transform(self, matrix):
return Shape(self.cross_section.transform(matrix))
def move(self, x=0, y=0):
return Shape(self.cross_section.translate((x,y)))
def warp(self, xy_map_func):
return Shape(self.cross_section.warp(xy_map_func))
def warp_batch(self, xy_map_func):
return Shape(self.cross_section.warp_batch(xy_map_func))
def get_circular_segments(radius):
return manifold3d.get_circular_segments(radius)
def set_circular_segments(nseg):
manifold3d.set_circular_segments(nseg)
def set_min_circular_angle(degrees):
manifold3d.set_min_circular_angle(degrees)
def set_min_circular_edge_length(length):
manifold3d.set_min_circular_edge_length(length)
def hull(*solids):
mans = [s.manifold for s in solids]
return Solid(Manifold.batch_hull(mans))
def hull_points(points):
return Shape(Manifold.hull_points(points))
def hull2d(*shapes):
sects = [s.cross_section for s in shapes]
return Shape(CrossSection.batch_hull(sects))
def hull2d_points(points):
return Shape(CrossSection.hull_points(points))
def cube(x=1, y=1, z=1, center=False):
return Solid(Manifold.cube((x, y, z), center=center))
def cylinder(h=1, d=1, r=None, center=False, fn=0, outer=False):
r = r or d/2
fn = fn or get_circular_segments(r)
s = 1/np.cos(np.pi/fn) if outer else 1
return Solid(Manifold.cylinder(
h, r*s, r*s, circular_segments=fn, center=center))
def conic(h=1, d1=1, d2=1, r1=None, r2=None, center=False, fn=0, outer=False):
r1 = r1 or d1/2
r2 = r2 or d2/2
fn = fn or get_circular_segments(max(r1,r2))
s = 1/np.cos(np.pi/fn) if outer else 1
return Solid(Manifold.cylinder(
h, r1*s, r2*s, circular_segments=fn, center=center))
def sphere(d=1, r=None, fn=0):
r = r or d/2
return Solid(Manifold.sphere(r, fn))
def circle(d=1, r=None, fn=0, outer=False):
r = r or d/2
fn = fn or get_circular_segments(r)
s = 1/np.cos(np.pi/fn) if outer else 1
return Shape(CrossSection.circle(r*s, fn))
def square(x=1, y=1, center=False):
return Shape(CrossSection.square((x, y), center=center))
def polygon(points, fill_rule='even_odd'):
    """Build a Shape from a single contour, filled according to `fill_rule`.

    `fill_rule` is one of 'even_odd', 'negative', 'non_zero', 'positive'.
    Raises ValueError for anything else.
    """
    rule_attr = {
        'even_odd': 'EvenOdd',
        'negative': 'Negative',
        'non_zero': 'NonZero',
        'positive': 'Positive',
    }.get(fill_rule)
    if rule_attr is None:
        raise ValueError(f'{fill_rule=}')
    chosen_rule = getattr(manifold3d.FillRule, rule_attr)
    return Shape(CrossSection([points], fillrule=chosen_rule))
def text(t, size=10, font="Helvetica", fn=8):
|
# wrapper for Manifold
# adds jupyter preview & tweaks API
class Solid:
    """Thin wrapper around manifold3d.Manifold.

    Adds a Jupyter mesh preview and a friendlier, immutable-style API:
    every operation returns a new Solid.
    """

    def __init__(self, manifold=None):
        # FIX: build the empty default per call instead of sharing a single
        # Manifold() instance evaluated once at class-definition time
        # (mutable-default-argument anti-pattern).
        self.manifold = Manifold() if manifold is None else manifold
        # TODO add visual properties (e.g. color, texture)

    def _repr_mimebundle_(self, **kwargs):
        """Jupyter display hook: render the triangle mesh via `display`."""
        if self.is_empty():
            return None
        raw_mesh = self.to_mesh()
        verts = raw_mesh.vert_properties.astype(np.float32)
        tris = raw_mesh.tri_verts.astype(np.uint32)
        renderer = display((verts, tris))
        return renderer._repr_mimebundle_(**kwargs)

    def __add__(self, other):
        """Boolean union."""
        return Solid(self.manifold + other.manifold)

    def __sub__(self, other):
        """Boolean difference."""
        return Solid(self.manifold - other.manifold)

    def __and__(self, other):
        """Boolean intersection."""
        # manifold3d XOR is actually AND
        return Solid(self.manifold ^ other.manifold)

    def as_original(self):
        return Solid(self.manifold.as_original())

    def bounding_box(self):
        """Return (x0, y0, z0, x1, y1, z1)."""
        return self.manifold.bounding_box()

    def calculate_curvature(self, gaussian_idx: int, mean_idx: int):
        return Solid(self.manifold.calculate_curvature(gaussian_idx, mean_idx))

    def align(self,
              xmin=None, x=None, xmax=None,
              ymin=None, y=None, ymax=None,
              zmin=None, z=None, zmax=None):
        """Translate so the bounding box's min/center/max lands on the given
        coordinate, per axis.  If several constraints are given for one axis
        the later keyword (in signature order) wins."""
        x0, y0, z0, x1, y1, z1 = self.bounding_box()
        dx, dy, dz = 0, 0, 0
        if xmin is not None: dx = xmin-x0
        if x is not None: dx = x-(x0+x1)/2
        if xmax is not None: dx = xmax-x1
        if ymin is not None: dy = ymin-y0
        if y is not None: dy = y-(y0+y1)/2
        if ymax is not None: dy = ymax-y1
        if zmin is not None: dz = zmin-z0
        if z is not None: dz = z-(z0+z1)/2
        return self.move(dx, dy, dz)

    def decompose(self):
        """Split into topologically disconnected components."""
        return [Solid(m) for m in self.manifold.decompose()]

    def genus(self):
        return self.manifold.get_genus()

    def get_surface_area(self):
        return self.manifold.get_surface_area()

    def get_volume(self):
        return self.manifold.get_volume()

    def hull(self, *others):
        """Convex hull of this solid together with any `others`."""
        return Solid(Manifold.batch_hull([self.manifold, *[o.manifold for o in others]]))

    def is_empty(self):
        return self.manifold.is_empty()

    def mirror(self, x=0, y=0, z=0):
        """Mirror across the plane whose normal is (x, y, z)."""
        return Solid(self.manifold.mirror((x, y, z)))

    def num_edge(self):
        return self.manifold.num_edge()

    def num_prop(self):
        return self.manifold.num_prop()

    def num_prop_vert(self):
        return self.manifold.num_prop_vert()

    def num_tri(self):
        return self.manifold.num_tri()

    def num_vert(self):
        return self.manifold.num_vert()

    def original_id(self):
        return self.manifold.original_id()

    def precision(self):
        return self.manifold.precision()

    def refine(self, n=2):
        """Subdivide each edge into `n` pieces."""
        return Solid(self.manifold.refine(n))

    def rotate(self, x=0, y=0, z=0):
        """Rotate by Euler angles, in degrees."""
        return Solid(self.manifold.rotate((x, y, z)))

    def scale(self, x=1, y=1, z=1):
        return Solid(self.manifold.scale((x, y, z)))

    def set_properties(self, *args, **kwargs):
        # Deliberately unsupported in this wrapper.
        raise ValueError("not implemented")

    def split(self, cutter):
        """Return (intersection, difference) against `cutter`."""
        inter, diff = self.manifold.split(cutter)
        return Solid(inter), Solid(diff)

    def split_by_plane(self, x=0, y=0, z=0, offset=0):
        """Split by the plane with normal (x, y, z) at `offset`; returns (top, bottom)."""
        top, bottom = self.manifold.split_by_plane((x, y, z), offset)
        return Solid(top), Solid(bottom)

    def status(self):
        # NOTE(review): accessed without parentheses -- presumably a
        # property on the underlying Manifold; confirm against manifold3d.
        return self.manifold.status

    def to_mesh(self, normal_idx=[0,0,0]):
        # NOTE: list default is never mutated here; kept for API
        # compatibility with manifold3d.
        return self.manifold.to_mesh(normal_idx)

    def transform(self, matrix):
        """Apply an arbitrary affine 3-D transform matrix."""
        return Solid(self.manifold.transform(matrix))

    def move(self, x=0, y=0, z=0):
        """Translate by (x, y, z)."""
        return Solid(self.manifold.translate((x,y,z)))

    def trim_by_plane(self, x=0, y=0, z=0, offset=0):
        """Keep only the half-space on the normal side of the plane."""
        return Solid(self.manifold.trim_by_plane((x, y, z), offset))

    def warp(self, xyz_map_fn):
        """Map each vertex through `xyz_map_fn` (point -> point)."""
        return Solid(self.manifold.warp(xyz_map_fn))

    def warp_batch(self, xyz_map_fn):
        """Vectorized warp: `xyz_map_fn` transforms all vertices at once."""
        return Solid(self.manifold.warp_batch(xyz_map_fn))

    def refine_to_length(self, edge_len):
        """Subdivide triangles until no edge is longer than `edge_len`.

        Repeatedly splits the longest edge of each oversized triangle at its
        midpoint; midpoints are cached by (lo, hi) vertex index so the two
        triangles sharing an edge reuse the same new vertex.
        """
        m = self.manifold.to_mesh()
        verts = m.vert_properties.tolist()
        tris = m.tri_verts.tolist()
        mids = {}  # (lo_idx, hi_idx) -> midpoint vertex index
        i = 0
        while i < len(tris):
            tri = tris[i]
            corners = [verts[vi] for vi in tri]
            # Edge k runs from corner k-1 to corner k (k-1 wraps via -1).
            dv = corners - np.roll(corners, 1, 0)
            lens = np.linalg.norm(dv, axis=-1)
            mi = np.argmax(lens)
            if lens[mi] > edge_len:
                # Order-independent key lets the neighbouring triangle find
                # the shared midpoint.
                key = (min(tri[mi], tri[mi-1]), max(tri[mi], tri[mi-1]))
                if key not in mids:
                    mididx = len(verts)
                    midv = [(corners[mi][j] + corners[mi-1][j])/2 for j in [0, 1, 2]]
                    verts += [midv]
                    mids[key] = mididx
                else:
                    mididx = mids[key]
                # Append one new triangle and shrink the current one in
                # place; do NOT advance i so the shrunk triangle is
                # re-examined for further splits.
                tri2 = [*tri]
                tri2[mi-1] = mididx
                tris += [tri2]
                tri[mi] = mididx
            else:
                i += 1
        verts = np.array(verts, np.float32)
        tris = np.array(tris, np.int32)
        m = manifold3d.Mesh(verts, tris, face_id=np.arange(len(tris)))
        return Solid(Manifold(m))

    def stl(self, fname=None):
        """Serialize to binary STL.

        Writes to `fname` and returns self when a filename is given,
        otherwise returns the raw bytes.
        """
        mesh = self.to_mesh()
        tris = mesh.tri_verts.astype(np.uint32)
        verts = mesh.vert_properties.astype(np.float32)
        tnormals = triangle_normals(verts, tris)
        ntris = tris.shape[0]
        # Binary STL: 80-byte header + uint32 triangle count = 21 uint32 words.
        header = np.zeros(21, dtype=np.uint32)
        header[20] = ntris
        # One 50-byte record per triangle: normal (12B), 3 vertices (36B),
        # 2 attribute bytes (left zero).
        body = np.zeros((ntris, 50), dtype=np.uint8)
        body[:, 0:12] = tnormals.view(np.uint8)
        # FIX: view as uint8 (was int8) to match body's dtype and avoid
        # relying on unsafe-cast wraparound; the bytes are identical.
        body[:, 12:24] = verts[tris[:, 0]].view(np.uint8)
        body[:, 24:36] = verts[tris[:, 1]].view(np.uint8)
        body[:, 36:48] = verts[tris[:, 2]].view(np.uint8)
        binary = header.tobytes() + body.tobytes()
        if fname:
            with open(fname, 'wb') as f:
                f.write(binary)
            return self
        else:
            return binary
class Shape:
    """Thin wrapper around manifold3d.CrossSection (a 2-D region).

    Mirrors the Solid API: every operation returns a new Shape, and Jupyter
    previews render via a paper-thin extrusion.
    """

    def __init__(self, cross_section=None):
        # FIX: build the empty default per call instead of sharing a single
        # CrossSection() instance evaluated once at class-definition time
        # (mutable-default-argument anti-pattern).
        self.cross_section = CrossSection() if cross_section is None else cross_section

    def _repr_mimebundle_(self, **kwargs):
        # called by jupyter to figure out how to display this object
        # we create a scene on the fly with ability to customize
        # controls and lights, etc.
        return self.extrude(1e-9)._repr_mimebundle_(**kwargs)

    def __add__(self, other):
        """Boolean union."""
        return Shape(self.cross_section + other.cross_section)

    def __sub__(self, other):
        """Boolean difference."""
        return Shape(self.cross_section - other.cross_section)

    def __and__(self, other):
        """Boolean intersection."""
        # manifold3d XOR is actually AND
        return Shape(self.cross_section ^ other.cross_section)

    def area(self):
        return self.cross_section.area()

    def bounds(self):
        """Return (x0, y0, x1, y1)."""
        return self.cross_section.bounds()

    def align(self,
              xmin=None, x=None, xmax=None,
              ymin=None, y=None, ymax=None):
        """Translate so the bounding box's min/center/max lands on the given
        coordinate, per axis.  If several constraints are given for one axis
        the later keyword (in signature order) wins."""
        x0, y0, x1, y1 = self.bounds()
        dx, dy = 0, 0
        if xmin is not None: dx = xmin-x0
        if x is not None: dx = x-(x0+x1)/2
        if xmax is not None: dx = xmax-x1
        if ymin is not None: dy = ymin-y0
        if y is not None: dy = y-(y0+y1)/2
        if ymax is not None: dy = ymax-y1
        return self.move(dx, dy)

    def decompose(self):
        """Split into topologically disconnected contours."""
        return [Shape(p) for p in self.cross_section.decompose()]

    def extrude(self, height, fn=0, twist=0, scale_top=(1,1), center=False):
        """Linear-extrude to a Solid; optional twist (degrees) and top scale."""
        s = Solid(self.cross_section.extrude(
            height,
            n_divisions=fn,
            twist_degrees=twist,
            scale_top=scale_top,
        ))
        return s.move(z=-height/2) if center else s

    def extrude_to(self, other, height, center=False):
        """Loft a Solid from this shape at z=0 to `other` at z=`height`.

        Both shapes must be simple (single-contour) polygons; the side wall
        is built from a nearest-vertex alignment of the two outlines.
        """
        polys1 = self.to_polygons()
        assert len(polys1) == 1, 'extrude_to only supports simple polygons'
        # Lift the 2-D contours to 3-D by padding a z column (0 / height).
        verts1 = np.pad(polys1[0], [[0, 0], [0, 1]], constant_values=0)
        N1 = verts1.shape[0]
        polys2 = other.to_polygons()
        assert len(polys2) == 1, 'extrude_to only supports simple polygons'
        verts2 = np.pad(polys2[0], [[0, 0], [0, 1]], constant_values=height)
        # flip the bottom over (reverse winding so it faces outward/down)
        tris1 = manifold3d.triangulate(polys1)
        tmp = tris1[:, 1].copy()
        tris1[:, 1] = tris1[:, 2]
        tris1[:, 2] = tmp
        # offset top vertex indices past the bottom ring
        tris2 = manifold3d.triangulate(polys2)
        tris2 += N1
        alignment = polygon_nearest_alignment(verts1, verts2)
        alignment = [(a, b + N1) for a, b in alignment]
        # build the skirt faces: one or two triangles per alignment step
        tris3 = []
        for s in range(len(alignment)):
            i, j = alignment[s]
            pi, pj = alignment[s-1]
            if i != pi:
                tris3 += [[pi, i, pj]]
            if j != pj:
                tris3 += [[i, j, pj]]
        tris3 = np.array(tris3)
        verts = np.concatenate((verts1, verts2))
        tris = np.concatenate((tris1, tris2, tris3))
        mesh = manifold3d.Mesh(verts, tris)
        s = Solid(Manifold(mesh))
        return s.move(z=-height/2) if center else s

    def hull(self, *others):
        """Convex hull of this shape together with any `others`."""
        return Shape(CrossSection.batch_hull([self.cross_section, *[o.cross_section for o in others]]))

    def is_empty(self):
        return self.cross_section.is_empty()

    def mirror(self, x=0, y=0):
        """Mirror across the line whose normal is (x, y)."""
        return Shape(self.cross_section.mirror((x, y)))

    def num_contour(self):
        return self.cross_section.num_contour()

    def num_vert(self):
        return self.cross_section.num_vert()

    def offset(self, delta, join_type='miter', miter_limit=2, circular_segments=0):
        """Inset (delta < 0) or outset (delta > 0) the outline.

        `join_type` is one of 'round', 'miter', 'square'.
        """
        if join_type == 'round':
            join_type = manifold3d.JoinType.Round
        elif join_type == 'miter':
            join_type = manifold3d.JoinType.Miter
        elif join_type == 'square':
            join_type = manifold3d.JoinType.Square
        else:
            raise ValueError(f'{join_type=}')
        return Shape(self.cross_section.offset(
            delta, join_type, miter_limit, circular_segments
        ))

    def revolve(self, z=360, fn=0):
        """Revolve around the y axis by `z` degrees into a Solid."""
        return Solid(self.cross_section.revolve(
            circular_segments=fn,
            revolve_degrees=z,
        ))

    def rotate(self, z):
        """Rotate in-plane by `z` degrees."""
        return Shape(self.cross_section.rotate(z))

    def scale(self, x=1, y=1):
        return Shape(self.cross_section.scale((x, y)))

    def simplify(self, eps):
        """Remove vertices that deviate less than `eps` from the outline."""
        return Shape(self.cross_section.simplify(eps))

    def to_polygons(self):
        """Return the contours as a list of point lists."""
        return self.cross_section.to_polygons()

    def transform(self, matrix):
        """Apply an arbitrary affine 2-D transform matrix."""
        return Shape(self.cross_section.transform(matrix))

    def move(self, x=0, y=0):
        """Translate by (x, y)."""
        return Shape(self.cross_section.translate((x,y)))

    def warp(self, xy_map_func):
        """Map each vertex through `xy_map_func` (point -> point)."""
        return Shape(self.cross_section.warp(xy_map_func))

    def warp_batch(self, xy_map_func):
        """Vectorized warp: `xy_map_func` transforms all vertices at once."""
        return Shape(self.cross_section.warp_batch(xy_map_func))
def get_circular_segments(radius):
    # Query manifold3d's global circle-quality setting for this radius.
    return manifold3d.get_circular_segments(radius)

def set_circular_segments(nseg):
    # Globally fix the number of segments used to approximate circles.
    manifold3d.set_circular_segments(nseg)

def set_min_circular_angle(degrees):
    # Global quality knob: maximum angle subtended by one circle segment.
    manifold3d.set_min_circular_angle(degrees)

def set_min_circular_edge_length(length):
    # Global quality knob: minimum edge length of circle segments.
    manifold3d.set_min_circular_edge_length(length)
def hull(*solids):
    """Convex hull of one or more Solids."""
    mans = [s.manifold for s in solids]
    return Solid(Manifold.batch_hull(mans))
def hull_points(points):
    """Convex hull of a set of 3-D points, as a Solid.

    BUG FIX: Manifold.hull_points returns a (3-D) Manifold, so the result
    must be wrapped in Solid.  It was previously wrapped in Shape, whose
    methods expect a CrossSection and would fail on first use (compare the
    2-D counterpart hull2d_points, which correctly wraps a CrossSection).
    """
    return Solid(Manifold.hull_points(points))
def hull2d(*shapes):
    """2-D convex hull of one or more Shapes."""
    sects = [s.cross_section for s in shapes]
    return Shape(CrossSection.batch_hull(sects))

def hull2d_points(points):
    """2-D convex hull of a set of 2-D points."""
    return Shape(CrossSection.hull_points(points))
def cube(x=1, y=1, z=1, center=False):
    """Axis-aligned box; centered on the origin when `center` is True."""
    return Solid(Manifold.cube((x, y, z), center=center))
def cylinder(h=1, d=1, r=None, center=False, fn=0, outer=False):
    """Cylinder of height `h`, given by diameter `d` or radius `r`.

    NOTE: `r or d/2` treats an explicit r=0 as "unset" and falls back to d/2.
    `outer=True` circumscribes (rather than inscribes) the ideal circle.
    """
    r = r or d/2
    fn = fn or get_circular_segments(r)
    # Scale factor 1/cos(pi/n) pushes the polygon vertices outward so its
    # edges are tangent to the ideal circle.
    s = 1/np.cos(np.pi/fn) if outer else 1
    return Solid(Manifold.cylinder(
        h, r*s, r*s, circular_segments=fn, center=center))
def conic(h=1, d1=1, d2=1, r1=None, r2=None, center=False, fn=0, outer=False):
    """Truncated cone: bottom diameter/radius d1/r1, top d2/r2.

    NOTE: `or` treats explicit zero radii as "unset" (falls back to d/2).
    """
    r1 = r1 or d1/2
    r2 = r2 or d2/2
    # Segment count is chosen for the larger of the two radii.
    fn = fn or get_circular_segments(max(r1,r2))
    # Circumscribe instead of inscribe the ideal circle when outer=True.
    s = 1/np.cos(np.pi/fn) if outer else 1
    return Solid(Manifold.cylinder(
        h, r1*s, r2*s, circular_segments=fn, center=center))
def sphere(d=1, r=None, fn=0):
    """Sphere given by diameter `d` or radius `r` (radius wins if given)."""
    r = r or d/2
    return Solid(Manifold.sphere(r, fn))
def circle(d=1, r=None, fn=0, outer=False):
    """2-D circle given by diameter `d` or radius `r`.

    `outer=True` circumscribes the ideal circle (edges tangent to it).
    """
    r = r or d/2
    fn = fn or get_circular_segments(r)
    s = 1/np.cos(np.pi/fn) if outer else 1
    return Shape(CrossSection.circle(r*s, fn))
def square(x=1, y=1, center=False):
    """2-D rectangle; centered on the origin when `center` is True."""
    return Shape(CrossSection.square((x, y), center=center))
def polygon(points, fill_rule='even_odd'):
    """Build a Shape from a single contour, filled according to `fill_rule`.

    `fill_rule` is one of 'even_odd', 'negative', 'non_zero', 'positive'.
    Raises ValueError for anything else.
    """
    rule_attr = {
        'even_odd': 'EvenOdd',
        'negative': 'Negative',
        'non_zero': 'NonZero',
        'positive': 'Positive',
    }.get(fill_rule)
    if rule_attr is None:
        raise ValueError(f'{fill_rule=}')
    chosen_rule = getattr(manifold3d.FillRule, rule_attr)
    return Shape(CrossSection([points], fillrule=chosen_rule))
def text(t, size=10, font="Helvetica", fn=8): | polys = svg2polygons(text2svg(t, size=size, font=font), fn=fn) | 4 | 2023-12-11 01:48:22+00:00 | 8k |
PeriniM/Rotary-Pendulum-RL | control/pid/src/main_energy.py | [
{
"identifier": "RealPendulumEnv",
"path": "control/reinforcement_learning/Environments/RealPendulumEnv.py",
"snippet": "class RealPendulumEnv(gym.Env):\n \"\"\"\n Real rotary pendulum with ESP32\n \"\"\"\n\n metadata = {\"render_modes\": [\"human\"]}\n\n def __init__(self, port, baudrate... | from ...reinforcement_learning.Environments import RealPendulumEnv as real
from ...reinforcement_learning.Environments import PyBulletPendulumEnv as pybullet
from ..classes.EnergyController import EnergyController
import numpy as np
import time
| 6,298 |
real_pendulum = False
# Example usage
energy_controller = EnergyController()
K_motor = 0.0
desired_bar_angle = 0
desired_bar_velocity = 0
desired_motor_angle = 0
if real_pendulum:
# initialize RealPendulum environment
env = real.RealPendulumEnv("COM3", 115200)
else:
# initialize PyBulletPendulum environment
|
real_pendulum = False
# Example usage
energy_controller = EnergyController()
K_motor = 0.0
desired_bar_angle = 0
desired_bar_velocity = 0
desired_motor_angle = 0
if real_pendulum:
# initialize RealPendulum environment
env = real.RealPendulumEnv("COM3", 115200)
else:
# initialize PyBulletPendulum environment
| env = pybullet.PyBulletPendulumEnv(render_mode='human')
| 2 | 2023-12-09 11:22:54+00:00 | 8k |
JayYik/GAN_implementations | utils/get_model.py | [
{
"identifier": "DCGAN",
"path": "models/DCGAN.py",
"snippet": "class DCGAN(nn.Module):\n def __init__(self, args):\n super(DCGAN, self).__init__()\n self.G=DCGAN_G(args.hw,args.z_dim,args.in_channels)\n self.D=DCGAN_D(args.hw,args.in_channels)\n # self.G.weight_init()\n ... | import torch
from models.DCGAN import DCGAN
from models.GAN import GAN
from models.WGAN import WGAN_CP
from models.WGAN_GP import WGAN_GP | 7,182 |
def get_model(args):
if args.model == 'DCGAN':
net=DCGAN(args)
elif args.model == 'GAN':
|
def get_model(args):
if args.model == 'DCGAN':
net=DCGAN(args)
elif args.model == 'GAN': | net=GAN(args) | 1 | 2023-12-12 06:24:31+00:00 | 8k |
anyquest/pyaq | aq/jobs/scheduler.py | [
{
"identifier": "JobManager",
"path": "aq/jobs/manager.py",
"snippet": "class JobManager:\n app_jobs: Dict[str, AppJob]\n activity_jobs: Dict[str, Dict[str, List[ActivityJob]]]\n\n def __init__(self):\n self.app_jobs = {}\n self.activity_jobs = {}\n self._logger = logging.g... | import asyncio
import logging
import time
from typing import Dict, Any
from .manager import JobManager, AppJobError
from ..activities import (
ReadActivity,
WriteActivity,
SummarizeActivity,
GenerateActivity,
ExtractActivity,
StoreActivity,
RetrieveActivity,
FunctionActivity,
ReturnActivity
)
from ..types import ActivityType, ActivityJob, JobState | 5,683 |
class WorkItem:
job: ActivityJob
inputs: Dict[str, Any]
def __init__(self, job: ActivityJob, inputs: Dict[str, Any]) -> None:
self.job = job
self.inputs = inputs
class JobScheduler:
def __init__(self, config: Dict[str, Any], job_manager: JobManager,
read_activity: ReadActivity, write_activity: WriteActivity,
summarize_activity: SummarizeActivity, generate_activity: GenerateActivity,
extract_activity: ExtractActivity, store_activity: StoreActivity, retrieve_activity: RetrieveActivity,
|
class WorkItem:
job: ActivityJob
inputs: Dict[str, Any]
def __init__(self, job: ActivityJob, inputs: Dict[str, Any]) -> None:
self.job = job
self.inputs = inputs
class JobScheduler:
def __init__(self, config: Dict[str, Any], job_manager: JobManager,
read_activity: ReadActivity, write_activity: WriteActivity,
summarize_activity: SummarizeActivity, generate_activity: GenerateActivity,
extract_activity: ExtractActivity, store_activity: StoreActivity, retrieve_activity: RetrieveActivity, | function_activity: FunctionActivity, return_activity: ReturnActivity): | 9 | 2023-12-14 13:25:52+00:00 | 8k |
multimodallearning/DG-TTA | dg_tta/run.py | [
{
"identifier": "inject_dg_trainers_into_nnunet",
"path": "dg_tta/__build__.py",
"snippet": "def inject_dg_trainers_into_nnunet(num_epochs=1000):\n dg_trainer_paths = Path(pretraining.__file__).parent.glob(\"nnUNetTrainer*.py\")\n target_dir = Path(nnunetv2.__path__[0], \"training/nnUNetTrainer/va... | import sys
import re
import json
import argparse
import json
import torch
import torch.nn.functional as F
import randomname
import dg_tta
from pathlib import Path
from datetime import datetime
from nnunetv2.run.run_training import run_training_entry as nnunet_run_training_main
from dg_tta.__build__ import inject_dg_trainers_into_nnunet, check_trainers_injected
from dg_tta.utils import check_dga_root_is_set
from dg_tta.tta.torch_utils import generate_label_mapping
from dg_tta.tta.config_log_utils import (
wandb_run,
load_current_modifier_functions,
get_tta_folders,
wandb_run_is_available,
check_dataset_pretrain_config,
)
from dg_tta.tta.tta import tta_main | 6,321 | "--pretrainer",
help="Trainer to use for pretraining",
default=None,
)
parser.add_argument(
"--pretrainer_config",
help="Fold ID of nnUNet model to use for pretraining",
default="3d_fullres",
)
parser.add_argument(
"--pretrainer_fold",
help="Fold ID of nnUNet model to use for pretraining",
default="0",
)
parser.add_argument(
"--tta_dataset_bucket",
help="""Can be one of ['imagesTr', 'imagesTs', 'imagesTrAndTs']""",
default="imagesTs",
)
args = parser.parse_args(sys.argv[2:])
pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold = check_dataset_pretrain_config(
args.pretrained_dataset_id, args.pretrainer, args.pretrainer_config, args.pretrainer_fold
)
dg_tta.tta.config_log_utils.prepare_tta(
pretrained_dataset_id,
int(args.tta_dataset_id),
pretrainer=pretrainer,
pretrainer_config=pretrainer_config,
pretrainer_fold=pretrainer_fold,
tta_dataset_bucket=args.tta_dataset_bucket,
)
def run_tta(self):
check_trainers_injected()
parser = argparse.ArgumentParser(description="Run DG-TTA")
parser.add_argument(
"pretrained_dataset_id",
help="""
Task ID for pretrained model.
Can be numeric or one of ['TS104_GIN', 'TS104_MIND', 'TS104_GIN_MIND']""",
)
parser.add_argument("tta_dataset_id", help="Task ID for TTA")
parser.add_argument(
"--pretrainer",
help="Trainer to use for pretraining",
default=None,
)
parser.add_argument(
"--pretrainer_config",
help="Fold ID of nnUNet model to use for pretraining",
default="3d_fullres",
)
parser.add_argument(
"--pretrainer_fold",
help="Fold ID of nnUNet model to use for pretraining",
default="0",
)
parser.add_argument("--device", help="Device to be used", default="cuda")
args = parser.parse_args(sys.argv[2:])
pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold = check_dataset_pretrain_config(
args.pretrained_dataset_id, args.pretrainer, args.pretrainer_config, args.pretrainer_fold
)
(
tta_data_dir,
plan_dir,
results_dir,
pretrained_dataset_name,
tta_dataset_name,
) = get_tta_folders(
pretrained_dataset_id,
int(args.tta_dataset_id),
pretrainer,
pretrainer_config,
pretrainer_fold,
)
now_str = datetime.now().strftime("%Y%m%d__%H_%M_%S")
numbers = [
int(re.search(r"[0-9]+$", str(_path))[0]) for _path in results_dir.iterdir()
]
if len(numbers) == 0:
run_no = 0
else:
run_no = torch.as_tensor(numbers).max().item() + 1
run_name = f"{now_str}_{randomname.get_name()}-{run_no}"
with open(Path(plan_dir / "tta_plan.json"), "r") as f:
config = json.load(f)
with open(
Path(plan_dir) / f"{pretrained_dataset_name}_label_mapping.json", "r"
) as f:
pretrained_label_mapping = json.load(f)
with open(Path(plan_dir) / f"{tta_dataset_name}_label_mapping.json", "r") as f:
tta_dataset_label_mapping = json.load(f)
label_mapping = generate_label_mapping(
pretrained_label_mapping, tta_dataset_label_mapping
)
modifier_fn_module = load_current_modifier_functions(plan_dir)
device = torch.device(args.device)
kwargs = dict(
run_name=run_name,
config=config,
tta_data_dir=tta_data_dir,
save_base_path=results_dir,
label_mapping=label_mapping,
modifier_fn_module=modifier_fn_module,
device=device,
)
|
PROJECT_NAME = "nnunet_tta"
class DGTTAProgram:
def __init__(self):
parser = argparse.ArgumentParser(
description="DG-TTA for nnUNetv2",
usage="""dgtta <command> [<args>]
Commands are:
inject_trainers Inject DG trainers into nnUNet module
pretrain Pretrain on a dataset with DG trainers
prepare_tta Prepare test-time adaptation
run_tta Run test-time adaptation
""",
)
parser.add_argument("command", help="Subcommand to run")
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print("Unrecognized command")
parser.print_help()
exit(1)
getattr(self, args.command)()
def inject_trainers(self):
parser = argparse.ArgumentParser(
description="Inject DG-TTA trainers into nnUNet module code"
)
parser.add_argument(
"--num_epochs", type=int, default=1000, help="Number of epochs to train"
)
args = parser.parse_args(sys.argv[2:])
inject_dg_trainers_into_nnunet(args.num_epochs)
def pretrain(self):
check_trainers_injected()
print("Dispatching into nnUNetv2_train.")
sys.argv = sys.argv[2:]
sys.argv.insert(0, "nnUNetv2_train")
nnunet_run_training_main()
def prepare_tta(self):
check_trainers_injected()
parser = argparse.ArgumentParser(
description="Prepare DG-TTA", usage="""dgtta prepare_tta [-h]"""
)
parser.add_argument(
"pretrained_dataset_id",
help="""
Task ID for pretrained model.
Can be numeric or one of ['TS104_GIN', 'TS104_MIND', 'TS104_GIN_MIND']""",
)
parser.add_argument("tta_dataset_id", help="Task ID for TTA")
parser.add_argument(
"--pretrainer",
help="Trainer to use for pretraining",
default=None,
)
parser.add_argument(
"--pretrainer_config",
help="Fold ID of nnUNet model to use for pretraining",
default="3d_fullres",
)
parser.add_argument(
"--pretrainer_fold",
help="Fold ID of nnUNet model to use for pretraining",
default="0",
)
parser.add_argument(
"--tta_dataset_bucket",
help="""Can be one of ['imagesTr', 'imagesTs', 'imagesTrAndTs']""",
default="imagesTs",
)
args = parser.parse_args(sys.argv[2:])
pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold = check_dataset_pretrain_config(
args.pretrained_dataset_id, args.pretrainer, args.pretrainer_config, args.pretrainer_fold
)
dg_tta.tta.config_log_utils.prepare_tta(
pretrained_dataset_id,
int(args.tta_dataset_id),
pretrainer=pretrainer,
pretrainer_config=pretrainer_config,
pretrainer_fold=pretrainer_fold,
tta_dataset_bucket=args.tta_dataset_bucket,
)
def run_tta(self):
check_trainers_injected()
parser = argparse.ArgumentParser(description="Run DG-TTA")
parser.add_argument(
"pretrained_dataset_id",
help="""
Task ID for pretrained model.
Can be numeric or one of ['TS104_GIN', 'TS104_MIND', 'TS104_GIN_MIND']""",
)
parser.add_argument("tta_dataset_id", help="Task ID for TTA")
parser.add_argument(
"--pretrainer",
help="Trainer to use for pretraining",
default=None,
)
parser.add_argument(
"--pretrainer_config",
help="Fold ID of nnUNet model to use for pretraining",
default="3d_fullres",
)
parser.add_argument(
"--pretrainer_fold",
help="Fold ID of nnUNet model to use for pretraining",
default="0",
)
parser.add_argument("--device", help="Device to be used", default="cuda")
args = parser.parse_args(sys.argv[2:])
pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold = check_dataset_pretrain_config(
args.pretrained_dataset_id, args.pretrainer, args.pretrainer_config, args.pretrainer_fold
)
(
tta_data_dir,
plan_dir,
results_dir,
pretrained_dataset_name,
tta_dataset_name,
) = get_tta_folders(
pretrained_dataset_id,
int(args.tta_dataset_id),
pretrainer,
pretrainer_config,
pretrainer_fold,
)
now_str = datetime.now().strftime("%Y%m%d__%H_%M_%S")
numbers = [
int(re.search(r"[0-9]+$", str(_path))[0]) for _path in results_dir.iterdir()
]
if len(numbers) == 0:
run_no = 0
else:
run_no = torch.as_tensor(numbers).max().item() + 1
run_name = f"{now_str}_{randomname.get_name()}-{run_no}"
with open(Path(plan_dir / "tta_plan.json"), "r") as f:
config = json.load(f)
with open(
Path(plan_dir) / f"{pretrained_dataset_name}_label_mapping.json", "r"
) as f:
pretrained_label_mapping = json.load(f)
with open(Path(plan_dir) / f"{tta_dataset_name}_label_mapping.json", "r") as f:
tta_dataset_label_mapping = json.load(f)
label_mapping = generate_label_mapping(
pretrained_label_mapping, tta_dataset_label_mapping
)
modifier_fn_module = load_current_modifier_functions(plan_dir)
device = torch.device(args.device)
kwargs = dict(
run_name=run_name,
config=config,
tta_data_dir=tta_data_dir,
save_base_path=results_dir,
label_mapping=label_mapping,
modifier_fn_module=modifier_fn_module,
device=device,
)
| if wandb_run_is_available(): | 7 | 2023-12-08 08:43:11+00:00 | 8k |
tommy-xq/SA2VP | datasets.py | [
{
"identifier": "RandomResizedCropAndInterpolationWithTwoPic",
"path": "transforms.py",
"snippet": "class RandomResizedCropAndInterpolationWithTwoPic:\n \"\"\"Crop the given PIL Image to random size and aspect ratio with random interpolation.\n\n A crop of random size (default: of 0.08 to 1.0) of ... | import os
import torch
import json
import PIL.Image
import random
import pickle
import numpy as np
import csv
import scipy.io as sio
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from transforms import RandomResizedCropAndInterpolationWithTwoPic, RandomResizedCropAndInterpolationWithTwoPicVal
from timm.data import create_transform
from dataset_folder import ImageFolder
from timm.data.transforms import str_to_interp_mode
from pathlib import Path
from typing import Any, Tuple, Callable, Optional
from torchvision.datasets.utils import verify_str_arg, download_and_extract_archive,check_integrity
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets.folder import make_dataset
from PIL import Image | 5,804 | train_list_path = os.path.join(self.dataset_root, 'train.json')
test_list_path = os.path.join(self.dataset_root, 'val.json')
elif my_mode == 'trainval_test':
train_list_path = os.path.join(self.dataset_root, 'train.json')
test_list_path = os.path.join(self.dataset_root, 'test.json') # test
else:
train_list_path = None
test_list_path = None
self.samples = []
if train:
with open(train_list_path, 'r') as f:
content = json.load(f)
for name in content:
img_name = 'Images/'+name
label = int(content[name])-1
self.samples.append((os.path.join(root,img_name), label))
else:
with open(test_list_path, 'r') as f:
content = json.load(f)
for name in content:
img_name = 'Images/'+name
label = int(content[name])-1
self.samples.append((os.path.join(root,img_name), label))
class FGVC_car(datasets.folder.ImageFolder):
    """Cars FGVC dataset driven by train/val/test JSON split files.

    The split JSONs map image file names to labels; labels appear to be
    1-based in the files and are shifted to 0-based here (TODO confirm
    against the split files).  NOTE(review): ImageFolder.__init__ is
    deliberately not called -- `loader`, transforms and `samples` are
    populated by hand, so only ImageFolder's __getitem__/__len__ machinery
    is inherited.
    """
    def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None, is_individual_prompt=False, **kwargs):
        self.dataset_root = root
        self.loader = datasets.folder.default_loader
        self.target_transform = None
        self.transform = transform
        # 'train_val': evaluate on val.json; 'trainval_test': evaluate on
        # test.json.  Any other mode leaves the paths unset (open() would
        # then fail -- presumably never reached; verify callers).
        if my_mode == 'train_val':
            train_list_path = os.path.join(self.dataset_root, 'train.json')
            test_list_path = os.path.join(self.dataset_root, 'val.json')
        elif my_mode == 'trainval_test':
            train_list_path = os.path.join(self.dataset_root, 'train.json')
            test_list_path = os.path.join(self.dataset_root, 'test.json')  # test
        else:
            train_list_path = None
            test_list_path = None
        # samples: list of (absolute image path, 0-based int label)
        self.samples = []
        if train:
            with open(train_list_path, 'r') as f:
                content = json.load(f)
                for name in content:
                    img_name = name
                    label = int(content[name])-1  # shift 1-based label to 0-based
                    self.samples.append((os.path.join(root, img_name), label))
        else:
            with open(test_list_path, 'r') as f:
                content = json.load(f)
                for name in content:
                    img_name = name
                    label = int(content[name])-1  # shift 1-based label to 0-based
                    self.samples.append((os.path.join(root, img_name), label))
class DataAugmentationForBEiT(object):
    """BEiT-style two-view augmentation.

    Produces, from one input image, a normalized patch view (for the
    backbone) and a second view for the discrete VAE tokenizer.
    """
    def __init__(self, args):
        # Choose normalization stats: ImageNet default vs Inception-style.
        imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
        mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
        std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD

        # Shared geometric/color transform returning BOTH crops at once so
        # the two views stay spatially aligned.
        self.common_transform = transforms.Compose([
            transforms.ColorJitter(0.4, 0.4, 0.4),
            transforms.RandomHorizontalFlip(p=0.5),
            RandomResizedCropAndInterpolationWithTwoPic(
                size=args.input_size, second_size=args.second_input_size,
                interpolation=args.train_interpolation, second_interpolation=args.second_interpolation,
            ),
        ])

        # View 1: normalized tensor fed to the vision backbone.
        self.patch_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                mean=torch.tensor(mean),
                std=torch.tensor(std))
        ])

        # View 2: input expected by the discrete VAE tokenizer.
        if args.discrete_vae_type == "dall-e":
            # map_pixels is DALL-E's input squashing -- presumably imported
            # elsewhere in this module; verify.
            self.visual_token_transform = transforms.Compose([
                transforms.ToTensor(),
                map_pixels,
            ])
        elif args.discrete_vae_type == "customized":
            self.visual_token_transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=IMAGENET_INCEPTION_MEAN,
                    std=IMAGENET_INCEPTION_STD,
                ),
            ])
        else:
            raise NotImplementedError()

    def __call__(self, image):
        """Return (patch tensor, visual-token tensor) for one PIL image."""
        for_patches, for_visual_tokens = self.common_transform(image)
        return \
            self.patch_transform(for_patches), self.visual_token_transform(for_visual_tokens)

    def __repr__(self):
        repr = "(DataAugmentationForBEiT,\n"
        repr += "  common_transform = %s,\n" % str(self.common_transform)
        repr += "  patch_transform = %s,\n" % str(self.patch_transform)
        repr += "  visual_tokens_transform = %s,\n" % str(self.visual_token_transform)
        repr += ")"
        return repr
class DataAugmentationForBEiT_val(object):
def __init__(self, args):
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
self.common_transform = transforms.Compose([
| # --------------------------------------------------------
# SA2VP: Spatially Aligned-and-Adapted Visual Prompt code
# reference:
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Based on timm
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------'
# for food 101
# for adding new datasets =========================
class DTD(VisionDataset):
    """`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.

    Args:
        root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
        partition (int, optional): The dataset partition. Should be ``1 <= partition <= 10``. Defaults to ``1``.

            .. note::
                The partition only changes which split each image belongs to. Thus, regardless of the selected
                partition, combining all splits will result in all images.

        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again. Default is False.
    """

    _URL = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
    _MD5 = "fff73e5086ae6bdbea199a49dfb8a4c1"

    def __init__(
        self,
        root: str,
        split: str = "train",
        partition: int = 1,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        # BUG FIX: the original combined the negated requirements with `and`,
        # which let nearly every invalid value through (e.g. partition=20 or
        # partition=1.5 raised nothing); they must be combined with `or`.
        if not isinstance(partition, int) or not (1 <= partition <= 10):
            raise ValueError(
                f"Parameter 'partition' should be an integer with `1 <= partition <= 10`, "
                f"but got {partition} instead"
            )
        self._partition = partition

        super().__init__(root, transform=transform, target_transform=target_transform)
        self._base_folder = Path(root)
        self._data_folder = self._base_folder / "dtd"
        self._meta_folder = self._data_folder / "labels"
        self._images_folder = self._data_folder / "images"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        # Each line of the split file names one image as "<class>/<file>".
        self._image_files = []
        classes = []
        with open(self._meta_folder / f"{self._split}{self._partition}.txt") as file:
            for line in file:
                cls, name = line.strip().split("/")
                self._image_files.append(self._images_folder.joinpath(cls, name))
                classes.append(cls)

        self.classes = sorted(set(classes))
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
        self._labels = [self.class_to_idx[cls] for cls in classes]

    def __len__(self) -> int:
        return len(self._image_files)

    def __getitem__(self, idx):
        """Return (transformed RGB image, transformed class index) for sample `idx`."""
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def extra_repr(self) -> str:
        return f"split={self._split}, partition={self._partition}"

    def _check_exists(self) -> bool:
        return os.path.exists(self._data_folder) and os.path.isdir(self._data_folder)

    def _download(self) -> None:
        # No-op when the extracted "dtd" folder is already present.
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, download_root=str(self._base_folder), md5=self._MD5)
# Human-readable descriptions of the 43 GTSRB traffic-sign categories,
# ordered by class id; GTSRB below formats these into CLIP-style text
# prompts ("a zoomed in photo of a {} traffic sign.").
# NOTE(review): 'horizonal' and 'bicyle' are misspellings, but these strings
# are consumed at runtime as prompt text (their exact wording affects the
# text embeddings), so they are deliberately left untouched here.
classes = [
    'red and white circle 20 kph speed limit',
    'red and white circle 30 kph speed limit',
    'red and white circle 50 kph speed limit',
    'red and white circle 60 kph speed limit',
    'red and white circle 70 kph speed limit',
    'red and white circle 80 kph speed limit',
    'end / de-restriction of 80 kph speed limit',
    'red and white circle 100 kph speed limit',
    'red and white circle 120 kph speed limit',
    'red and white circle red car and black car no passing',
    'red and white circle red truck and black car no passing',
    'red and white triangle road intersection warning',
    'white and yellow diamond priority road',
    'red and white upside down triangle yield right-of-way',
    'stop',
    'empty red and white circle',
    'red and white circle no truck entry',
    'red circle with white horizonal stripe no entry',
    'red and white triangle with exclamation mark warning',
    'red and white triangle with black left curve approaching warning',
    'red and white triangle with black right curve approaching warning',
    'red and white triangle with black double curve approaching warning',
    'red and white triangle rough / bumpy road warning',
    'red and white triangle car skidding / slipping warning',
    'red and white triangle with merging / narrow lanes warning',
    'red and white triangle with person digging / construction / road work warning',
    'red and white triangle with traffic light approaching warning',
    'red and white triangle with person walking warning',
    'red and white triangle with child and person walking warning',
    'red and white triangle with bicyle warning',
    'red and white triangle with snowflake / ice warning',
    'red and white triangle with deer warning',
    'white circle with gray strike bar no speed limit',
    'blue circle with white right turn arrow mandatory',
    'blue circle with white left turn arrow mandatory',
    'blue circle with white forward arrow mandatory',
    'blue circle with white forward or right turn arrow mandatory',
    'blue circle with white forward or left turn arrow mandatory',
    'blue circle with white keep right arrow mandatory',
    'blue circle with white keep left arrow mandatory',
    'blue circle with white arrows indicating a traffic circle',
    'white circle with gray strike bar indicating no passing for cars has ended',
    'white circle with gray strike bar indicating no passing for trucks has ended',
]
class GTSRB(VisionDataset):
    """`German Traffic Sign Recognition Benchmark (GTSRB) <https://benchmark.ini.rub.de/>`_ Dataset.
    Args:
        root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
        transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    def __init__(
        self,
        root: str,
        # args,
        split: str = "train",
        # Fraction of the training images assigned to "train"; the remaining
        # tail (after an in-place shuffle) becomes "val".
        percentage: float = 0.8,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        self._base_folder = Path(root) / "gtsrb"
        # "train" and "val" both read from the official Training folder;
        # "test" reads the Final_Test images plus a ground-truth csv.
        self._target_folder = (
            self._base_folder / "GTSRB" / ("Training" if self._split in ["train", "val"] else "Final_Test/Images")
        )
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")
        if self._split in ["train", "val"]:
            # Class labels come from the per-class subdirectory structure.
            samples = make_dataset(str(self._target_folder), extensions=(".ppm",))
        else:
            # Test labels live in a semicolon-separated ground-truth csv.
            with open(self._base_folder / "GT-final_test.csv") as csv_file:
                samples = [
                    (str(self._target_folder / row["Filename"]), int(row["ClassId"]))
                    for row in csv.DictReader(csv_file, delimiter=";", skipinitialspace=True)
                ]
        # self._samples = samples
        # self.transform = transform
        # self.target_transform = target_transform
        # NOTE(review): this shuffle is not seeded, so the train/val
        # partition differs between runs and between independently built
        # "train" and "val" dataset objects (they may overlap) — confirm
        # this is intended.
        if split in ["train", "val"]:
            random.shuffle(samples)
        else:
            self._samples = samples
        if split == "train":
            self._samples = samples[:int(percentage*len(samples))]
        if split == "val":
            self._samples = samples[int(percentage*len(samples)):]
        # Classes are rendered as CLIP-style prompts from the module-level
        # ``classes`` description list.
        self.classes = ['a zoomed in photo of a {} traffic sign.'.format(class_name) \
                        for class_name in classes]
    def __len__(self) -> int:
        # Number of (path, class-id) samples in this split.
        return len(self._samples)
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        # Decode the image as RGB and apply the optional transform hooks.
        path, target = self._samples[index]
        sample = PIL.Image.open(path).convert("RGB")
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        #data = {
        #"image": sample,
        #"label": target
        #}
        return sample,target
    def _check_exists(self) -> bool:
        # Presence of the extracted image folder is the existence signal.
        return self._target_folder.is_dir()
    def download(self) -> None:
        """Download and extract the split's archives (plus test ground truth)."""
        if self._check_exists():
            return
        base_url = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
        if self._split in ["train", "val"]:
            download_and_extract_archive(
                f"{base_url}GTSRB-Training_fixed.zip",
                download_root=str(self._base_folder),
                md5="513f3c79a4c5141765e10e952eaa2478",
            )
        else:
            download_and_extract_archive(
                f"{base_url}GTSRB_Final_Test_Images.zip",
                download_root=str(self._base_folder),
                md5="c7e4e6327067d32654124b0fe9e82185",
            )
            download_and_extract_archive(
                f"{base_url}GTSRB_Final_Test_GT.zip",
                download_root=str(self._base_folder),
                md5="fe31e9c9270bbcd7b84b7f21a9d9d9e5",
            )
class Food101(VisionDataset):
    """`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/>`_.
    The Food-101 is a challenging data set of 101 food categories, with 101'000 images.
    For each class, 250 manually reviewed test images are provided as well as 750 training images.
    On purpose, the training images were not cleaned, and thus still contain some amount of noise.
    This comes mostly in the form of intense colors and sometimes wrong labels. All images were
    rescaled to have a maximum side length of 512 pixels.
    Args:
        root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default) and ``"test"``.
        transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again. Default is False.
    """
    # Official archive location and checksum.
    _URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
    _MD5 = "85eeb15f3717b99a5da872d97d918f87"
    def __init__(
        self,
        root: str,
        # args,
        split: str = "train",
        # Fraction of the shuffled train metadata used for "train";
        # the remaining tail becomes "val".
        percentage: float = 0.8,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "val", "test"))
        self._base_folder = Path(self.root) / "food-101"
        self._meta_folder = self._base_folder / "meta"
        self._images_folder = self._base_folder / "images"
        if download:
            self._download()
        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")
        self._labels = []
        self._image_files = []
        # Both "train" and "val" are carved out of the official train metadata.
        split_name = "test" if split == "test" else "train"
        with open(self._meta_folder / f"{split_name}.json") as f:
            metadata = json.loads(f.read())
        self.classes = sorted(metadata.keys())
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
        for class_label, im_rel_paths in metadata.items():
            self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths)
            self._image_files += [
                self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths
            ]
        if split in ["train", "val"]:
            # NOTE(review): the shuffle is not seeded, so the train/val
            # partition differs between runs and between separately built
            # train and val datasets — confirm this is intended.
            data_zip = list(zip(self._labels, self._image_files))
            random.shuffle(data_zip)
            self._labels[:], self._image_files[:] = zip(*data_zip)
            del data_zip
        if split == "train":
            self._labels = self._labels[:int(percentage*len(self._labels))]
            self._image_files = self._image_files[:int(percentage*len(self._image_files))]
        if split == "val":
            self._labels = self._labels[int(percentage*len(self._labels)):]
            self._image_files = self._image_files[int(percentage*len(self._image_files)):]
    def __len__(self) -> int:
        # Number of images in this split.
        return len(self._image_files)
    def __getitem__(self, idx) -> Tuple[Any, Any]:
        # Decode as RGB and apply the optional transform hooks.
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        # Fixed: the original built an unused ``sample`` dict here on every
        # access; that dead allocation has been removed.
        return image, label
    def extra_repr(self) -> str:
        return f"split={self._split}"
    def _check_exists(self) -> bool:
        # Both the metadata and the image folders must be present.
        return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder))
    def _download(self) -> None:
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
class CIFAR10(VisionDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    # On-disk layout and md5 checksums of the official CIFAR-10 python archive.
    base_folder = "cifar-10-batches-py"
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = "c58f30108f718f92721af3b95e74349a"
    train_list = [
        ["data_batch_1", "c99cafc152244af753f735de768cd75f"],
        ["data_batch_2", "d4bba439e000b95fd0a9bffe97cbabec"],
        ["data_batch_3", "54ebc095f3ab1f0389bbae665268c751"],
        ["data_batch_4", "634d18415352ddfa80567beed471001a"],
        ["data_batch_5", "482c414d41f54cd18b22e5b47cb7c3cb"],
    ]
    test_list = [
        ["test_batch", "40351d587109b95175f43aff81a1287e"],
    ]
    meta = {
        "filename": "batches.meta",
        "key": "label_names",
        "md5": "5ff9c542aee3614f3951f8cda6e48888",
    }
    def __init__(
        self,
        root: str,
        # args,
        split: str = "train",
        # Fraction of the official training batches assigned to "train";
        # the remaining tail becomes "val".
        percentage: float = 0.8,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.split = split  # training set or test set
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
        # "train" and "val" both read the five training batches; "test"
        # reads the single test batch.
        if self.split == "train" or self.split == "val":
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        self.data: Any = []
        self.targets = []
        # now load the picked numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, "rb") as f:
                entry = pickle.load(f, encoding="latin1")
                self.data.append(entry["data"])
                # CIFAR-10 batches store "labels"; CIFAR-100 stores "fine_labels".
                if "labels" in entry:
                    self.targets.extend(entry["labels"])
                else:
                    self.targets.extend(entry["fine_labels"])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        # NOTE(review): no shuffle here — "val" is just the un-shuffled tail
        # of the concatenated training batches; confirm this is intended.
        if self.split == "train":
            self.data = self.data[:int(percentage*len(self.data))]
            self.targets = self.targets[:int(percentage*len(self.targets))]
        if self.split == "val":
            self.data = self.data[int(percentage*len(self.data)):]
            self.targets = self.targets[int(percentage*len(self.targets)):]
        self._load_meta()
    def _load_meta(self) -> None:
        # Populate self.classes / self.class_to_idx from the batches.meta file.
        path = os.path.join(self.root, self.base_folder, self.meta["filename"])
        if not check_integrity(path, self.meta["md5"]):
            raise RuntimeError("Dataset metadata file not found or corrupted. You can use download=True to download it")
        with open(path, "rb") as infile:
            data = pickle.load(infile, encoding="latin1")
            self.classes = data[self.meta["key"]]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], self.targets[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        #sample = {
        #"image": img,
        #"label": target
        #}
        return img, target
    def __len__(self) -> int:
        # Number of images in this split.
        return len(self.data)
    def _check_integrity(self) -> bool:
        # Verify every batch file (train and test) against its md5.
        root = self.root
        for fentry in self.train_list + self.test_list:
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True
    def download(self) -> None:
        if self._check_integrity():
            logger.info("Files already downloaded and verified")
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
    def extra_repr(self) -> str:
        split = self.split
        return f"Split: {split}"
class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    This is a subclass of the `CIFAR10` Dataset.
    """
    # Only the archive layout and checksums differ from CIFAR-10; all
    # loading logic is inherited.
    base_folder = "cifar-100-python"
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = "eb9058c3a382ffc7106e4002c42a8d85"
    train_list = [
        ["train", "16019d7e3df5f24257cddd939b257f8d"],
    ]
    test_list = [
        ["test", "f0ef6b0ae62326f3e7ffdfab6717acfc"],
    ]
    meta = {
        "filename": "meta",
        # CIFAR-100 uses the 100 fine-grained label names.
        "key": "fine_label_names",
        "md5": "7973b15100ade9c7d40fb424638fde48",
    }
class SVHN(VisionDataset):
    """`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset.
    Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
    we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions which
    expect the class labels to be in the range `[0, C-1]`
    .. warning::
        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format.
    Args:
        root (string): Root directory of dataset where directory
            ``SVHN`` exists.
        split (string): One of {'train', 'test', 'extra'}.
            Accordingly dataset is selected. 'extra' is Extra training set.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    # Per-split (url, filename, md5); "val" deliberately reuses the train
    # archive and is carved out of it by ``percentage`` below.
    split_list = {
        "train": [
            "http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
            "train_32x32.mat",
            "e26dedcc434d2e4c54c9b2d4a06d8373",
        ],
        "val": [
            "http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
            "train_32x32.mat",
            "e26dedcc434d2e4c54c9b2d4a06d8373",
        ],
        "test": [
            "http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
            "test_32x32.mat",
            "eb5a983be6a315427106f1b164d9cef3",
        ],
        "extra": [
            "http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
            "extra_32x32.mat",
            "a93ce644f1a588dc4d68dda5feec44a7",
        ],
    }
    def __init__(
        self,
        root: str,
        # args,
        split: str = "train",
        # Fraction of the train archive used for "train"; the rest is "val".
        percentage: float = 0.8,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
        self.url = self.split_list[split][0]
        self.filename = self.split_list[split][1]
        self.file_md5 = self.split_list[split][2]
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
        # import here rather than at top of file because this is
        # an optional dependency for torchvision
        # reading(loading) mat file as array
        loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
        self.data = loaded_mat["X"]
        # loading from the .mat file gives an np array of type np.uint8
        # converting to np.int64, so that we have a LongTensor after
        # the conversion from the numpy array
        # the squeeze is needed to obtain a 1D tensor
        self.labels = loaded_mat["y"].astype(np.int64).squeeze()
        # the svhn dataset assigns the class label "10" to the digit 0
        # this makes it inconsistent with several loss functions
        # which expect the class labels to be in the range [0, C-1]
        np.place(self.labels, self.labels == 10, 0)
        self.data = np.transpose(self.data, (3, 2, 0, 1))
        # NOTE(review): no shuffle — "val" is the raw tail of the train
        # archive; and since self.classes is computed AFTER this slicing,
        # a "val" split could in principle miss digits that do not appear
        # in the tail. Confirm this is intended.
        if split == "train":
            self.labels = self.labels[:int(percentage*len(self.labels))]
            self.data = self.data[:int(percentage*len(self.data))]
        if split == "val":
            self.labels = self.labels[int(percentage*len(self.labels)):]
            self.data = self.data[int(percentage*len(self.data)):]
        self.classes = [str(class_name) for class_name in sorted(list(set(self.labels)))]
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], int(self.labels[index])
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        #sample = {
        #"image": img,
        #"label": target
        #}
        return img, target
    def __len__(self) -> int:
        # Number of digit images in this split.
        return len(self.data)
    def _check_integrity(self) -> bool:
        # Verify the split's .mat file against its md5.
        root = self.root
        md5 = self.split_list[self.split][2]
        fpath = os.path.join(root, self.filename)
        return check_integrity(fpath, md5)
    def download(self) -> None:
        md5 = self.split_list[self.split][2]
        download_url(self.url, self.root, self.filename, md5)
    def extra_repr(self) -> str:
        return "Split: {split}".format(**self.__dict__)
# end =========================
# NOTE(review): the triple-quoted block below is a verbatim copy of an
# earlier Food101 implementation that was disabled by turning it into a
# bare string literal (a no-op expression statement). It is kept unchanged
# here; consider deleting it outright once confirmed unneeded.
"""
class Food101(VisionDataset):
    _URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
    _MD5 = "85eeb15f3717b99a5da872d97d918f87"
    def __init__(
        self,
        root: str,
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._split = verify_str_arg(split, "split", ("train", "test"))
        self._base_folder = Path(self.root) / "food-101"
        self._meta_folder = self._base_folder / "meta"
        self._images_folder = self._base_folder / "images"
        if download:
            self._download()
        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")
        self._labels = []
        self._image_files = []
        with open(self._meta_folder / f"{split}.json") as f:
            metadata = json.loads(f.read())
        self.classes = sorted(metadata.keys())
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
        for class_label, im_rel_paths in metadata.items():
            self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths)
            self._image_files += [
                self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths
            ]
    def __len__(self) -> int:
        return len(self._image_files)
    def __getitem__(self, idx) -> Tuple[Any, Any]:
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label
    def extra_repr(self) -> str:
        return f"split={self._split}"
    def _check_exists(self) -> bool:
        return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder))
    def _download(self) -> None:
        if self._check_exists():
            return
        download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
"""
class VTAB(datasets.folder.ImageFolder):
    """VTAB-1k loader driven by plain-text ``<relative-path> <label>`` lists."""

    # Per-mode (train list, eval list) file names inside ``root``; the
    # fallback pair is used for any other ``my_mode`` value.
    _SPLIT_FILES = {
        'train_val': ('train800.txt', 'val200.txt'),
        'trainval_test': ('train800val200.txt', 'test.txt'),
    }
    _DEFAULT_FILES = ('train800val200.txt', 'val200.txt')

    def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None, is_individual_prompt=False, **kwargs):
        self.dataset_root = root
        self.loader = datasets.folder.default_loader
        self.target_transform = None
        self.transform = transform
        # Pick the list file for the requested mode and train/eval side.
        train_file, eval_file = self._SPLIT_FILES.get(my_mode, self._DEFAULT_FILES)
        list_path = os.path.join(self.dataset_root, train_file if train else eval_file)
        self.samples = []
        with open(list_path, 'r') as handle:
            for record in handle:
                fields = record.split(' ')
                # fields[0] is the image path, fields[1] the integer label.
                self.samples.append((os.path.join(root, fields[0]), int(fields[1])))
class FGVC_cub(datasets.folder.ImageFolder):
    """CUB-200 loader backed by ``{train,val,test}.json`` name-to-label maps."""

    def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None, is_individual_prompt=False, **kwargs):
        self.dataset_root = root
        self.loader = datasets.folder.default_loader
        self.target_transform = None
        self.transform = transform
        # Resolve the annotation json for the requested mode/side; unknown
        # modes keep the original behavior of failing when opened (None path).
        if my_mode == 'train_val':
            split_json = 'train.json' if train else 'val.json'
        elif my_mode == 'trainval_test':
            split_json = 'train.json' if train else 'test.json'
        else:
            split_json = None
        list_path = os.path.join(self.dataset_root, split_json) if split_json is not None else None
        self.samples = []
        with open(list_path, 'r') as handle:
            mapping = json.load(handle)
            for name in mapping:
                # Labels in the json are 1-based; shift to 0-based class ids.
                self.samples.append((os.path.join(root, 'images/' + name), int(mapping[name]) - 1))
class FGVC_bird(datasets.folder.ImageFolder):
    """NABirds-style loader backed by ``{train,val,test}.json`` label maps."""

    def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None, is_individual_prompt=False, **kwargs):
        self.dataset_root = root
        self.loader = datasets.folder.default_loader
        self.target_transform = None
        self.transform = transform
        # Resolve the annotation json for the requested mode/side; unknown
        # modes keep the original behavior of failing when opened (None path).
        if my_mode == 'train_val':
            split_json = 'train.json' if train else 'val.json'
        elif my_mode == 'trainval_test':
            split_json = 'train.json' if train else 'test.json'
        else:
            split_json = None
        list_path = os.path.join(self.dataset_root, split_json) if split_json is not None else None
        self.samples = []
        with open(list_path, 'r') as handle:
            mapping = json.load(handle)
            for name in mapping:
                # Labels here are already 0-based — no offset applied.
                self.samples.append((os.path.join(root, 'images/' + name), int(mapping[name])))
class FGVC_flower(datasets.folder.ImageFolder):
    """Oxford Flowers loader backed by ``{train,val,test}.json`` label maps."""

    def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None, is_individual_prompt=False, **kwargs):
        self.dataset_root = root
        self.loader = datasets.folder.default_loader
        self.target_transform = None
        self.transform = transform
        # Resolve the annotation json for the requested mode/side; unknown
        # modes keep the original behavior of failing when opened (None path).
        if my_mode == 'train_val':
            split_json = 'train.json' if train else 'val.json'
        elif my_mode == 'trainval_test':
            split_json = 'train.json' if train else 'test.json'
        else:
            split_json = None
        list_path = os.path.join(self.dataset_root, split_json) if split_json is not None else None
        self.samples = []
        with open(list_path, 'r') as handle:
            mapping = json.load(handle)
            for name in mapping:
                # Paths are relative to root directly (no images/ prefix);
                # labels are 1-based in the json, shifted to 0-based ids.
                self.samples.append((os.path.join(root, name), int(mapping[name]) - 1))
class FGVC_dog(datasets.folder.ImageFolder):
    """Stanford Dogs loader backed by ``{train,val,test}.json`` label maps."""

    def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None, is_individual_prompt=False, **kwargs):
        self.dataset_root = root
        self.loader = datasets.folder.default_loader
        self.target_transform = None
        self.transform = transform
        # Resolve the annotation json for the requested mode/side; unknown
        # modes keep the original behavior of failing when opened (None path).
        if my_mode == 'train_val':
            split_json = 'train.json' if train else 'val.json'
        elif my_mode == 'trainval_test':
            split_json = 'train.json' if train else 'test.json'
        else:
            split_json = None
        list_path = os.path.join(self.dataset_root, split_json) if split_json is not None else None
        self.samples = []
        with open(list_path, 'r') as handle:
            mapping = json.load(handle)
            for name in mapping:
                # Stanford Dogs stores images under a capitalized "Images/";
                # labels are 1-based in the json, shifted to 0-based ids.
                self.samples.append((os.path.join(root, 'Images/' + name), int(mapping[name]) - 1))
class FGVC_car(datasets.folder.ImageFolder):
    """Stanford Cars loader backed by ``{train,val,test}.json`` label maps."""

    def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None, is_individual_prompt=False, **kwargs):
        self.dataset_root = root
        self.loader = datasets.folder.default_loader
        self.target_transform = None
        self.transform = transform
        # Resolve the annotation json for the requested mode/side; unknown
        # modes keep the original behavior of failing when opened (None path).
        if my_mode == 'train_val':
            split_json = 'train.json' if train else 'val.json'
        elif my_mode == 'trainval_test':
            split_json = 'train.json' if train else 'test.json'
        else:
            split_json = None
        list_path = os.path.join(self.dataset_root, split_json) if split_json is not None else None
        self.samples = []
        with open(list_path, 'r') as handle:
            mapping = json.load(handle)
            for name in mapping:
                # Paths are relative to root directly (no images/ prefix);
                # labels are 1-based in the json, shifted to 0-based ids.
                self.samples.append((os.path.join(root, name), int(mapping[name]) - 1))
class DataAugmentationForBEiT(object):
    """Training-time transform pipeline for BEiT pre-training.

    Produces two aligned views per input image: a normalized patch tensor
    for the backbone and a second tensor for the discrete-VAE tokenizer.
    """
    def __init__(self, args):
        # Select normalization statistics according to the config flag.
        imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
        mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
        std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
        # Shared color/geometry augmentation that emits both crops at once.
        self.common_transform = transforms.Compose([
            transforms.ColorJitter(0.4, 0.4, 0.4),
            transforms.RandomHorizontalFlip(p=0.5),
            RandomResizedCropAndInterpolationWithTwoPic(
                size=args.input_size, second_size=args.second_input_size,
                interpolation=args.train_interpolation, second_interpolation=args.second_interpolation,
            ),
        ])
        self.patch_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                mean=torch.tensor(mean),
                std=torch.tensor(std))
        ])
        # The tokenizer view is normalized differently per VAE flavor.
        if args.discrete_vae_type == "dall-e":
            self.visual_token_transform = transforms.Compose([
                transforms.ToTensor(),
                map_pixels,
            ])
        elif args.discrete_vae_type == "customized":
            self.visual_token_transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=IMAGENET_INCEPTION_MEAN,
                    std=IMAGENET_INCEPTION_STD,
                ),
            ])
        else:
            raise NotImplementedError()
    def __call__(self, image):
        # One shared augmentation, then the two per-branch normalizations.
        for_patches, for_visual_tokens = self.common_transform(image)
        return \
            self.patch_transform(for_patches), self.visual_token_transform(for_visual_tokens)
    def __repr__(self):
        # Fixed: the local accumulator was named ``repr``, shadowing the
        # ``repr`` builtin inside this method; renamed to ``text``.
        # The produced string is unchanged.
        text = "(DataAugmentationForBEiT,\n"
        text += "  common_transform = %s,\n" % str(self.common_transform)
        text += "  patch_transform = %s,\n" % str(self.patch_transform)
        text += "  visual_tokens_transform = %s,\n" % str(self.visual_token_transform)
        text += ")"
        return text
class DataAugmentationForBEiT_val(object):
def __init__(self, args):
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
self.common_transform = transforms.Compose([ | RandomResizedCropAndInterpolationWithTwoPicVal( | 1 | 2023-12-12 13:19:17+00:00 | 8k |
crashdev226/freelancer-create-account-bot | bot_create.py | [
{
"identifier": "FreelancerBot",
"path": "FreelancerBot.py",
"snippet": "class FreelancerBot:\n # Constructor\n def __init__(self):\n pass\n def create(self): \n profile = None\n with open(\"./profile.json\", \"r+\") as file:\n profile = json.load(file)[\"... | import argparse
from FreelancerBot import FreelancerBot
from UpworkBot import UpworkBot | 4,501 | parser = argparse.ArgumentParser()
parser.add_argument("-t", "--type", help="Specify whether freelancer or upwork.")
args = parser.parse_args()
account_type=args.type
if not isinstance(account_type, str):
raise TypeError("Missing/Incorrect account_type")
if account_type == "freelancer":
bot=FreelancerBot()
bot.create()
if account_type == "upwork":
| parser = argparse.ArgumentParser()
parser.add_argument("-t", "--type", help="Specify whether freelancer or upwork.")
args = parser.parse_args()
account_type=args.type
if not isinstance(account_type, str):
raise TypeError("Missing/Incorrect account_type")
if account_type == "freelancer":
bot=FreelancerBot()
bot.create()
if account_type == "upwork": | bot=UpworkBot() | 1 | 2023-12-05 00:14:06+00:00 | 8k |
ChatClue/ChatClue | osiris.py | [
{
"identifier": "OSHelper",
"path": "utils/os/helpers.py",
"snippet": "class OSHelper:\n \"\"\"\n Provides utility methods for operating system level operations, particularly file management.\n\n This class includes static methods for performing various file system tasks such as cleaning up orp... | from config import CELERY_CONFIG, LOG_LEVEL, VIDEO_SETTINGS
from utils.os.helpers import OSHelper
from celery import Celery
from celery_config import get_celery_app
from database.setup import DatabaseSetup
from broadcast.broadcaster import broadcaster
from audio.audio_processor import AudioProcessor
from video.video_processor import VideoProcessor
from audio.audio_out import get_audio_out
from utils.os.helpers import OSHelper
from utils.text.welcome import welcome_message
from utils.logging.colors import ColorFormatter
from background.memory.tasks import *
from tools import * # Import all openai tool functions
import logging
import subprocess
import atexit
import sys
import threading
import time
import cv2
import queue | 6,852 |
# Configure basic logging for the application
logging.basicConfig(level=LOG_LEVEL)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
handler.setFormatter(ColorFormatter('%(asctime)s - %(levelname)s - %(message)s'))
# Ensure the necessary tmp/ directories exist
OSHelper.configure_tmp_directories()
# Configure background processor / subconscious systems
|
# Configure basic logging for the application
logging.basicConfig(level=LOG_LEVEL)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
handler.setFormatter(ColorFormatter('%(asctime)s - %(levelname)s - %(message)s'))
# Ensure the necessary tmp/ directories exist
OSHelper.configure_tmp_directories()
# Configure background processor / subconcious systems | celery_app = get_celery_app() | 1 | 2023-12-06 09:10:06+00:00 | 8k |
GXNU-ZhongLab/ODTrack | lib/test/tracker/odtrack.py | [
{
"identifier": "build_odtrack",
"path": "lib/models/odtrack/odtrack.py",
"snippet": "def build_odtrack(cfg, training=True):\r\n current_dir = os.path.dirname(os.path.abspath(__file__)) # This is your Project Root\r\n pretrained_path = os.path.join(current_dir, '../../../pretrained_networks')\r\n... | import math
import numpy as np
import torch
import cv2
import os
from lib.models.odtrack import build_odtrack
from lib.test.tracker.basetracker import BaseTracker
from lib.test.tracker.vis_utils import gen_visualization
from lib.test.utils.hann import hann2d
from lib.train.data.processing_utils import sample_target
from lib.test.tracker.data_utils import Preprocessor
from lib.utils.box_ops import clip_box
from lib.utils.ce_utils import generate_mask_cond | 4,163 |
# for debug
class ODTrack(BaseTracker):
def __init__(self, params):
super(ODTrack, self).__init__(params)
network = build_odtrack(params.cfg, training=False)
network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True)
self.cfg = params.cfg
self.network = network.cuda()
self.network.eval()
self.preprocessor = Preprocessor()
self.state = None
self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE
# motion constrain
|
# for debug
class ODTrack(BaseTracker):
def __init__(self, params):
super(ODTrack, self).__init__(params)
network = build_odtrack(params.cfg, training=False)
network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True)
self.cfg = params.cfg
self.network = network.cuda()
self.network.eval()
self.preprocessor = Preprocessor()
self.state = None
self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE
# motion constrain | self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda() | 3 | 2023-12-10 03:57:19+00:00 | 8k |
lumina-test/lumina | lumina/orchestrator/main.py | [
{
"identifier": "config_stream_handler",
"path": "lumina/utils/config_loggers.py",
"snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\... | import argparse, sys, os, yaml, time, subprocess, logging, math, copy
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
import lumina.analyzer.pcap_processor.pcap_process as pcap_process
from lumina.utils.config_loggers import config_stream_handler, config_file_handler
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.checker.integrity_check import IntegrityCheck | 5,301 | return False
logging.info("Experiment completed successfully")
return True
def clean_up(self):
""" Clean up the environment after the experiment
Returns:
bool: True if the clean up is completed successfully, False otherwise
"""
logging.info("Start cleaning up the environment")
if self.switch.clean_up() == False:
logging.error("Failed to clean up switch")
return False
if self.requester.clean_up() == False:
logging.error("Failed to clean up requester")
return False
if self.responder.clean_up() == False:
logging.error("Failed to clean up responder")
return False
if self.requester_mirror.clean_up() == False:
logging.error("Failed to clean up requester mirror")
return False
if self.responder_mirror.clean_up() == False:
logging.error("Failed to clean up responder mirror")
return False
return True
def fetch_results(self, iter_id=0):
""" Fetch the results of iteration 'iter_id', including:
1. Switch table entries and counters
2. Packet trace (pcap file)
3. Configs and end-to-end results from RDMA hosts
Args:
iter_id (int, optional): iteration ID, defaults to 0
Returns:
bool: True if the result collection is completed successfully, False otherwise
"""
## Make the results dir if it does not exist
iter_result_path = os.path.join(self.result_path, str(iter_id))
cmd = "mkdir -p %s" % iter_result_path
try:
subprocess.call(cmd, shell=True)
except:
logging.error("Failed to create result directory %s" % iter_result_path)
return False
if self.switch.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from switch")
return False
if self.requester_mirror.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from requester mirror")
return False
if self.responder_mirror.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from responder mirror")
return False
if self.requester.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from requester")
return False
if self.responder.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from responder")
return False
logging.info("Finished fetching results for iteration %d" % iter_id)
return True
def merge_traces(self, iter_id=0):
iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)
src_pcap_file_list = [os.path.join(iter_pcap_dir_path,
self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),
os.path.join(iter_pcap_dir_path,
self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]
target_pcap_path = os.path.join(self.result_path,
str(iter_id),
host.PCAP_RESULT_DIR,
self.aggregate_pcap_filename)
packet_list = pcap_process.merge_pcaps(src_pcap_file_list)
if packet_list is None:
logging.error("Failed to merge pcap files for iteration %d" % iter_id)
return False
if pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:
logging.error("Failed to dump packets to pcap file %s" % target_pcap_path)
return False
logging.info("Successfully merged pcap files for iteration %d" % iter_id)
def check_integrity(self, iter_id=0):
## Check if the collected packet trace passes integrity check
pcap_path = os.path.join(self.result_path,
str(iter_id),
host.PCAP_RESULT_DIR,
self.aggregate_pcap_filename)
packet_list = get_packet_list(pcap_path)
packet_list.sort(key=lambda x:x.get_switch_seqnum())
logging.info("Packet trace sorted by switch sequence number.")
switch_state_snapshot = os.path.join(self.result_path,
str(iter_id),
switch.SWITCH_RESULT_DIR,
switch.SWITCH_STATE_SNAPSHOT)
port_map = {'requester': self.requester.conf['nic']['switch-port'],
'responder': self.responder.conf['nic']['switch-port'],
'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],
'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}
switch_counter = SwitchCounter(switch_state_snapshot, port_map)
|
## Logs will be logged into file LOG_FILENAME
LOG_FILENAME = "run.log"
## Max # of experiment retries
MAX_NB_EXP_RETRIES = 3
class Orchestrator:
""" Class to manage the experiment """
    def __init__(self, config_file):
        """ Constructor for Orchestrator class

        Loads the experiment configuration from a YAML file, wires up the
        switch / RDMA-host / mirror-host helper objects, and creates the
        result directory.

        Args:
            config_file (str): path to the yaml (config) file.
                The file contains configs for switch, requester, responder, traffic, etc.

        Returns:
            N/A

        Note:
            Exits the whole process with status -1 (via sys.exit) if any
            required key is missing from the config file.
        """
        with open(config_file, "r") as stream:
            conf = yaml.safe_load(stream)
            try:
                # All of these keys are mandatory; a missing one raises KeyError below.
                local_workspace = conf['local-workspace']
                result_path = conf['result-path']
                switch_conf = conf['switch']
                requester_conf = conf['requester']
                responder_conf = conf['responder']
                requester_mirror_conf = conf['requester-mirror']
                responder_mirror_conf = conf['responder-mirror']
                traffic_conf = conf['traffic']
                rewrite_udp_dst_port = conf['rewrite-udp-dst-port']
                num_repeats = conf['num-repeats']
                agg_pcap_filename = conf['aggregate-pcap-filename']
            except KeyError as e:
                print("Config file %s has a bad yaml format (key error: %s)" % (config_file, e))
                sys.exit(-1)

        # Propagate the global rewrite-udp-dst-port setting into the switch
        # config and into both mirrors' packet-dump configs.
        switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port
        requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port
        responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port

        self.local_workspace = local_workspace
        self.result_path = result_path
        self.traffic_conf = traffic_conf
        self.num_repeats = num_repeats
        # Wrap the raw config dicts in the helper objects that know how to
        # talk to the physical machines.
        self.switch = switch.Switch(switch_conf)
        self.requester = host.RDMAHost(requester_conf)
        self.responder = host.RDMAHost(responder_conf)
        self.requester_mirror = host.MirrorHost(requester_mirror_conf)
        self.responder_mirror = host.MirrorHost(responder_mirror_conf)
        self.aggregate_pcap_filename = agg_pcap_filename

        # Make sure the (local) result directory exists before anything runs.
        cmd = "mkdir -p %s" % self.result_path
        subprocess.call(cmd, shell = True)
def rm_old_files(self):
""" Remove result files left by previous experiments """
old_iter_id = 0
old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))
while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path):
cmd = "rm -rf %s" % (old_iter_result_path)
subprocess.call(cmd, shell=True)
old_iter_id += 1
old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))
def get_requester_ip_list(self):
""" Return the list of requester IP addresses (without prefix length info) """
return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']]
def get_responder_ip_list(self):
""" Return the list of responder IP addresses (without prefix length info) """
return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']]
    def get_num_repeats(self):
        """ Return the number of experiment repeats (the 'num-repeats' config value) """
        return self.num_repeats
    def sync_and_compile(self):
        """ Synchronize and compile the code on all the hosts.

        Pushes and builds, in order: the switch program, the RDMA traffic
        generator (client on requester, server on responder), the counter-dump
        helper on both RDMA hosts, and the packet-capture program on both
        mirror hosts. Stops and returns at the first failure.

        Returns:
            bool: True if the code is synced and compiled successfully, False otherwise
        """
        logging.info("Sync and compile the code")

        ## Sync and compile the switch code
        ret = self.switch.sync_and_compile(self.local_workspace,
                                           switch.SWITCH_PROG_DIR_NAME,
                                           switch.SWITCH_PROG_FILE_NAME)
        if ret == False:
            logging.error("Failed to sync and compile the switch code")
            return False

        ## Sync and compile the traffic generator code
        # Validate the verb before building, since the binary name depends on it.
        rdma_verb = self.traffic_conf['rdma-verb'].strip().lower()
        if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:
            logging.error("Invalid RDMA verb: %s" % rdma_verb)
            return False

        ret = self.requester.sync_and_compile(local_workspace=self.local_workspace,
                                              prog_dir_name=self.requester.traffic_gen_dir_name(),
                                              prog_file_name=self.requester.traffic_gen_client_name(rdma_verb))
        if ret == False:
            logging.error("Failed to sync and compile the traffic generator code on requester")
            return False

        # NOTE(review): the responder build reuses self.requester's
        # traffic_gen_dir_name()/traffic_gen_server_name() helpers — presumably
        # these path helpers are host-independent; confirm against host.RDMAHost.
        ret = self.responder.sync_and_compile(local_workspace=self.local_workspace,
                                              prog_dir_name=self.requester.traffic_gen_dir_name(),
                                              prog_file_name=self.requester.traffic_gen_server_name(rdma_verb))
        if ret == False:
            logging.error("Failed to sync and compile the traffic generator code on responder")
            return False

        # The counter-dump helper is sync-only (no compile step).
        ret = self.requester.sync(local_workspace=self.local_workspace,
                                  prog_dir_name=host.DUMP_COUNTER_DIR_NAME)
        if ret == False:
            logging.error("Failed to sync the dump counter code on requester")
            return False

        ret = self.responder.sync(local_workspace=self.local_workspace,
                                  prog_dir_name=host.DUMP_COUNTER_DIR_NAME)
        if ret == False:
            logging.error("Failed to sync the dump counter code on responder")
            return False

        ## Sync and compile the packet capture code
        ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace,
                                                     prog_dir_name=host.PKT_CAPTURE_DIR_NAME,
                                                     prog_file_name=host.PKT_CAPTURE_FILE_NAME)
        if ret == False:
            logging.error("Failed to sync and compile the packet capture code on requester_mirror")
            return False

        ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace,
                                                     prog_dir_name=host.PKT_CAPTURE_DIR_NAME,
                                                     prog_file_name=host.PKT_CAPTURE_FILE_NAME)
        if ret == False:
            logging.error("Failed to sync and compile the packet capture code on responder_mirror")
            return False

        return True
def generate_switch_table_config(self):
""" Generate the switch configuration, including:
1. Forward table
2. Mirror table
3. ARP table
4. Traffic table, including the events to inject
Returns:
bool: True if the switch configuration is generated successfully, False otherwise
"""
requester_nic_conf = self.requester.conf['nic']
responder_nic_conf = self.responder.conf['nic']
requester_mirror_nic_conf = self.requester_mirror.conf['nic']
responder_mirror_nic_conf = self.responder_mirror.conf['nic']
## Set up forward table entries
self.switch.conf['forward-table'] = []
try:
for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \
requester_mirror_nic_conf, responder_mirror_nic_conf],
['requester', 'responder', 'requester_mirror', 'responder_mirror']):
forward_table_entry = {'dst-mac': nic_conf['mac'],
'eg-port': nic_conf['switch-port'],
'host': host_type}
self.switch.conf['forward-table'].append(forward_table_entry)
except:
logging.error("Failed to set forward table")
return False
## Set up mirror table entries, use ingress_to_egress
try:
requester_mirror_entry = {'direction': 'ingress_to_egress',
'src-port': requester_nic_conf['switch-port'],
'dst-port': requester_mirror_nic_conf['switch-port']}
responder_mirror_entry = {'direction': 'ingress_to_egress',
'src-port': responder_nic_conf['switch-port'],
'dst-port': responder_mirror_nic_conf['switch-port']}
self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry]
except:
logging.error("Failed to set mirror table")
return False
requester_mac = requester_nic_conf['mac']
responder_mac = responder_nic_conf['mac']
requester_ip_list = requester_nic_conf['ip-list']
responder_ip_list = responder_nic_conf['ip-list']
## Set up arp table entries
arp_entries = []
try:
for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list],
[requester_mac, responder_mac]):
for dst_ip_subnet in dst_ip_list:
dst_ip = dst_ip_subnet.split('/')[0]
arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac})
self.switch.conf['arp-table'] = arp_entries
except:
logging.error("Failed to set ARP table")
return False
## Generate the events of each iteration for switch config
per_iter_event_list = self.traffic_conf['data-pkt-events']
msg_size = self.traffic_conf['message-size']
mtu = self.traffic_conf['mtu']
num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp']
num_pkts_per_msg = int(math.ceil(msg_size / mtu))
self.switch.conf['traffic'] = {}
self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp
self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg
self.switch.conf['traffic']['data-pkt-events'] = []
if per_iter_event_list is None or len(per_iter_event_list) == 0:
## No events at all
return True
for i in range(num_msgs_per_qp):
for per_iter_event in per_iter_event_list:
global_event = copy.deepcopy(per_iter_event)
## This event is applied to all the packets of the message. We need to expand it!
if str(global_event['psn']).lower() == 'all':
for psn in range(num_pkts_per_msg):
global_event['psn'] = psn + i * num_pkts_per_msg
self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))
else:
global_event['psn'] += i * num_pkts_per_msg
self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))
return True
def ping_mesh(self):
""" Ping all the IP addresses between requester and responder to check the connectivity
Returns:
bool: True if all the IP addresses can be pinged successfully, False otherwise
"""
for requester_ip_subnet in self.requester.conf['nic']['ip-list']:
requester_ip = requester_ip_subnet.split('/')[0]
command = "ping " + requester_ip + " -c 5 -i 0.2"
ret_val, err_info, exit_status = self.responder.execute_command(command)
if exit_status != 0:
logging.error("Failed to ping ip " + requester_ip)
logging.error("[Command return info]: %s %s" % (', '.join(ret_val), ', '.join(err_info)))
return False
for responder_ip_subnet in self.responder.conf['nic']['ip-list']:
responder_ip = responder_ip_subnet.split('/')[0]
command = "ping " + responder_ip + " -c 5 -i 0.2"
ret_val, err_info, exit_status = self.requester.execute_command(command)
if exit_status != 0:
logging.error("Failed to ping ip " + responder_ip)
logging.error("[Command return info]: %s %s" % (ret_val, err_info))
return False
logging.info("Successfully pinged all the IP addresses between requester and responder")
return True
def generate_switch_config_file(self):
""" Generate the switch configuration file and copy it to the switch
Returns:
bool: True if the switch configuration file is generated and copied successfully, False otherwise
"""
## Get the mac address for all the hosts
self.requester.get_mac_address()
self.responder.get_mac_address()
self.requester_mirror.get_mac_address()
self.responder_mirror.get_mac_address()
## Generate config for Match-Action table in switch
if self.generate_switch_table_config() == False:
logging.error("Failed to generate switch table configuration")
return False
## Dump the switch configuration into a file, and copy it to the switch
if self.switch.dump_controller_config(self.local_workspace) == False:
logging.error("Failed to dump switch config")
return False
return True
def __is_valid_traffc(self):
""" Check if the traffic configuration is valid, including:
1. The tx-depth should be 1 or > 1
2. If tx-depth > 1, then we can only inject ECN marking events
Returns:
bool: True if the traffic configuration is valid, False otherwise
"""
try:
data_pkt_events = self.traffic_conf['data-pkt-events']
tx_depth = self.traffic_conf['tx-depth']
if tx_depth == 1:
return True
elif tx_depth <= 0:
return False
for event in data_pkt_events:
if event['type'] != 'ecn':
logging.error("Cannot inject %s event when tx depth = %d" % (event['type'], tx_depth))
return False
except:
logging.error("Failed to parse traffic configuration")
return False
return True
    def run_experiment(self):
        """ Run the experiment.

        Sequence: validate traffic config -> start switch -> configure RDMA
        hosts and mirrors -> verify connectivity (pingmesh) -> start packet
        capture -> snapshot counters -> run RDMA server then client -> dump
        switch results and final counters. Stops and returns at the first
        failure.

        Returns:
            bool: True if the experiment is completed successfully, False otherwise
        """
        ## Check if traffic configuration is valid
        if self.__is_valid_traffc() == False:
            logging.error("Invalid traffic configuration")
            return False

        ## Run switch program
        if self.switch.run_switch() == False:
            logging.error("Failed to run switch")
            return False

        ## Sleep for 1 second to make sure control plane is listening (for client message)
        time.sleep(1)

        ## Configure the servers
        if self.requester.config_traffic_gen() == False:
            logging.error("Failed to config RDMA requester")
            return False

        if self.responder.config_traffic_gen() == False:
            logging.error("Failed to config RDMA responder")
            return False

        if self.requester_mirror.config_packet_capture() == False:
            logging.error("Failed to config packet capture on requester mirror")
            return False

        if self.responder_mirror.config_packet_capture() == False:
            logging.error("Failed to config packet capture on responder mirror")
            return False

        ## Check the connectivity through pingmesh (try 5 rounds)
        num_tries = 0
        pingmesh_ret = False
        while num_tries < 5:
            pingmesh_ret = self.ping_mesh()
            if pingmesh_ret == True:
                break
            num_tries += 1
            time.sleep(1)

        if pingmesh_ret == False:
            logging.error("Failed to ping all the IP addresses between requester and responder")
            return False

        ## Launch packet capture for both side
        ## Prerequisite: config hugepage and igb_uio if needed
        if self.requester_mirror.run_packet_capture() == False:
            logging.error("Failed to run packet capture on requester mirror")
            return False

        if self.responder_mirror.run_packet_capture() == False:
            logging.error("Failed to run packet capture on responder mirror")
            return False

        # Give the capture processes time to come up before traffic starts.
        time.sleep(3)

        ## Dump the counters before running
        if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False:
            logging.error("Failed to dump counters on requester before running")
            return False

        if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False:
            logging.error("Failed to dump counters on responder before running")
            return False

        ## Launch RDMA server first
        run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf)
        if run_server_ret == False:
            logging.error("Failed to run RDMA server")
            return False

        # Give the server time to start listening before the client connects.
        time.sleep(2)

        ## Launch RDMA client
        # NOTE(review): bare `except:` kept as-is (behavior unchanged); a
        # missing/empty ip-list raises KeyError/IndexError here.
        try:
            destination_ip_subnet = self.responder.conf['nic']['ip-list'][0]
            destination_ip = destination_ip_subnet.split('/')[0]
        except:
            logging.error("Failed to get destination IP")
            return False

        run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf,
                                                               destination_ip=destination_ip,
                                                               controller_ip=self.switch.conf['control-ip'],
                                                               controller_listen_port=self.switch.conf['listen-port'])
        if run_client_ret == False:
            logging.error("Failed to run RDMA client")
            return False

        if self.switch.dump_results() == False:
            logging.error("Failed to dump results from switch")
            return False

        if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False:
            logging.error("Failed to dump counters on requester after running")
            return False

        if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False:
            logging.error("Failed to dump counters on responder after running")
            return False

        logging.info("Experiment completed successfully")
        return True
def clean_up(self):
""" Clean up the environment after the experiment
Returns:
bool: True if the clean up is completed successfully, False otherwise
"""
logging.info("Start cleaning up the environment")
if self.switch.clean_up() == False:
logging.error("Failed to clean up switch")
return False
if self.requester.clean_up() == False:
logging.error("Failed to clean up requester")
return False
if self.responder.clean_up() == False:
logging.error("Failed to clean up responder")
return False
if self.requester_mirror.clean_up() == False:
logging.error("Failed to clean up requester mirror")
return False
if self.responder_mirror.clean_up() == False:
logging.error("Failed to clean up responder mirror")
return False
return True
def fetch_results(self, iter_id=0):
""" Fetch the results of iteration 'iter_id', including:
1. Switch table entries and counters
2. Packet trace (pcap file)
3. Configs and end-to-end results from RDMA hosts
Args:
iter_id (int, optional): iteration ID, defaults to 0
Returns:
bool: True if the result collection is completed successfully, False otherwise
"""
## Make the results dir if it does not exist
iter_result_path = os.path.join(self.result_path, str(iter_id))
cmd = "mkdir -p %s" % iter_result_path
try:
subprocess.call(cmd, shell=True)
except:
logging.error("Failed to create result directory %s" % iter_result_path)
return False
if self.switch.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from switch")
return False
if self.requester_mirror.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from requester mirror")
return False
if self.responder_mirror.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from responder mirror")
return False
if self.requester.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from requester")
return False
if self.responder.fetch_results(iter_result_path) == False:
logging.error("Failed to fetch results from responder")
return False
logging.info("Finished fetching results for iteration %d" % iter_id)
return True
def merge_traces(self, iter_id=0):
iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)
src_pcap_file_list = [os.path.join(iter_pcap_dir_path,
self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),
os.path.join(iter_pcap_dir_path,
self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]
target_pcap_path = os.path.join(self.result_path,
str(iter_id),
host.PCAP_RESULT_DIR,
self.aggregate_pcap_filename)
packet_list = pcap_process.merge_pcaps(src_pcap_file_list)
if packet_list is None:
logging.error("Failed to merge pcap files for iteration %d" % iter_id)
return False
if pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:
logging.error("Failed to dump packets to pcap file %s" % target_pcap_path)
return False
logging.info("Successfully merged pcap files for iteration %d" % iter_id)
def check_integrity(self, iter_id=0):
## Check if the collected packet trace passes integrity check
pcap_path = os.path.join(self.result_path,
str(iter_id),
host.PCAP_RESULT_DIR,
self.aggregate_pcap_filename)
packet_list = get_packet_list(pcap_path)
packet_list.sort(key=lambda x:x.get_switch_seqnum())
logging.info("Packet trace sorted by switch sequence number.")
switch_state_snapshot = os.path.join(self.result_path,
str(iter_id),
switch.SWITCH_RESULT_DIR,
switch.SWITCH_STATE_SNAPSHOT)
port_map = {'requester': self.requester.conf['nic']['switch-port'],
'responder': self.responder.conf['nic']['switch-port'],
'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],
'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}
switch_counter = SwitchCounter(switch_state_snapshot, port_map)
| integrity_checker = IntegrityCheck(packet_list=packet_list, | 4 | 2023-12-09 08:21:14+00:00 | 8k |
equilibration/equipy | equipy/graphs/_arrow_plot.py | [
{
"identifier": "permutations_columns",
"path": "equipy/utils/permutations/_compute_permutations.py",
"snippet": "def permutations_columns(sensitive_features):\n \"\"\"\n Generate permutations of columns in the input array sensitive_features.\n\n Parameters\n ----------\n sensitive_featur... | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from ..utils.permutations._compute_permutations import permutations_columns, calculate_perm_wasserstein
from ..utils.permutations.metrics._fairness_permutations import unfairness_permutations
from ..utils.permutations.metrics._performance_permutations import performance_permutations | 3,738 | line.axes.annotate(label, xytext=(
x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10)
ax.scatter(x[i], y[i], label=label, marker="+", s=150)
elif (i == len(x)-1) & (final_model):
label = f"$A_{1}$" + r"$_:$" + f"$_{i}$-fair"
line.axes.annotate(label, xytext=(
x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10)
ax.scatter(x[i], y[i], label=label, marker="*", s=150)
elif (i == 2) & (i < len(x)-1):
label = f"$A_{sens[1]}$" + r"$_,$" + f"$_{sens[i]}$-fair"
line.axes.annotate(label, xytext=(
x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10)
ax.scatter(x[i], y[i], label=label, marker="+", s=150)
else:
ax.scatter(x[i], y[i], marker="+", s=150, color="grey", alpha=0.4)
ax.set_xlabel("Unfairness")
ax.set_ylabel("Performance")
ax.set_xlim((np.min(x)-np.min(x)/10-np.max(x)/10,
np.max(x)+np.min(x)/10+np.max(x)/10))
ax.set_ylim((np.min(y)-np.min(y)/10-np.max(y)/10,
np.max(y)+np.min(y)/10+np.max(y)/10))
ax.set_title("Exact fairness")
ax.legend(loc="best")
def _fair_custimized_arrow_plot(unfs_list, performance_list):
    """
    Draw, on a single figure, the step-by-step fairness-performance arrows for
    every permutation (order of sensitive variables for which fairness is
    calculated).

    Parameters
    ----------
    unfs_list : list
        Dictionaries of unfairness values, one per permutation of fair output datasets.
    performance_list : list
        Dictionaries of performance values, one per permutation of fair output datasets.

    Returns
    -------
    matplotlib.figure.Figure
        Arrows representing the fairness-performance combinations step by step
        (by sensitive attribute) for each permutation.

    Note
    ----
    Uses the module-level ``ax`` so that the repeated calls to
    ``fair_arrow_plot`` all draw on the same axes.
    """
    global ax
    fig, ax = plt.subplots()

    last_idx = len(unfs_list) - 1
    for idx, (unfs, perf) in enumerate(zip(unfs_list, performance_list)):
        # The first permutation draws the base-model marker, the last one draws
        # the final-model marker, and the intermediate ones draw neither.
        fair_arrow_plot(unfs, perf,
                        permutations=True,
                        base_model=(idx == 0),
                        final_model=(idx == last_idx and idx != 0))
def fair_multiple_arrow_plot(sensitive_features_calib, sensitive_features_test, y_calib, y_test, y_true_test, epsilon=None, test_size=0.3, permutation=True, metric=mean_squared_error):
"""
Plot arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for different permutations.
Parameters
----------
sensitive_features_calib : numpy.ndarray
Sensitive features for calibration.
sensitive_features_test : numpy.ndarray
Sensitive features for testing.
y_calib : numpy.ndarray
Predictions for calibration.
y_test : numpy.ndarray
Predictions for testing.
y_true_test : numpy.ndarray
True labels for testing.
epsilon : float, optional
Epsilon value for calculating Wasserstein distance. Defaults to None.
test_size : float, optional
Size of the testing set. Defaults to 0.3.
permutation : bool, optional
If True, displays permutations of arrows based on input dictionaries. Defaults to True.
metric : function, optional
The metric used to evaluate performance. Defaults to mean_squared_error.
Returns
-------
matplotlib.axes.Axes
Arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for different permutations.
Plotting Conventions
--------------------
- Arrows represent different fairness-performance combinations for each permutation.
- Axes are labeled for unfairness (x-axis) and performance (y-axis).
Example Usage
-------------
>>> custom_fair_arrow_plot(sensitive_features_calib, sensitive_features_test, y_calib, y_test, y_true_test)
Note
----
This function uses a global variable `ax` for plotting, ensuring compatibility with external code.
"""
permut_y_fair_dict = calculate_perm_wasserstein(
y_calib, sensitive_features_calib, y_test, sensitive_features_test, epsilon=epsilon)
all_combs_sensitive_features_test = permutations_columns(
sensitive_features_test)
unfs_list = unfairness_permutations(
permut_y_fair_dict, all_combs_sensitive_features_test)
|
def fair_arrow_plot(unfs_dict, performance_dict, permutations=False, base_model=True, final_model=True):
    """
    Generate an arrow plot of the fairness-performance combinations reached
    step by step (one sensitive attribute at a time) on the way to fairness.

    Parameters
    ----------
    unfs_dict : dict
        Unfairness values associated with the sequentially fair output datasets.
    performance_dict : dict
        Performance values associated with the sequentially fair output datasets.
    permutations : bool, optional
        If True, draw on the already-existing global axes instead of creating a
        new figure (used when overlaying several permutations). Defaults to False.
    base_model : bool, optional
        If True, annotate the base model point. Defaults to True.
    final_model : bool, optional
        If True, annotate the final (fully fair) model point. Defaults to True.

    Returns
    -------
    None
        The plot is drawn on the global matplotlib axes ``ax``.

    Plotting Conventions
    --------------------
    - Points represent the successive fairness-performance combinations.
    - Axes are labeled for unfairness (x-axis) and performance (y-axis).

    Note
    ----
    This function uses a global variable `ax` for plotting, ensuring
    compatibility with external code. When ``permutations`` is True, ``ax``
    must already have been created by the caller.
    """
    x = []
    y = []
    sens = [0]
    for i, key in enumerate(unfs_dict.keys()):
        x.append(unfs_dict[key])
        if i != 0:
            # keys are expected to look like 'sensitive_<id>'; keep the id part
            sens.append(int(key[9:]))
    for key in performance_dict.keys():
        y.append(performance_dict[key])
    global ax
    if not permutations:
        fig, ax = plt.subplots()
    line = ax.plot(x, y, linestyle="--", alpha=0.25, color="grey")[0]
    for i in range(len(sens)):
        # boolean `and` replaces the original bitwise `&`; operands are bools,
        # so the result is unchanged but the intent is clearer.
        if (i == 0) and base_model:
            line.axes.annotate("Base\nmodel", xytext=(
                x[0]+np.min(x)/20, y[0]), xy=(x[0], y[0]), size=10)
            ax.scatter(x[0], y[0], label="Base model", marker="^", s=100)
        elif i == 1:
            label = f"$A_{sens[i]}$-fair"
            line.axes.annotate(label, xytext=(
                x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10)
            ax.scatter(x[i], y[i], label=label, marker="+", s=150)
        elif (i == len(x)-1) and final_model:
            label = f"$A_{1}$" + r"$_:$" + f"$_{i}$-fair"
            line.axes.annotate(label, xytext=(
                x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10)
            ax.scatter(x[i], y[i], label=label, marker="*", s=150)
        elif (i == 2) and (i < len(x)-1):
            label = f"$A_{sens[1]}$" + r"$_,$" + f"$_{sens[i]}$-fair"
            line.axes.annotate(label, xytext=(
                x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10)
            ax.scatter(x[i], y[i], label=label, marker="+", s=150)
        else:
            # intermediate steps beyond the third are shown unlabeled in grey
            ax.scatter(x[i], y[i], marker="+", s=150, color="grey", alpha=0.4)
    # pad both axis limits by a tenth of the min and max values
    ax.set_xlabel("Unfairness")
    ax.set_ylabel("Performance")
    ax.set_xlim((np.min(x)-np.min(x)/10-np.max(x)/10,
                 np.max(x)+np.min(x)/10+np.max(x)/10))
    ax.set_ylim((np.min(y)-np.min(y)/10-np.max(y)/10,
                 np.max(y)+np.min(y)/10+np.max(y)/10))
    ax.set_title("Exact fairness")
    ax.legend(loc="best")
def _fair_custimized_arrow_plot(unfs_list, performance_list):
    """
    Plot arrows for the fairness-performance combinations reached step by step
    (by sensitive attribute) for every permutation of the sensitive variables
    (order of sensitive variables for which fairness is calculated).

    Parameters
    ----------
    unfs_list : list
        Dictionaries of unfairness values, one per permutation of fair output datasets.
    performance_list : list
        Dictionaries of performance values, one per permutation of fair output datasets.

    Returns
    -------
    None
        Arrows for every permutation are drawn on a fresh global matplotlib axes.

    Example Usage
    -------------
    >>> arrow_plot_permutations(unfs_list, performance_list)

    Note
    ----
    This function uses a global variable `ax` for plotting, ensuring
    compatibility with external code.
    """
    global ax
    fig, ax = plt.subplots()
    n_permutations = len(unfs_list)
    for idx in range(n_permutations):
        # Annotate the base model only on the first permutation and the final
        # model only on the last one (never both when there is a single entry),
        # mirroring the original if/elif cascade.
        show_base = idx == 0
        show_final = (idx == n_permutations - 1) and not show_base
        fair_arrow_plot(unfs_list[idx], performance_list[idx],
                        permutations=True,
                        base_model=show_base,
                        final_model=show_final)
def fair_multiple_arrow_plot(sensitive_features_calib, sensitive_features_test, y_calib, y_test, y_true_test, epsilon=None, test_size=0.3, permutation=True, metric=mean_squared_error):
"""
Plot arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for different permutations.
Parameters
----------
sensitive_features_calib : numpy.ndarray
Sensitive features for calibration.
sensitive_features_test : numpy.ndarray
Sensitive features for testing.
y_calib : numpy.ndarray
Predictions for calibration.
y_test : numpy.ndarray
Predictions for testing.
y_true_test : numpy.ndarray
True labels for testing.
epsilon : float, optional
Epsilon value for calculating Wasserstein distance. Defaults to None.
test_size : float, optional
Size of the testing set. Defaults to 0.3.
permutation : bool, optional
If True, displays permutations of arrows based on input dictionaries. Defaults to True.
metric : function, optional
The metric used to evaluate performance. Defaults to mean_squared_error.
Returns
-------
matplotlib.axes.Axes
Arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for different permutations.
Plotting Conventions
--------------------
- Arrows represent different fairness-performance combinations for each permutation.
- Axes are labeled for unfairness (x-axis) and performance (y-axis).
Example Usage
-------------
>>> custom_fair_arrow_plot(sensitive_features_calib, sensitive_features_test, y_calib, y_test, y_true_test)
Note
----
This function uses a global variable `ax` for plotting, ensuring compatibility with external code.
"""
permut_y_fair_dict = calculate_perm_wasserstein(
y_calib, sensitive_features_calib, y_test, sensitive_features_test, epsilon=epsilon)
all_combs_sensitive_features_test = permutations_columns(
sensitive_features_test)
unfs_list = unfairness_permutations(
permut_y_fair_dict, all_combs_sensitive_features_test) | performance_list = performance_permutations( | 3 | 2023-12-06 14:43:41+00:00 | 8k |
Tlntin/booking_simulator | apps/agentfabric/appBot.py | [
{
"identifier": "get_avatar_image",
"path": "config_utils.py",
"snippet": "def get_avatar_image(bot_avatar, uuid_str=''):\n user_avatar_path = os.path.join(\n os.path.dirname(__file__), 'assets/user.jpg')\n bot_avatar_path = os.path.join(os.path.dirname(__file__), 'assets/bot.jpg')\n if ... | import os
import random
import shutil
import sys
import traceback
import gradio as gr
from config_utils import get_avatar_image, get_ci_dir, parse_configuration
from gradio_utils import ChatBot, format_cover_html
from user_core import init_user_chatbot_agent | 5,869 |
# Single-user local deployment: every session shares this fixed uuid.
uuid_str = 'local_user'
# Load the agent builder / model / tool configuration for this user.
builder_cfg, model_cfg, tool_cfg, available_tool_list, _, _ = parse_configuration(
    uuid_str)
# Prompt suggestions shown below the chat box (may be an empty list).
suggests = builder_cfg.get('prompt_recommend', [])
# (user_avatar_path, bot_avatar_path) pair used by the chat widget.
avatar_pairs = get_avatar_image(builder_cfg.get('avatar', ''), uuid_str)
# Gradio theme: blue primary color, square (no-radius) corners.
customTheme = gr.themes.Default(
    primary_hue=gr.themes.utils.colors.blue,
    radius_size=gr.themes.utils.sizes.radius_none,
)
def check_uuid(uuid_str):
    """Validate a user uuid, falling back to a local default when missing.

    Parameters
    ----------
    uuid_str : str or None
        Candidate uuid; ``None`` or an empty string counts as missing.

    Returns
    -------
    str
        The uuid unchanged, or ``'local_user'`` when missing outside studio.

    Raises
    ------
    gr.Error
        If the uuid is missing while running inside the ModelScope studio
        environment (the user must log in first).
    """
    # `not uuid_str` already covers both None and '' (empty strings are
    # falsy), so the redundant `uuid_str == ''` check was dropped.
    if not uuid_str:
        if os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio':
            raise gr.Error('请登陆后使用! (Please login first)')
        uuid_str = 'local_user'
    return uuid_str
def init_user(state):
    """Create the user's chatbot agent and stash it into the session state.

    Parameters
    ----------
    state : dict
        Session state; may carry a 'session_seed' entry.

    Returns
    -------
    dict
        The same state dict, with a 'user_agent' entry added on success.
        On any failure the traceback is printed and the state is returned
        unchanged so the UI keeps working.
    """
    try:
        fallback_seed = random.randint(0, 1000000000)
        seed = state.get('session_seed', fallback_seed)
        agent = init_user_chatbot_agent(uuid_str)
        agent.seed = seed
        state['user_agent'] = agent
    except Exception as exc:
        detail = traceback.format_exc()
        print(f'Error:{exc}, with detail: {detail}')
    return state
# 创建 Gradio 界面
demo = gr.Blocks(css='assets/appBot.css', theme=customTheme)
with demo:
gr.Markdown(
'# <center> \N{fire} AgentFabric powered by Modelscope-agent ([github star](https://github.com/modelscope/modelscope-agent/tree/main))</center>' # noqa E501
)
draw_seed = random.randint(0, 1000000000)
state = gr.State({'session_seed': draw_seed})
with gr.Row(elem_classes='container'):
with gr.Column(scale=4):
with gr.Column():
# Preview
user_chatbot = ChatBot(
value=[[None, '尝试问我一点什么吧~']],
elem_id='user_chatbot',
elem_classes=['markdown-body'],
avatar_images=avatar_pairs,
height=600,
latex_delimiters=[],
show_label=False)
with gr.Row():
with gr.Column(scale=12):
preview_chat_input = gr.Textbox(
show_label=False,
container=False,
placeholder='跟我聊聊吧~')
with gr.Column(min_width=70, scale=1):
upload_button = gr.UploadButton(
'上传',
file_types=[
'.csv', '.doc', '.docx', '.xls', '.xlsx', '.txt',
'.md', '.pdf', '.jpeg', '.png', '.jpg', '.gif'
],
file_count='multiple')
with gr.Column(min_width=70, scale=1):
preview_send_button = gr.Button('发送', variant='primary')
with gr.Column(scale=1):
user_chat_bot_cover = gr.HTML(
format_cover_html(builder_cfg, avatar_pairs[1]))
user_chat_bot_suggest = gr.Examples(
label='Prompt Suggestions',
examples=suggests,
inputs=[preview_chat_input])
def upload_file(chatbot, upload_button, _state):
_uuid_str = check_uuid(uuid_str)
new_file_paths = []
if 'file_paths' in _state:
file_paths = _state['file_paths']
else:
file_paths = []
for file in upload_button:
file_name = os.path.basename(file.name)
# covert xxx.json to xxx_uuid_str.json
file_name = file_name.replace('.', f'_{_uuid_str}.')
|
# Single-user local deployment: every session shares this fixed uuid.
uuid_str = 'local_user'
# Load the agent builder / model / tool configuration for this user.
builder_cfg, model_cfg, tool_cfg, available_tool_list, _, _ = parse_configuration(
    uuid_str)
# Prompt suggestions shown below the chat box (may be an empty list).
suggests = builder_cfg.get('prompt_recommend', [])
# (user_avatar_path, bot_avatar_path) pair used by the chat widget.
avatar_pairs = get_avatar_image(builder_cfg.get('avatar', ''), uuid_str)
# Gradio theme: blue primary color, square (no-radius) corners.
customTheme = gr.themes.Default(
    primary_hue=gr.themes.utils.colors.blue,
    radius_size=gr.themes.utils.sizes.radius_none,
)
def check_uuid(uuid_str):
    """Validate a user uuid, defaulting to 'local_user' when it is missing.

    Parameters
    ----------
    uuid_str : str or None
        Candidate uuid; ``None`` or an empty string counts as missing.

    Returns
    -------
    str
        The uuid unchanged, or ``'local_user'`` when missing outside studio.

    Raises
    ------
    gr.Error
        If the uuid is missing inside the ModelScope studio environment,
        where the user is required to log in first.
    """
    # An empty string is falsy, so `not uuid_str` subsumes the original
    # redundant `uuid_str == ''` comparison.
    if not uuid_str:
        if os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio':
            raise gr.Error('请登陆后使用! (Please login first)')
        uuid_str = 'local_user'
    return uuid_str
def init_user(state):
    """Initialize the user's chatbot agent inside the session state.

    Any exception is caught, its traceback printed, and the state returned
    unmodified so the surrounding gradio app keeps running.
    """
    try:
        default_seed = random.randint(0, 1000000000)
        chat_agent = init_user_chatbot_agent(uuid_str)
        chat_agent.seed = state.get('session_seed', default_seed)
        state['user_agent'] = chat_agent
    except Exception as exc:
        print(f'Error:{exc}, with detail: {traceback.format_exc()}')
    return state
# 创建 Gradio 界面
demo = gr.Blocks(css='assets/appBot.css', theme=customTheme)
with demo:
gr.Markdown(
'# <center> \N{fire} AgentFabric powered by Modelscope-agent ([github star](https://github.com/modelscope/modelscope-agent/tree/main))</center>' # noqa E501
)
draw_seed = random.randint(0, 1000000000)
state = gr.State({'session_seed': draw_seed})
with gr.Row(elem_classes='container'):
with gr.Column(scale=4):
with gr.Column():
# Preview
user_chatbot = ChatBot(
value=[[None, '尝试问我一点什么吧~']],
elem_id='user_chatbot',
elem_classes=['markdown-body'],
avatar_images=avatar_pairs,
height=600,
latex_delimiters=[],
show_label=False)
with gr.Row():
with gr.Column(scale=12):
preview_chat_input = gr.Textbox(
show_label=False,
container=False,
placeholder='跟我聊聊吧~')
with gr.Column(min_width=70, scale=1):
upload_button = gr.UploadButton(
'上传',
file_types=[
'.csv', '.doc', '.docx', '.xls', '.xlsx', '.txt',
'.md', '.pdf', '.jpeg', '.png', '.jpg', '.gif'
],
file_count='multiple')
with gr.Column(min_width=70, scale=1):
preview_send_button = gr.Button('发送', variant='primary')
with gr.Column(scale=1):
user_chat_bot_cover = gr.HTML(
format_cover_html(builder_cfg, avatar_pairs[1]))
user_chat_bot_suggest = gr.Examples(
label='Prompt Suggestions',
examples=suggests,
inputs=[preview_chat_input])
def upload_file(chatbot, upload_button, _state):
_uuid_str = check_uuid(uuid_str)
new_file_paths = []
if 'file_paths' in _state:
file_paths = _state['file_paths']
else:
file_paths = []
for file in upload_button:
file_name = os.path.basename(file.name)
# covert xxx.json to xxx_uuid_str.json
file_name = file_name.replace('.', f'_{_uuid_str}.') | file_path = os.path.join(get_ci_dir(), file_name) | 1 | 2023-12-12 04:24:00+00:00 | 8k |
finned-tech/sportsbookreview-scraper | cli.py | [
{
"identifier": "NFLOddsScraper",
"path": "scrapers/sportsbookreview.py",
"snippet": "class NFLOddsScraper(OddsScraper):\n def __init__(self, years):\n super().__init__(\"nfl\", years)\n self.base = (\n \"https://www.sportsbookreviewsonline.com/scoresoddsarchives/nfl-odds-\"\... | import argparse
import config
from scrapers.sportsbookreview import (
NFLOddsScraper,
NBAOddsScraper,
NHLOddsScraper,
MLBOddsScraper,
) | 5,377 |
# Command-line interface for the sportsbook odds scraper entry point.
parser = argparse.ArgumentParser()
# sport key selecting the scraper class (e.g. nfl/nba/nhl/mlb)
parser.add_argument("--sport", type=str, required=True)
# start and end years
parser.add_argument("--start", type=int, required=True)
parser.add_argument("--end", type=int, required=True)
# filename for output
parser.add_argument("--filename", type=str, required=True)
# output format (csv or json), default is json
parser.add_argument("--format", type=str, default="json")
args = parser.parse_args()
if __name__ == "__main__":
if args.start < config.MIN_YEAR or args.end > config.MAX_YEAR:
raise ValueError(
f"Invalid year range. Must be between {config.MIN_YEAR} and {config.MAX_YEAR}."
)
if args.start > args.end:
raise ValueError("Invalid year range. Start year must be before end year.")
list_yrs = list(range(args.start, args.end + 1))
scrapers = {
"nfl": NFLOddsScraper,
|
# Command-line interface for the sportsbook odds scraper entry point.
parser = argparse.ArgumentParser()
# sport key selecting the scraper class (e.g. nfl/nba/nhl/mlb)
parser.add_argument("--sport", type=str, required=True)
# start and end years
parser.add_argument("--start", type=int, required=True)
parser.add_argument("--end", type=int, required=True)
# filename for output
parser.add_argument("--filename", type=str, required=True)
# output format (csv or json), default is json
parser.add_argument("--format", type=str, default="json")
args = parser.parse_args()
if __name__ == "__main__":
if args.start < config.MIN_YEAR or args.end > config.MAX_YEAR:
raise ValueError(
f"Invalid year range. Must be between {config.MIN_YEAR} and {config.MAX_YEAR}."
)
if args.start > args.end:
raise ValueError("Invalid year range. Start year must be before end year.")
list_yrs = list(range(args.start, args.end + 1))
scrapers = {
"nfl": NFLOddsScraper, | "nba": NBAOddsScraper, | 1 | 2023-12-10 07:36:05+00:00 | 8k |
chenchenygu/watermark-learnability | compute_metrics.py | [
{
"identifier": "WatermarkDetector",
"path": "kgw_watermarking/watermark_reliability_release/watermark_processor.py",
"snippet": "class WatermarkDetector(WatermarkBase):\n \"\"\"This is the detector for all watermarks imprinted with WatermarkLogitsProcessor.\n\n The detector needs to be given the ... | import argparse
import os
import json
import numpy as np
import mauve
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoTokenizer, AutoModelForCausalLM
from tqdm import tqdm
from kgw_watermarking.watermark_reliability_release.watermark_processor import WatermarkDetector
from aar_watermark import AarWatermarkDetector | 5,824 |
# Seed used for watermark detectors when the sample metadata does not carry one.
DEFAULT_SEED = 42
device = "cuda" if torch.cuda.is_available() else "cpu"
# Command-line interface: tokenizer/model names, input/output paths, batching.
parser = argparse.ArgumentParser()
parser.add_argument("--tokenizer_name", type=str, required=True)
parser.add_argument("--watermark_tokenizer_name", type=str, default=None)
parser.add_argument("--truncate", action="store_true", default=False)
parser.add_argument("--num_tokens", type=int, default=200)
parser.add_argument("--lm_score_model_name", type=str, required=True)
parser.add_argument("--input_file", type=str, required=True)
parser.add_argument("--output_file", type=str, required=True)
parser.add_argument("--text_field", type=str, default="full_model_text")
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--overwrite_output_file", action="store_true", default=False)
parser.add_argument("--fp16", action="store_true", default=False)
parser.add_argument("--kgw_device", type=str, default="cpu", choices=["cpu", "cuda"])
parser.add_argument("--mauve_max_length", type=int, default=200)
args = parser.parse_args()
# NOTE(review): `device` was already assigned identically above; this second
# assignment is redundant but harmless.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Refuse to clobber an existing results file unless explicitly allowed.
if os.path.exists(args.output_file) and not args.overwrite_output_file:
    raise ValueError(f"Output file {args.output_file} already exists and overwrite_output_file is False")
with open(args.input_file, "r") as f:
    data = json.load(f)
# Generated samples keyed by model name, plus the shared prompt length.
samples_dict = data["samples"]
prompt_length = data["prompt_length"]
# The watermark-detector tokenizer defaults to the main tokenizer.
if args.watermark_tokenizer_name is None:
    args.watermark_tokenizer_name = args.tokenizer_name
tokenizer = AutoTokenizer.from_pretrained(args.watermark_tokenizer_name)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
# Record the metric-computation arguments alongside the results for provenance.
compute_metrics_args_dict = {}
compute_metrics_args_dict.update(vars(args))
data["compute_metrics_args_dict"] = compute_metrics_args_dict
def save_data():
    """Write the accumulated results in module-level ``data`` to
    ``args.output_file`` as indented JSON, creating the parent directory
    on demand. Uses the module-level ``args`` and ``data``.
    """
    out_dir = os.path.dirname(args.output_file)
    # os.makedirs('') raises FileNotFoundError, so only create a directory
    # when the output path actually has a parent component.
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(args.output_file, "w") as f:
        print(f"Writing output to {args.output_file}")
        json.dump(data, f, indent=4)
# compute watermark p-values
for model_name, sd in tqdm(samples_dict.items()):
if 'watermark_config' in samples_dict[model_name]:
watermark_config = samples_dict[model_name]['watermark_config']
if isinstance(watermark_config, list):
watermark_config = watermark_config[0]
else:
#print(f"Skipping {model_name}, no watermark config")
#continue
print(f"{model_name}, no watermark config, parsing string")
watermark_config = {}
if 'aar' in model_name or "k" in watermark_config:
if not watermark_config:
aar_s = "aar-k"
k = int(model_name[model_name.find(aar_s) + len(aar_s)])
seed = DEFAULT_SEED
print(f"{k=}, {seed=}")
detector = AarWatermarkDetector(
k=k,
seed=seed,
tokenizer=tokenizer,
)
else:
detector = AarWatermarkDetector(
k=watermark_config["k"],
seed=watermark_config.get("seed", DEFAULT_SEED),
tokenizer=tokenizer,
)
elif 'kth' in model_name:
# KTH detection in kth_watermarking/compute_kth_scores.py, takes long time
print(f"Skipping {model_name}, KTH watermark")
continue
elif 'kgw' in model_name or "gamma" in watermark_config:
print(f"gamma = {watermark_config.get('gamma', 0.25)}")
|
# Seed used for watermark detectors when the sample metadata does not carry one.
DEFAULT_SEED = 42
device = "cuda" if torch.cuda.is_available() else "cpu"
# Command-line interface: tokenizer/model names, input/output paths, batching.
parser = argparse.ArgumentParser()
parser.add_argument("--tokenizer_name", type=str, required=True)
parser.add_argument("--watermark_tokenizer_name", type=str, default=None)
parser.add_argument("--truncate", action="store_true", default=False)
parser.add_argument("--num_tokens", type=int, default=200)
parser.add_argument("--lm_score_model_name", type=str, required=True)
parser.add_argument("--input_file", type=str, required=True)
parser.add_argument("--output_file", type=str, required=True)
parser.add_argument("--text_field", type=str, default="full_model_text")
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--overwrite_output_file", action="store_true", default=False)
parser.add_argument("--fp16", action="store_true", default=False)
parser.add_argument("--kgw_device", type=str, default="cpu", choices=["cpu", "cuda"])
parser.add_argument("--mauve_max_length", type=int, default=200)
args = parser.parse_args()
# NOTE(review): `device` was already assigned identically above; this second
# assignment is redundant but harmless.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Refuse to clobber an existing results file unless explicitly allowed.
if os.path.exists(args.output_file) and not args.overwrite_output_file:
    raise ValueError(f"Output file {args.output_file} already exists and overwrite_output_file is False")
with open(args.input_file, "r") as f:
    data = json.load(f)
# Generated samples keyed by model name, plus the shared prompt length.
samples_dict = data["samples"]
prompt_length = data["prompt_length"]
# The watermark-detector tokenizer defaults to the main tokenizer.
if args.watermark_tokenizer_name is None:
    args.watermark_tokenizer_name = args.tokenizer_name
tokenizer = AutoTokenizer.from_pretrained(args.watermark_tokenizer_name)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
# Record the metric-computation arguments alongside the results for provenance.
compute_metrics_args_dict = {}
compute_metrics_args_dict.update(vars(args))
data["compute_metrics_args_dict"] = compute_metrics_args_dict
def save_data():
    """Dump the module-level ``data`` dict to ``args.output_file`` as
    indented JSON, creating the parent directory first when needed.
    """
    out_dir = os.path.dirname(args.output_file)
    # Guard against os.makedirs('') (FileNotFoundError) when the output
    # path is a bare filename with no directory component.
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(args.output_file, "w") as f:
        print(f"Writing output to {args.output_file}")
        json.dump(data, f, indent=4)
# compute watermark p-values
for model_name, sd in tqdm(samples_dict.items()):
if 'watermark_config' in samples_dict[model_name]:
watermark_config = samples_dict[model_name]['watermark_config']
if isinstance(watermark_config, list):
watermark_config = watermark_config[0]
else:
#print(f"Skipping {model_name}, no watermark config")
#continue
print(f"{model_name}, no watermark config, parsing string")
watermark_config = {}
if 'aar' in model_name or "k" in watermark_config:
if not watermark_config:
aar_s = "aar-k"
k = int(model_name[model_name.find(aar_s) + len(aar_s)])
seed = DEFAULT_SEED
print(f"{k=}, {seed=}")
detector = AarWatermarkDetector(
k=k,
seed=seed,
tokenizer=tokenizer,
)
else:
detector = AarWatermarkDetector(
k=watermark_config["k"],
seed=watermark_config.get("seed", DEFAULT_SEED),
tokenizer=tokenizer,
)
elif 'kth' in model_name:
# KTH detection in kth_watermarking/compute_kth_scores.py, takes long time
print(f"Skipping {model_name}, KTH watermark")
continue
elif 'kgw' in model_name or "gamma" in watermark_config:
print(f"gamma = {watermark_config.get('gamma', 0.25)}") | detector = WatermarkDetector( | 0 | 2023-12-07 16:45:33+00:00 | 8k |
skyoux/SemAIM | main_finetune.py | [
{
"identifier": "ImageListFolder",
"path": "datasets/datasets.py",
"snippet": "class ImageListFolder(datasets.ImageFolder):\n def __init__(self, root, transform=None, target_transform=None,\n ann_file=None, loader=default_loader):\n self.root = root\n self.transform = tr... | import argparse
import datetime
import json
import numpy as np
import os
import time
import builtins
import torch
import torch.backends.cudnn as cudnn
import timm
import util.lr_decay as lrd
import util.misc as misc
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from datasets.datasets import ImageListFolder, build_transform
from util.pos_embed import interpolate_pos_embed
from util.misc import NativeScalerWithGradNormCount as NativeScaler
from models import models_vit
from engines.engine_finetune import train_one_epoch, evaluate | 5,515 | shuffle=False,
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = models_vit.__dict__[args.model](
num_classes=args.nb_classes,
drop_path_rate=args.drop_path,
global_pool=args.global_pool,
)
if args.finetune and not args.eval:
# load pretrained model
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load pre-trained checkpoint from: %s" % args.finetune)
if 'state_dict' in checkpoint:
checkpoint_model = checkpoint['state_dict']
else:
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
checkpoint_model = {k.replace("module.", ""): v for k, v in checkpoint_model.items()}
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# interpolate position embedding
interpolate_pos_embed(model, checkpoint_model)
# load pre-trained model
msg = model.load_state_dict(checkpoint_model, strict=False)
print(msg)
print("global_pool = ", args.global_pool)
if args.global_pool:
assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
else:
assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
# manually initialize fc layer
trunc_normal_(model.head.weight, std=2e-5)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
# print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
# build optimizer with layer-wise lr decay (lrd)
param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay,
no_weight_decay_list=model_without_ddp.no_weight_decay(),
layer_decay=args.layer_decay
)
optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
loss_scaler = NativeScaler()
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
# resume model
ckpt_path = os.path.join(args.output_dir, f"{args.model}.{args.experiment}.temp.pth")
if not os.path.isfile(ckpt_path):
print("Checkpoint not founded in {}, train from random initialization".format(ckpt_path))
else:
print("Found checkpoint at {}".format(ckpt_path))
misc.load_model(args=args, ckpt_path=ckpt_path, model_without_ddp=model, optimizer=optimizer,
loss_scaler=loss_scaler)
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
exit(0)
if global_rank == 0 and args.log_dir is not None and not args.eval:
log_dir = os.path.join(args.log_dir, f"{args.model}.{args.experiment}")
os.makedirs(log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=log_dir)
else:
log_writer = None
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
| # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
# assert timm.__version__ == "0.3.2" # version check
def get_args_parser():
    """Build the argument parser for ViT fine-tuning on image classification.

    Returns
    -------
    argparse.ArgumentParser
        Parser covering training, optimizer, augmentation, mixup/cutmix,
        finetuning, dataset, and distributed-training options. The caller
        invokes ``parse_args()`` on the returned object.
    """
    # NOTE(review): the description still says 'UM-MAE'; presumably inherited
    # from the UM-MAE/MAE codebase this script derives from — only affects
    # --help output, confirm before renaming.
    parser = argparse.ArgumentParser('UM-MAE fine-tuning for image classification', add_help=False)
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus')
    parser.add_argument('--epochs', default=50, type=int)
    parser.add_argument('--accum_iter', default=1, type=int,
                        help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')

    # Model parameters
    parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input_size', default=224, type=int,
                        help='images input size')
    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')

    # Optimizer parameters
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--lr', type=float, default=None, metavar='LR',
                        help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=1e-3, metavar='LR',
                        help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--layer_decay', type=float, default=0.75,
                        help='layer-wise lr decay from ELECTRA/BEiT')
    parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0')
    parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR')

    # Augmentation parameters
    parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
                        help='Color jitter factor (enabled only when not using Auto/RandAug)')
    # Fixed: the original help string contained a leftover '" + "' concatenation
    # artifact, and the statement ended with a stray comma that built a no-op tuple.
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1,
                        help='Label smoothing (default: 0.1)')

    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')

    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0,
                        help='mixup alpha, mixup enabled if > 0.')
    parser.add_argument('--cutmix', type=float, default=0,
                        help='cutmix alpha, cutmix enabled if > 0.')
    parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup_prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup_mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')

    # * Finetuning params
    parser.add_argument('--finetune', default='',
                        help='finetune from checkpoint')
    # --global_pool and --cls_token are mutually exclusive views of the same flag
    parser.add_argument('--global_pool', action='store_true')
    parser.set_defaults(global_pool=True)
    parser.add_argument('--cls_token', action='store_false', dest='global_pool',
                        help='Use class token instead of global pool for classification')

    # Dataset parameters
    parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--nb_classes', default=1000, type=int,
                        help='number of the classification types')
    parser.add_argument('--output_dir', default='./output_dir',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./output_dir',
                        help='path where to tensorboard log')
    parser.add_argument('--saveckp_freq', default=20, type=int, help='Save checkpoint every x epochs.')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='',
                        help='resume from checkpoint')
    parser.add_argument('--experiment', default='exp', type=str, help='experiment name (for log)')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true',
                        help='Perform evaluation only')
    parser.add_argument('--dist_eval', action='store_true', default=False,
                        help='Enabling distributed evaluation (recommended during training for faster monitor')
    parser.add_argument('--num_workers', default=10, type=int)
    # pin_mem defaults to False via set_defaults below, despite the store_true flag
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=False)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')
    parser.add_argument('--dist_backend', default='nccl', type=str, help='experiment name (for log)')
    return parser
def main(args):
    """Fine-tuning entry point: builds the data pipeline, model, optimizer and
    loss, optionally loads pre-trained / resume checkpoints, then runs the
    training (or evaluation-only) loop."""
    misc.init_distributed_mode(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print("{}".format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    # fix the seed for reproducibility
    # Offset by rank so every distributed worker draws a different random stream.
    seed = args.seed + misc.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    transform_train = build_transform(is_train=True, args=args)
    transform_val = build_transform(is_train=False, args=args)
    dataset_train = ImageListFolder(os.path.join(args.data_path, 'train'), transform=transform_train,
                                    ann_file=os.path.join(args.data_path, 'train.txt'))
    print(dataset_train)
    num_tasks = misc.get_world_size()
    global_rank = misc.get_rank()
    sampler_train = torch.utils.data.DistributedSampler(
        dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
    )
    print("Sampler_train = %s" % str(sampler_train))
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )
    # NOTE(review): the "validation" dataset reads the *train* split and
    # train.txt (only the transform differs) -- confirm this is intentional.
    dataset_val = ImageListFolder(os.path.join(args.data_path, 'train'), transform=transform_val,
                                  ann_file=os.path.join(args.data_path, 'train.txt'))
    num_tasks = misc.get_world_size()
    global_rank = misc.get_rank()
    sampler_val = torch.utils.data.DistributedSampler(
        dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False
    )
    print("Sampler_val = %s" % str(sampler_val))
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False,
        shuffle=False,
    )
    # Mixup/CutMix augmentation is active if any of the three knobs is set.
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        print("Mixup is activated!")
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.nb_classes)
    model = models_vit.__dict__[args.model](
        num_classes=args.nb_classes,
        drop_path_rate=args.drop_path,
        global_pool=args.global_pool,
    )
    if args.finetune and not args.eval:
        # load pretrained model
        checkpoint = torch.load(args.finetune, map_location='cpu')
        print("Load pre-trained checkpoint from: %s" % args.finetune)
        # Checkpoints may store weights under 'state_dict' or 'model'.
        if 'state_dict' in checkpoint:
            checkpoint_model = checkpoint['state_dict']
        else:
            checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # Strip any DataParallel/DDP 'module.' prefix from checkpoint keys.
        checkpoint_model = {k.replace("module.", ""): v for k, v in checkpoint_model.items()}
        # Drop the classification head when its shape does not match the
        # target task (e.g. different number of classes).
        for k in ['head.weight', 'head.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        # interpolate position embedding
        interpolate_pos_embed(model, checkpoint_model)
        # load pre-trained model
        msg = model.load_state_dict(checkpoint_model, strict=False)
        print(msg)
        print("global_pool = ", args.global_pool)
        # Only the (re-initialized) head / fc_norm weights may be missing.
        if args.global_pool:
            assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
        else:
            assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
        # manually initialize fc layer
        trunc_normal_(model.head.weight, std=2e-5)
    model.to(device)
    model_without_ddp = model
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    # print("Model = %s" % str(model_without_ddp))
    print('number of params (M): %.2f' % (n_parameters / 1.e6))
    # Linear lr scaling rule: lr = base_lr * effective_batch_size / 256.
    eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
    if args.lr is None:  # only base_lr is specified
        args.lr = args.blr * eff_batch_size / 256
    print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
    print("actual lr: %.2e" % args.lr)
    print("accumulate grad iterations: %d" % args.accum_iter)
    print("effective batch size: %d" % eff_batch_size)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    # build optimizer with layer-wise lr decay (lrd)
    param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay,
                                        no_weight_decay_list=model_without_ddp.no_weight_decay(),
                                        layer_decay=args.layer_decay
                                        )
    optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
    loss_scaler = NativeScaler()
    if mixup_fn is not None:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing > 0.:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()
    print("criterion = %s" % str(criterion))
    # resume model
    # Auto-resume from the per-experiment "temp" checkpoint if it exists.
    ckpt_path = os.path.join(args.output_dir, f"{args.model}.{args.experiment}.temp.pth")
    if not os.path.isfile(ckpt_path):
        # NOTE(review): "founded" should read "found" in this message.
        print("Checkpoint not founded in {}, train from random initialization".format(ckpt_path))
    else:
        print("Found checkpoint at {}".format(ckpt_path))
        # NOTE(review): `model` (possibly DDP-wrapped at this point) is passed
        # as model_without_ddp -- confirm misc.load_model handles that.
        misc.load_model(args=args, ckpt_path=ckpt_path, model_without_ddp=model, optimizer=optimizer,
                        loss_scaler=loss_scaler)
    if args.eval:
        # Evaluation-only mode: report top-1 accuracy and exit immediately.
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        exit(0)
    # TensorBoard logging only on the global rank-0 process.
    if global_rank == 0 and args.log_dir is not None and not args.eval:
        log_dir = os.path.join(args.log_dir, f"{args.model}.{args.experiment}")
        os.makedirs(log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=log_dir)
    else:
        log_writer = None
    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-seed the distributed sampler so each epoch shuffles differently.
            data_loader_train.sampler.set_epoch(epoch)
boweniac/autogan | autogan/utils/compressed_text_utils.py | [
{
"identifier": "LLMConfig",
"path": "autogan/oai/config_utils.py",
"snippet": "class LLMConfig:\n \"\"\"LLM config object\n \"\"\"\n\n def __init__(\n self,\n api_key_list: ConfigList,\n max_messages_tokens: str,\n request_interval_time: int,\n ... | import math
import re
from typing import Optional, List
from autogan.oai.config_utils import LLMConfig
from autogan.oai.count_tokens_utils import count_text_tokens
from autogan.utils.environment_utils import environment_info
from autogan.oai.generate_utils import generate_chat_completion
from autogan.utils.response import ResponseFuncType | 4,571 | --total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
compressed_text = ""
total_tokens = 0
split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model)
# Calculate the approximate size of the text slices proportionally
split_safe_size = int(safe_size / len(split_texts))
for st in split_texts:
content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode,
split_safe_size)
if content:
compressed_text += content + "\n"
total_tokens += tokens
if compressed_text:
return compressed_text, total_tokens
else:
return None, None
def generate_text_summary(text: str, summary_model_config: LLMConfig, agent_name: str, response_func: ResponseFuncType,
stream_mode: Optional[bool] = None, safe_size: Optional[int] = None) \
-> tuple[str, int]:
"""Generate a general summary of the text
生成文本普通摘要
:param text: Text to be compressed.
待压缩的文本。
:param summary_model_config: LLM configuration used for text compression.
用于压缩文本的 LLM 配置。
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:param safe_size: The target size of the text after compression, if not provided there is no limit.
文本压缩后的目标尺寸,如果为空则不做限制。
:return:
--compressed_text: The text after compression.
压缩后的文本。
--total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
if safe_size:
system_prompt = """I hope you are an article filter and refiner, filtering and refining the articles sent by users. Please ensure that your summary does not exceed the limit of max_tokens.
When the content of the article is not enough to refine, please omit other polite language and only output one word: None.
If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article.
Please note that the description perspective and chapter structure of the extracted content should be as consistent as possible with the original text, and try to retain details for subsequent reasoning. Please omit other polite language and only output the refined content."""
chat_prompt = f"max_tokens: {safe_size}\n\nArticle content:\n{text}"
# system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章,请确保您的总结不超过 max_tokens 的限制.
# 当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。
# 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容.
# 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理,请省略其他客套用语,仅输出提炼好的内容。"""
# chat_prompt = f"max_tokens: {safe_size}\n\n文章内容:\n\n{text}"
else:
system_prompt = """I hope you can serve as an article filter and refiner, filtering and refining the articles sent by users. If the content of the article is insufficient for refinement, please omit other polite phrases and output only one word: None.
If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article.
Please note that the perspective and chapter structure of the extracted content should be as consistent with the original as possible, and retain as many details as possible for subsequent reasoning. Please omit other polite phrases and only output the refined content."""
chat_prompt = f"Article content:\n{text}"
# system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章。当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。
# 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容。
# 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理。请省略其他客套用语,仅输出提炼好的内容。"""
# chat_prompt = f"文章内容:\n{text}"
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': chat_prompt}]
return generate_chat_completion(summary_model_config, chat_messages, agent_name, "text_summary", response_func,
stream_mode)
def generate_text_clues(text: str, focus: str, summary_model_config: LLMConfig, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None) -> tuple[str, int]:
"""Generate a clue summary of the text
生成文本线索摘要
:param text: Text to be compressed.
待压缩的文本。
:param focus: The focus direction when compressing text.
压缩文本时的专注方向。
:param summary_model_config: LLM configuration used for text compression.
用于压缩文本的 LLM 配置。
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:return:
--compressed_text: The text after compression.
压缩后的文本。
--total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
info = environment_info()
system_prompt = """I hope you are an agent who is good at discovering the truth in real-time, capable of finding content that helps infer the answer to the question from the information sent by users.
Please note that if the content of the information has no extractable value, please omit other polite expressions and output only one word: None. Also, please help me filter out sensitive content related to politics, geopolitics, violence, and sex in the information."""
# system_prompt = """我希望你是一个善于发现实时真相的探员, 能从用户发送的资料中帮我找到有助于推断出问题答案的内容。
# 需要注意的是,如果资料内容没有可提取的价值,请省略其他客套用语,仅输出一个单词:None。另外还请帮我过滤掉资料中与政治、地缘政治、暴力、性等有关的敏感内容。"""
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user',
'content': f'The current question is:{focus}\n\nEnvironmental information:\n{info}\n\nMaterial content:\n\n{text}'}]
# chat_messages = [{'role': 'user', 'content': f'当前的问题是:{focus}\n\n环境信息:\n{info}\n\n资料内容:\n\n{text}'}]
return generate_chat_completion(summary_model_config, chat_messages, agent_name, "clue_summary", response_func,
stream_mode)
def split_text(text: str, split_size: int, model: Optional[str] = None) -> List[str]:
"""Split the long text and store the text slices in a list
将长文本拆分,并将文本切片存储至列表
"""
split_texts = []
|
def compressed_text_universal(text: str, summary_model_config: LLMConfig, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None,
focus: Optional[str] = None, safe_size: Optional[int] = None) \
-> tuple[Optional[str], Optional[int]]:
"""Compress the text, generating either a regular summary or a cue summary.
压缩文本,可生成普通摘要或线索摘要。
First, the long text is sliced, and then a summary is generated for each slice.
首先将长文本切片,然后逐切片的生成摘要。
If the value of the focus parameter is not None, then the attention will be focused on the focus area while generating the summary.
如 focus 参数的值不为 None 则在生成摘要时注意力集中于 focus。
If the value of the safe_size parameter is not None and the length of the initial compression result exceeds the safe_size, the summary will be further compressed, with the compressed size expected to stay within the range of the safe_size.
如 safe_size 参数的值不为 None 且初次压缩结果长度超过 safe_size,则会对摘要进一步压缩,压缩后的大小被期望保持在 safe_size 范围之内。
:param text: Text to be compressed.
待压缩的文本。
:param summary_model_config: LLM configuration used for text compression.
用于压缩文本的 LLM 配置。
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:param focus: The focus direction when compressing text.
压缩文本时的专注方向。
:param safe_size: The target size of the text after compression, if not provided there is no limit.
文本压缩后的目标尺寸,如果为空则不做限制。
:return:
--compressed_text: The text after compression.
压缩后的文本。
--total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
compressed_text = ""
total_tokens = 0
split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model)
for st in split_texts:
if focus:
content, tokens = generate_text_clues(st, focus, summary_model_config, agent_name, response_func,
stream_mode)
else:
content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode)
if content:
compressed_text += content + "\n"
total_tokens += tokens
if compressed_text:
if safe_size and safe_size < total_tokens:
return compressed_text_into_safe_size(compressed_text, safe_size, summary_model_config, agent_name,
response_func, stream_mode)
else:
return compressed_text, total_tokens
else:
return None, None
def compressed_text_into_safe_size(text: str, safe_size: int, summary_model_config: LLMConfig, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None) \
-> tuple[Optional[str], Optional[int]]:
"""Compress the text to a safe size
压缩文本至安全尺寸
First, the long text is sliced, and then a summary is generated for each slice.
首先将长文本切片,然后逐切片的生成摘要。
the summary will be further compressed, with the compressed size expected to stay within the range of the safe_size.
压缩后的大小被期望保持在 safe_size 范围之内。
:param text: Text to be compressed.
待压缩的文本。
:param safe_size: The target size of the text after compression.
文本压缩后的目标尺寸。
:param summary_model_config: LLM configuration used for text compression.
用于压缩文本的 LLM 配置。
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:return:
--compressed_text: The text after compression.
压缩后的文本。
--total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
compressed_text = ""
total_tokens = 0
split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model)
# Calculate the approximate size of the text slices proportionally
split_safe_size = int(safe_size / len(split_texts))
for st in split_texts:
content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode,
split_safe_size)
if content:
compressed_text += content + "\n"
total_tokens += tokens
if compressed_text:
return compressed_text, total_tokens
else:
return None, None
def generate_text_summary(text: str, summary_model_config: LLMConfig, agent_name: str, response_func: ResponseFuncType,
stream_mode: Optional[bool] = None, safe_size: Optional[int] = None) \
-> tuple[str, int]:
"""Generate a general summary of the text
生成文本普通摘要
:param text: Text to be compressed.
待压缩的文本。
:param summary_model_config: LLM configuration used for text compression.
用于压缩文本的 LLM 配置。
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:param safe_size: The target size of the text after compression, if not provided there is no limit.
文本压缩后的目标尺寸,如果为空则不做限制。
:return:
--compressed_text: The text after compression.
压缩后的文本。
--total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
if safe_size:
system_prompt = """I hope you are an article filter and refiner, filtering and refining the articles sent by users. Please ensure that your summary does not exceed the limit of max_tokens.
When the content of the article is not enough to refine, please omit other polite language and only output one word: None.
If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article.
Please note that the description perspective and chapter structure of the extracted content should be as consistent as possible with the original text, and try to retain details for subsequent reasoning. Please omit other polite language and only output the refined content."""
chat_prompt = f"max_tokens: {safe_size}\n\nArticle content:\n{text}"
# system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章,请确保您的总结不超过 max_tokens 的限制.
# 当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。
# 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容.
# 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理,请省略其他客套用语,仅输出提炼好的内容。"""
# chat_prompt = f"max_tokens: {safe_size}\n\n文章内容:\n\n{text}"
else:
system_prompt = """I hope you can serve as an article filter and refiner, filtering and refining the articles sent by users. If the content of the article is insufficient for refinement, please omit other polite phrases and output only one word: None.
If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article.
Please note that the perspective and chapter structure of the extracted content should be as consistent with the original as possible, and retain as many details as possible for subsequent reasoning. Please omit other polite phrases and only output the refined content."""
chat_prompt = f"Article content:\n{text}"
# system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章。当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。
# 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容。
# 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理。请省略其他客套用语,仅输出提炼好的内容。"""
# chat_prompt = f"文章内容:\n{text}"
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': chat_prompt}]
return generate_chat_completion(summary_model_config, chat_messages, agent_name, "text_summary", response_func,
stream_mode)
def generate_text_clues(text: str, focus: str, summary_model_config: LLMConfig, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None) -> tuple[str, int]:
"""Generate a clue summary of the text
生成文本线索摘要
:param text: Text to be compressed.
待压缩的文本。
:param focus: The focus direction when compressing text.
压缩文本时的专注方向。
:param summary_model_config: LLM configuration used for text compression.
用于压缩文本的 LLM 配置。
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:return:
--compressed_text: The text after compression.
压缩后的文本。
--total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
info = environment_info()
system_prompt = """I hope you are an agent who is good at discovering the truth in real-time, capable of finding content that helps infer the answer to the question from the information sent by users.
Please note that if the content of the information has no extractable value, please omit other polite expressions and output only one word: None. Also, please help me filter out sensitive content related to politics, geopolitics, violence, and sex in the information."""
# system_prompt = """我希望你是一个善于发现实时真相的探员, 能从用户发送的资料中帮我找到有助于推断出问题答案的内容。
# 需要注意的是,如果资料内容没有可提取的价值,请省略其他客套用语,仅输出一个单词:None。另外还请帮我过滤掉资料中与政治、地缘政治、暴力、性等有关的敏感内容。"""
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user',
'content': f'The current question is:{focus}\n\nEnvironmental information:\n{info}\n\nMaterial content:\n\n{text}'}]
# chat_messages = [{'role': 'user', 'content': f'当前的问题是:{focus}\n\n环境信息:\n{info}\n\n资料内容:\n\n{text}'}]
return generate_chat_completion(summary_model_config, chat_messages, agent_name, "clue_summary", response_func,
stream_mode)
def split_text(text: str, split_size: int, model: Optional[str] = None) -> List[str]:
"""Split the long text and store the text slices in a list
将长文本拆分,并将文本切片存储至列表
"""
split_texts = []
| count_tokens = count_text_tokens(text, model) | 1 | 2023-12-06 03:24:34+00:00 | 8k |
JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation | inference.py | [
{
"identifier": "AverageMeter",
"path": "utils/metric_util.py",
"snippet": "class AverageMeter():\r\n \"\"\" Computes and stores the average and current value \"\"\"\r\n\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n \"\"\" Reset all statistics \"\"\"\r\n ... | import argparse
import subprocess
import numpy as np
import os
import torch
import torch.nn as nn
import logging
from tqdm import tqdm
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision.transforms import ToPILImage, Compose, RandomCrop, ToTensor
from utils.metric_util import AverageMeter
from utils.tensor_op import save_img_tensor, save_image_tensor
from utils.util import mkdir, setup_logger
from utils.data_util import crop_HWC_img, random_augmentation, tensor2img
from metrics.psnr_ssim import compute_psnr_ssim, calculate_psnr, calculate_ssim
from models.archs.IDR_restormer_arch import IDR_restormer | 6,695 |
noisy_img, _ = self._add_gaussian_noise(clean_img)
clean_img, noisy_img = self.toTensor(clean_img), self.toTensor(noisy_img)
return [clean_name], noisy_img, clean_img
def __len__(self):
return self.num_clean
class DerainDehazeDataset(Dataset):
def __init__(self, args, task="derain"):
super(DerainDehazeDataset, self).__init__()
self.ids = []
self.task_idx = 0
self.args = args
self.task_dict = {'derain': 0, 'dehaze': 1, 'deblur':2, 'low-light':3, 'UDC_T':4, 'UDC_P':5}
self.toTensor = ToTensor()
self.set_dataset(task)
def _init_input_ids(self):
if self.task_idx == 0:
self.ids = []
name_list = os.listdir(self.args.derain_path + 'input/')
self.ids += [self.args.derain_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 1:
self.ids = []
name_list = os.listdir(self.args.dehaze_path + 'input/')
self.ids += [self.args.dehaze_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 2:
self.ids = []
name_list = os.listdir(self.args.deblur_path + 'input/')
self.ids += [self.args.deblur_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 3:
self.ids = []
name_list = os.listdir(self.args.low_light_path + 'input/')
self.ids += [self.args.low_light_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 4:
self.ids = []
name_list = os.listdir(self.args.udc_T_path + 'input/')
self.ids += [self.args.udc_T_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 5:
self.ids = []
name_list = os.listdir(self.args.udc_P_path + 'input/')
self.ids += [self.args.udc_P_path + 'input/' + id_ for id_ in name_list]
self.length = len(self.ids)
def _get_gt_path(self, degraded_name):
if self.task_idx == 0:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 1:
dir_name = degraded_name.split("input")[0] + 'target/'
name = degraded_name.split('/')[-1].split('_')[0] + '.png'
gt_name = dir_name + name
elif self.task_idx == 2:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 3:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 4:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 5:
gt_name = degraded_name.replace("input", "target")
return gt_name
def set_dataset(self, task):
self.task_idx = self.task_dict[task]
self._init_input_ids()
def _edgeComputation(self,x):
x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:])
x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:])
y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
return y[:,:,None].astype(np.float32)
def __getitem__(self, idx):
degraded_path = self.ids[idx]
clean_path = self._get_gt_path(degraded_path)
degraded_img = crop_HWC_img(np.array(Image.open(degraded_path).convert('RGB')), base=32)
clean_img = crop_HWC_img(np.array(Image.open(clean_path).convert('RGB')), base=32)
clean_img, degraded_img = self.toTensor(clean_img), self.toTensor(degraded_img)
degraded_name = degraded_path.split('/')[-1][:-4]
return [degraded_name], degraded_img, clean_img
def __len__(self):
return self.length
def test_Denoise(net, dataset, task="CBSD68", sigma=15,save_img=True):
logger = logging.getLogger('base')
output_path = opt.output_path + 'denoise/' + str(sigma) + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
dataset.set_sigma(sigma)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([clean_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0]
|
class DenoiseTestDataset(Dataset):
def __init__(self, args, dataset="CBSD68"):
super(DenoiseTestDataset, self).__init__()
self.args = args
self.clean_ids = []
self.sigma = 15
self.dataset_dict = {'CBSD68': 0, 'urban100': 1, 'Kodak24':2}
self.set_dataset(dataset)
self.toTensor = ToTensor()
def _init_clean_ids(self):
if self.task_idx == 0:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_CBSD68_path)
self.clean_ids += [self.args.denoise_CBSD68_path + id_ for id_ in name_list]
elif self.task_idx == 1:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_urban100_path)
self.clean_ids += [self.args.denoise_urban100_path + id_ for id_ in name_list]
elif self.task_idx == 2:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_Kodak24_path)
self.clean_ids += [self.args.denoise_Kodak24_path + id_ for id_ in name_list]
self.num_clean = len(self.clean_ids)
def set_dataset(self, dataset):
self.task_idx = self.dataset_dict[dataset]
self._init_clean_ids()
def _add_gaussian_noise(self, clean_patch):
noise = np.random.randn(*clean_patch.shape)
noisy_patch = np.clip(clean_patch + noise * self.sigma, 0, 255).astype(np.uint8)
return noisy_patch, clean_patch
def _edgeComputation(self,x):
x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:])
x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:])
y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
return y[:,:,None].astype(np.float32)
def set_sigma(self, sigma):
self.sigma = sigma
def __getitem__(self, clean_id):
clean_img = crop_HWC_img(np.array(Image.open(self.clean_ids[clean_id]).convert('RGB')), base=32)
clean_name = self.clean_ids[clean_id].split("/")[-1].split('.')[0]
noisy_img, _ = self._add_gaussian_noise(clean_img)
clean_img, noisy_img = self.toTensor(clean_img), self.toTensor(noisy_img)
return [clean_name], noisy_img, clean_img
def __len__(self):
return self.num_clean
class DerainDehazeDataset(Dataset):
def __init__(self, args, task="derain"):
super(DerainDehazeDataset, self).__init__()
self.ids = []
self.task_idx = 0
self.args = args
self.task_dict = {'derain': 0, 'dehaze': 1, 'deblur':2, 'low-light':3, 'UDC_T':4, 'UDC_P':5}
self.toTensor = ToTensor()
self.set_dataset(task)
def _init_input_ids(self):
if self.task_idx == 0:
self.ids = []
name_list = os.listdir(self.args.derain_path + 'input/')
self.ids += [self.args.derain_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 1:
self.ids = []
name_list = os.listdir(self.args.dehaze_path + 'input/')
self.ids += [self.args.dehaze_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 2:
self.ids = []
name_list = os.listdir(self.args.deblur_path + 'input/')
self.ids += [self.args.deblur_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 3:
self.ids = []
name_list = os.listdir(self.args.low_light_path + 'input/')
self.ids += [self.args.low_light_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 4:
self.ids = []
name_list = os.listdir(self.args.udc_T_path + 'input/')
self.ids += [self.args.udc_T_path + 'input/' + id_ for id_ in name_list]
elif self.task_idx == 5:
self.ids = []
name_list = os.listdir(self.args.udc_P_path + 'input/')
self.ids += [self.args.udc_P_path + 'input/' + id_ for id_ in name_list]
self.length = len(self.ids)
def _get_gt_path(self, degraded_name):
if self.task_idx == 0:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 1:
dir_name = degraded_name.split("input")[0] + 'target/'
name = degraded_name.split('/')[-1].split('_')[0] + '.png'
gt_name = dir_name + name
elif self.task_idx == 2:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 3:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 4:
gt_name = degraded_name.replace("input", "target")
elif self.task_idx == 5:
gt_name = degraded_name.replace("input", "target")
return gt_name
def set_dataset(self, task):
self.task_idx = self.task_dict[task]
self._init_input_ids()
def _edgeComputation(self,x):
x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:])
x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:])
y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
return y[:,:,None].astype(np.float32)
def __getitem__(self, idx):
degraded_path = self.ids[idx]
clean_path = self._get_gt_path(degraded_path)
degraded_img = crop_HWC_img(np.array(Image.open(degraded_path).convert('RGB')), base=32)
clean_img = crop_HWC_img(np.array(Image.open(clean_path).convert('RGB')), base=32)
clean_img, degraded_img = self.toTensor(clean_img), self.toTensor(degraded_img)
degraded_name = degraded_path.split('/')[-1][:-4]
return [degraded_name], degraded_img, clean_img
def __len__(self):
return self.length
def test_Denoise(net, dataset, task="CBSD68", sigma=15,save_img=True):
logger = logging.getLogger('base')
output_path = opt.output_path + 'denoise/' + str(sigma) + '/'
# subprocess.check_output(['mkdir', '-p', output_path])
mkdir(output_path)
dataset.set_dataset(task)
dataset.set_sigma(sigma)
testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)
psnr = AverageMeter()
ssim = AverageMeter()
with torch.no_grad():
for ([clean_name], degrad_patch, clean_patch) in tqdm(testloader):
degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda()
restored = net(degrad_patch)
if type(restored) == list:
restored = restored[0] | temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch) | 8 | 2023-12-07 10:58:34+00:00 | 8k |
TACJu/Compositor | Compositor_Mask2Former/mask2former/maskformer_model.py | [
{
"identifier": "SetCriterion",
"path": "Compositor_Mask2Former/mask2former/modeling/criterion.py",
"snippet": "class SetCriterion(nn.Module):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes a... | from typing import Tuple
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from detectron2.utils.memory import retry_if_cuda_oom
from .modeling.criterion import SetCriterion
from .modeling.matcher import HungarianMatcher
import torch | 4,136 | # Copyright (c) Facebook, Inc. and its affiliates.
@META_ARCH_REGISTRY.register()
class MaskFormer(nn.Module):
"""
Main class for mask classification semantic segmentation architectures.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
sem_seg_head: nn.Module,
criterion: nn.Module,
num_queries: int,
object_mask_threshold: float,
overlap_threshold: float,
metadata,
size_divisibility: int,
sem_seg_postprocess_before_inference: bool,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
# inference
semantic_on: bool,
panoptic_on: bool,
instance_on: bool,
test_topk_per_image: int,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
sem_seg_head: a module that predicts semantic segmentation from backbone features
criterion: a module that defines the loss
num_queries: int, number of queries
object_mask_threshold: float, threshold to filter query based on classification score
for panoptic segmentation inference
overlap_threshold: overlap threshold used in general inference for panoptic segmentation
metadata: dataset meta, get `thing` and `stuff` category names for panoptic
segmentation inference
size_divisibility: Some backbones require the input height and width to be divisible by a
specific integer. We can use this to override such requirement.
sem_seg_postprocess_before_inference: whether to resize the prediction back
to original input size before semantic segmentation inference or after.
For high-resolution dataset like Mapillary, resizing predictions before
inference will cause OOM error.
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
semantic_on: bool, whether to output semantic segmentation prediction
instance_on: bool, whether to output instance segmentation prediction
panoptic_on: bool, whether to output panoptic segmentation prediction
test_topk_per_image: int, instance segmentation parameter, keep topk instances per image
"""
super().__init__()
self.backbone = backbone
self.sem_seg_head = sem_seg_head
self.criterion = criterion
self.num_queries = num_queries
self.overlap_threshold = overlap_threshold
self.object_mask_threshold = object_mask_threshold
self.metadata = metadata
if size_divisibility < 0:
# use backbone size_divisibility if not set
size_divisibility = self.backbone.size_divisibility
self.size_divisibility = size_divisibility
self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
# additional args
self.semantic_on = semantic_on
self.instance_on = instance_on
self.panoptic_on = panoptic_on
self.test_topk_per_image = test_topk_per_image
if not self.semantic_on:
assert self.sem_seg_postprocess_before_inference
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
# Loss parameters:
deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
# loss weights
class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT
# building criterion
| # Copyright (c) Facebook, Inc. and its affiliates.
@META_ARCH_REGISTRY.register()
class MaskFormer(nn.Module):
"""
Main class for mask classification semantic segmentation architectures.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
sem_seg_head: nn.Module,
criterion: nn.Module,
num_queries: int,
object_mask_threshold: float,
overlap_threshold: float,
metadata,
size_divisibility: int,
sem_seg_postprocess_before_inference: bool,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
# inference
semantic_on: bool,
panoptic_on: bool,
instance_on: bool,
test_topk_per_image: int,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
sem_seg_head: a module that predicts semantic segmentation from backbone features
criterion: a module that defines the loss
num_queries: int, number of queries
object_mask_threshold: float, threshold to filter query based on classification score
for panoptic segmentation inference
overlap_threshold: overlap threshold used in general inference for panoptic segmentation
metadata: dataset meta, get `thing` and `stuff` category names for panoptic
segmentation inference
size_divisibility: Some backbones require the input height and width to be divisible by a
specific integer. We can use this to override such requirement.
sem_seg_postprocess_before_inference: whether to resize the prediction back
to original input size before semantic segmentation inference or after.
For high-resolution dataset like Mapillary, resizing predictions before
inference will cause OOM error.
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
semantic_on: bool, whether to output semantic segmentation prediction
instance_on: bool, whether to output instance segmentation prediction
panoptic_on: bool, whether to output panoptic segmentation prediction
test_topk_per_image: int, instance segmentation parameter, keep topk instances per image
"""
super().__init__()
self.backbone = backbone
self.sem_seg_head = sem_seg_head
self.criterion = criterion
self.num_queries = num_queries
self.overlap_threshold = overlap_threshold
self.object_mask_threshold = object_mask_threshold
self.metadata = metadata
if size_divisibility < 0:
# use backbone size_divisibility if not set
size_divisibility = self.backbone.size_divisibility
self.size_divisibility = size_divisibility
self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
# additional args
self.semantic_on = semantic_on
self.instance_on = instance_on
self.panoptic_on = panoptic_on
self.test_topk_per_image = test_topk_per_image
if not self.semantic_on:
assert self.sem_seg_postprocess_before_inference
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
# Loss parameters:
deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
# loss weights
class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT
# building criterion | matcher = HungarianMatcher( | 1 | 2023-12-12 11:49:28+00:00 | 8k |
turbopuffer/turbopuffer-python | turbopuffer/namespace.py | [
{
"identifier": "Cursor",
"path": "turbopuffer/vectors.py",
"snippet": "class Cursor(str):\n pass"
},
{
"identifier": "VectorResult",
"path": "turbopuffer/vectors.py",
"snippet": "class VectorResult:\n \"\"\"\n The VectorResult type represents a set of vectors that are the resul... | import sys
import turbopuffer as tpuf
from turbopuffer.vectors import Cursor, VectorResult, VectorColumns, VectorRow, batch_iter
from turbopuffer.backend import Backend
from turbopuffer.query import VectorQuery, FilterTuple
from typing import Dict, List, Optional, Iterable, Union, overload | 5,770 | def upsert(self, data: Union[Iterable[dict], Iterable[VectorRow]]) -> None:
"""
Creates or updates a multiple vectors provided as a list or iterator.
If this call succeeds, data is guaranteed to be durably written to object storage.
Upserting a vector will overwrite any existing vector with the same ID.
"""
...
@overload
def upsert(self, data: VectorResult) -> None:
"""
Creates or updates multiple vectors.
If this call succeeds, data is guaranteed to be durably written to object storage.
Upserting a vector will overwrite any existing vector with the same ID.
"""
...
def upsert(self, data=None, ids=None, vectors=None, attributes=None) -> None:
if data is None:
if ids is not None and vectors is not None:
return self.upsert(VectorColumns(ids=ids, vectors=vectors, attributes=attributes))
else:
raise ValueError('upsert() requires both ids= and vectors= be set.')
elif isinstance(data, VectorColumns):
# "if None in data.vectors:" is not supported because data.vectors might be a list of np.ndarray
# None == pd.ndarray is an ambiguous comparison in this case.
for vec in data.vectors:
if vec is None:
raise ValueError('upsert() call would result in a vector deletion, use Namespace.delete([ids...]) instead.')
response = self.backend.make_api_request('vectors', self.name, payload=data.__dict__)
elif isinstance(data, VectorRow):
raise ValueError('upsert() should be called on a list of vectors, got single vector.')
elif isinstance(data, list):
if isinstance(data[0], dict):
return self.upsert(VectorColumns.from_rows(data))
elif isinstance(data[0], VectorRow):
return self.upsert(VectorColumns.from_rows(data))
elif isinstance(data[0], VectorColumns):
for columns in data:
self.upsert(columns)
return
else:
raise ValueError(f'Unsupported list data type: {type(data[0])}')
elif isinstance(data, dict):
if 'id' in data:
raise ValueError('upsert() should be called on a list of vectors, got single vector.')
elif 'ids' in data:
return self.upsert(VectorColumns.from_dict(data))
else:
raise ValueError('Provided dict is missing ids.')
elif 'pandas' in sys.modules and isinstance(data, sys.modules['pandas'].DataFrame):
if 'id' not in data.keys():
raise ValueError('Provided pd.DataFrame is missing an id column.')
if 'vector' not in data.keys():
raise ValueError('Provided pd.DataFrame is missing a vector column.')
# start = time.monotonic()
for i in range(0, len(data), tpuf.upsert_batch_size):
batch = data[i:i+tpuf.upsert_batch_size]
attributes = dict()
for key, values in batch.items():
if key != 'id' and key != 'vector':
attributes[key] = values.tolist()
columns = tpuf.VectorColumns(
ids=batch['id'].tolist(),
vectors=batch['vector'].transform(lambda x: x.tolist()).tolist(),
attributes=attributes
)
# time_diff = time.monotonic() - start
# print(f"Batch {columns.ids[0]}..{columns.ids[-1]} begin:", time_diff, '/', len(batch), '=', len(batch)/time_diff)
# before = time.monotonic()
# print(columns)
self.upsert(columns)
# time_diff = time.monotonic() - before
# print(f"Batch {columns.ids[0]}..{columns.ids[-1]} time:", time_diff, '/', len(batch), '=', len(batch)/time_diff)
# start = time.monotonic()
elif isinstance(data, Iterable):
# start = time.monotonic()
for batch in batch_iter(data, tpuf.upsert_batch_size):
# time_diff = time.monotonic() - start
# print('Batch begin:', time_diff, '/', len(batch), '=', len(batch)/time_diff)
# before = time.monotonic()
self.upsert(batch)
# time_diff = time.monotonic() - before
# print('Batch time:', time_diff, '/', len(batch), '=', len(batch)/time_diff)
# start = time.monotonic()
return
else:
raise ValueError(f'Unsupported data type: {type(data)}')
assert response.get('status', '') == 'OK', f'Invalid upsert() response: {response}'
def delete(self, ids: Union[int, str, List[int], List[str]]) -> None:
"""
Deletes vectors by id.
"""
if isinstance(ids, int) or isinstance(ids, str):
response = self.backend.make_api_request('vectors', self.name, payload={
'ids': [ids],
'vectors': [None],
})
elif isinstance(ids, list):
response = self.backend.make_api_request('vectors', self.name, payload={
'ids': ids,
'vectors': [None] * len(ids),
})
else:
raise ValueError(f'Unsupported ids type: {type(ids)}')
assert response.get('status', '') == 'OK', f'Invalid delete() response: {response}'
@overload
def query(self,
vector: Optional[List[float]] = None,
distance_metric: Optional[str] = None,
top_k: int = 10,
include_vectors: bool = False,
include_attributes: Optional[Union[List[str], bool]] = None,
|
class Namespace:
"""
The Namespace type represents a set of vectors stored in turbopuffer.
Within a namespace, vectors are uniquely referred to by their ID.
All vectors in a namespace must have the same dimensions.
"""
name: str
backend: Backend
def __init__(self, name: str, api_key: Optional[str] = None):
"""
Creates a new turbopuffer.Namespace object for querying the turbopuffer API.
This function does not make any API calls on its own.
Specifying an api_key here will override the global configuration for API calls to this namespace.
"""
self.name = name
self.backend = Backend(api_key)
def __str__(self) -> str:
return f'tpuf-namespace:{self.name}'
@overload
def upsert(self, ids: Union[List[int], List[str]], vectors: List[List[float]], attributes: Optional[Dict[str, List[Optional[str]]]] = None) -> None:
"""
Creates or updates multiple vectors provided in a column-oriented layout.
If this call succeeds, data is guaranteed to be durably written to object storage.
Upserting a vector will overwrite any existing vector with the same ID.
"""
...
@overload
def upsert(self, data: Union[dict, VectorColumns]) -> None:
"""
Creates or updates multiple vectors provided in a column-oriented layout.
If this call succeeds, data is guaranteed to be durably written to object storage.
Upserting a vector will overwrite any existing vector with the same ID.
"""
...
@overload
def upsert(self, data: Union[Iterable[dict], Iterable[VectorRow]]) -> None:
"""
Creates or updates a multiple vectors provided as a list or iterator.
If this call succeeds, data is guaranteed to be durably written to object storage.
Upserting a vector will overwrite any existing vector with the same ID.
"""
...
@overload
def upsert(self, data: VectorResult) -> None:
"""
Creates or updates multiple vectors.
If this call succeeds, data is guaranteed to be durably written to object storage.
Upserting a vector will overwrite any existing vector with the same ID.
"""
...
def upsert(self, data=None, ids=None, vectors=None, attributes=None) -> None:
if data is None:
if ids is not None and vectors is not None:
return self.upsert(VectorColumns(ids=ids, vectors=vectors, attributes=attributes))
else:
raise ValueError('upsert() requires both ids= and vectors= be set.')
elif isinstance(data, VectorColumns):
# "if None in data.vectors:" is not supported because data.vectors might be a list of np.ndarray
# None == pd.ndarray is an ambiguous comparison in this case.
for vec in data.vectors:
if vec is None:
raise ValueError('upsert() call would result in a vector deletion, use Namespace.delete([ids...]) instead.')
response = self.backend.make_api_request('vectors', self.name, payload=data.__dict__)
elif isinstance(data, VectorRow):
raise ValueError('upsert() should be called on a list of vectors, got single vector.')
elif isinstance(data, list):
if isinstance(data[0], dict):
return self.upsert(VectorColumns.from_rows(data))
elif isinstance(data[0], VectorRow):
return self.upsert(VectorColumns.from_rows(data))
elif isinstance(data[0], VectorColumns):
for columns in data:
self.upsert(columns)
return
else:
raise ValueError(f'Unsupported list data type: {type(data[0])}')
elif isinstance(data, dict):
if 'id' in data:
raise ValueError('upsert() should be called on a list of vectors, got single vector.')
elif 'ids' in data:
return self.upsert(VectorColumns.from_dict(data))
else:
raise ValueError('Provided dict is missing ids.')
elif 'pandas' in sys.modules and isinstance(data, sys.modules['pandas'].DataFrame):
if 'id' not in data.keys():
raise ValueError('Provided pd.DataFrame is missing an id column.')
if 'vector' not in data.keys():
raise ValueError('Provided pd.DataFrame is missing a vector column.')
# start = time.monotonic()
for i in range(0, len(data), tpuf.upsert_batch_size):
batch = data[i:i+tpuf.upsert_batch_size]
attributes = dict()
for key, values in batch.items():
if key != 'id' and key != 'vector':
attributes[key] = values.tolist()
columns = tpuf.VectorColumns(
ids=batch['id'].tolist(),
vectors=batch['vector'].transform(lambda x: x.tolist()).tolist(),
attributes=attributes
)
# time_diff = time.monotonic() - start
# print(f"Batch {columns.ids[0]}..{columns.ids[-1]} begin:", time_diff, '/', len(batch), '=', len(batch)/time_diff)
# before = time.monotonic()
# print(columns)
self.upsert(columns)
# time_diff = time.monotonic() - before
# print(f"Batch {columns.ids[0]}..{columns.ids[-1]} time:", time_diff, '/', len(batch), '=', len(batch)/time_diff)
# start = time.monotonic()
elif isinstance(data, Iterable):
# start = time.monotonic()
for batch in batch_iter(data, tpuf.upsert_batch_size):
# time_diff = time.monotonic() - start
# print('Batch begin:', time_diff, '/', len(batch), '=', len(batch)/time_diff)
# before = time.monotonic()
self.upsert(batch)
# time_diff = time.monotonic() - before
# print('Batch time:', time_diff, '/', len(batch), '=', len(batch)/time_diff)
# start = time.monotonic()
return
else:
raise ValueError(f'Unsupported data type: {type(data)}')
assert response.get('status', '') == 'OK', f'Invalid upsert() response: {response}'
def delete(self, ids: Union[int, str, List[int], List[str]]) -> None:
"""
Deletes vectors by id.
"""
if isinstance(ids, int) or isinstance(ids, str):
response = self.backend.make_api_request('vectors', self.name, payload={
'ids': [ids],
'vectors': [None],
})
elif isinstance(ids, list):
response = self.backend.make_api_request('vectors', self.name, payload={
'ids': ids,
'vectors': [None] * len(ids),
})
else:
raise ValueError(f'Unsupported ids type: {type(ids)}')
assert response.get('status', '') == 'OK', f'Invalid delete() response: {response}'
@overload
def query(self,
vector: Optional[List[float]] = None,
distance_metric: Optional[str] = None,
top_k: int = 10,
include_vectors: bool = False,
include_attributes: Optional[Union[List[str], bool]] = None, | filters: Optional[Dict[str, List[FilterTuple]]] = None) -> VectorResult: | 6 | 2023-12-12 06:52:27+00:00 | 8k |
Prismadic/magnet | magnet/utils/mlx/mistral.py | [
{
"identifier": "_f",
"path": "magnet/utils/globals.py",
"snippet": "def _f(\n tag: str = None,\n body: any = None,\n no_print: bool = False,\n luxe: bool = False\n):\n \"\"\"\n The `_f` function is a logging utility that prints messages with different tags and colors based on\n the... | import json
import time
import mlx.core as mx
import mlx.nn as nn
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple
from mlx.utils import tree_unflatten
from sentencepiece import SentencePieceProcessor
from magnet.utils.globals import _f
from magnet.utils.data_classes import MistralArgs | 3,903 | @property
def eos_id(self) -> int:
"""
Returns the ID of the end-of-sentence token in the tokenizer's model.
Returns:
int: The ID of the end-of-sentence token in the tokenizer's model.
"""
return self._model.eos_id()
@property
def pad_id(self) -> int:
"""
Returns the ID of the padding token in the tokenizer's model.
Returns:
int: The ID of the padding token in the tokenizer's model.
"""
return self._model.pad_id()
def encode(self, s: str) -> List[int]:
return [self._model.bos_id(), *self._model.encode(s)]
def decode(self, t: List[int]) -> str:
"""
Decodes a list of token IDs into a string.
Args:
t (List[int]): A list of token IDs to be decoded into a string.
Returns:
str: The decoded string corresponding to the input list of token IDs.
"""
out = self._model.decode(t)
if t and self._model.id_to_piece(t[0])[0] == self._sep:
return " " + out
return out
def load_model(folder: str):
"""
Load a pre-trained language model and tokenizer from a specified folder.
Args:
folder (str): The path to the folder containing the pre-trained model.
Returns:
model (Mistral): The loaded pre-trained language model.
tokenizer (Tokenizer): The initialized tokenizer.
"""
model_path = Path(folder)
tokenizer = Tokenizer(str(model_path / "tokenizer.model"))
with open(model_path / "config.json", "r") as f:
config = json.loads(f.read())
config.pop("sliding_window", None)
config.pop("model_type", None)
quantization = config.pop("quantization", None)
model_args = MistralArgs(**config)
weights = mx.load(str(model_path / "weights.npz"))
weights = tree_unflatten(list(weights.items()))
model = Mistral(model_args)
if quantization is not None:
nn.QuantizedLinear.quantize_module(model, **quantization)
model.update(weights)
mx.eval(model.parameters())
return model, tokenizer
def infer(prompt: mx.array, model: Mistral, temp: Optional[float] = 0.0):
"""
Generates a sequence of tokens using a pre-trained language model.
Args:
prompt (mx.array): An mxnet array representing the initial prompt for generating the sequence.
model (Mistral): An instance of the Mistral class, which is a pre-trained language model.
temp (float, optional): A float representing the temperature parameter for controlling the randomness of the generated sequence. Defaults to 0.0.
Yields:
mx.array: Generated tokens, one by one.
Example:
prompt = mx.array(tokenizer.encode("The cat"))
model = Mistral(args)
temp = 0.8
for token in infer(prompt, model, temp):
print(token)
"""
def sample(logits):
if temp == 0:
return mx.argmax(logits, axis=-1)
else:
return mx.random.categorical(logits * (1 / temp))
logits, cache = model(prompt[None])
y = sample(logits[:, -1, :])
yield y
while True:
logits, cache = model(y[:, None], cache)
y = sample(logits.squeeze(1))
yield y
def generate(payload):
"""
Generate a sequence of tokens using a pre-trained language model.
Args:
payload (dict): A dictionary containing the following keys:
- 'seed' (int): The random seed for reproducibility.
- 'model_path' (str): The path to the pre-trained model.
- 'prompt' (str): The initial prompt for generating the sequence.
- 'temp' (float): The temperature parameter for controlling the randomness of the generated sequence.
- 'max_tokens' (int): The maximum number of tokens to generate.
Returns:
str: The generated sequence of tokens decoded into a string.
"""
mx.random.seed(payload['seed'])
| # Copyright © 2023 Apple Inc.
# docstrings - 2023 Prismadic, LLC.
class RMSNorm(nn.Module):
def __init__(self, dims: int, eps: float = 1e-5):
"""
Initializes the attributes of the RMSNorm class.
Args:
dims (int): The number of dimensions for the weight attribute.
eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-5.
Returns:
None
"""
super(RMSNorm, self).__init__()
self.dims = dims
self.eps = eps
def forward(self, x):
"""
Applies RMS normalization to the input array.
Args:
x (torch.Tensor): The input array to be normalized.
Returns:
torch.Tensor: The normalized array.
"""
return x / torch.sqrt(torch.mean(x**2, dim=self.dims, keepdim=True) + self.eps)
super().__init__()
self.weight = mx.ones((dims,))
self.eps = eps
def _norm(self, x):
return x * mx.rsqrt(x.square().mean(-1, keepdims=True) + self.eps)
def __call__(self, x):
"""
Apply RMS normalization to the input array `x` and return the normalized output.
Args:
x (ndarray): The input array to be normalized.
Returns:
ndarray: The normalized output array, which is the result of applying RMS normalization to the input array `x`.
"""
output = self._norm(x.astype(mx.float32)).astype(x.dtype)
return self.weight * output
class Attention(nn.Module):
"""
The `Attention` class is responsible for performing the attention computation in a transformer block.
Args:
args (MistralArgs): An instance of `MistralArgs` that contains the arguments for the attention computation.
Attributes:
args (MistralArgs): An instance of `MistralArgs` that contains the arguments for the attention computation.
n_heads (int): The number of attention heads.
n_kv_heads (int): The number of key-value attention heads.
repeats (int): The number of times to repeat the key-value attention heads.
scale (float): The scaling factor for the attention scores.
wq (nn.Linear): Linear layer for the query projection.
wk (nn.Linear): Linear layer for the key projection.
wv (nn.Linear): Linear layer for the value projection.
wo (nn.Linear): Linear layer for the output projection.
rope (nn.RoPE): Instance of `nn.RoPE` class for relative positional encoding.
"""
def __init__(self, args: MistralArgs):
super().__init__()
self.args = args
self.n_heads: int = args.n_heads
self.n_kv_heads: int = args.n_kv_heads
self.repeats = self.n_heads // self.n_kv_heads
self.scale = self.args.head_dim**-0.5
self.wq = nn.Linear(args.dim, args.n_heads * args.head_dim, bias=False)
self.wk = nn.Linear(args.dim, args.n_kv_heads *
args.head_dim, bias=False)
self.wv = nn.Linear(args.dim, args.n_kv_heads *
args.head_dim, bias=False)
self.wo = nn.Linear(args.n_heads * args.head_dim, args.dim, bias=False)
self.rope = nn.RoPE(args.head_dim, traditional=True)
def __call__(
self,
x: mx.array,
mask: Optional[mx.array] = None,
cache: Optional[Tuple[mx.array, mx.array]] = None,
) -> mx.array:
"""
Perform attention computation on the input array `x`.
Args:
x (mx.array): The input array of shape (batch_size, sequence_length, dimension).
mask (Optional[mx.array]): An optional mask array of shape (batch_size, sequence_length) to mask certain elements in the input array.
cache (Optional[Tuple[mx.array, mx.array]]): An optional cache tuple containing two arrays of shape (batch_size, sequence_length, dimension) to store intermediate results.
Returns:
mx.array: The final output array of shape (batch_size, sequence_length, dimension).
Optional[Tuple[mx.array, mx.array]]: The updated cache tuple containing two arrays of shape (batch_size, sequence_length, dimension).
"""
B, L, D = x.shape
queries, keys, values = self.wq(x), self.wk(x), self.wv(x)
# Prepare the queries, keys and values for the attention computation
queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3)
keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3)
values = values.reshape(
B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3)
def repeat(a):
a = mx.concatenate([mx.expand_dims(a, 2)] * self.repeats, axis=2)
return a.reshape([B, self.n_heads, L, -1])
keys, values = map(repeat, (keys, values))
if cache is not None:
key_cache, value_cache = cache
queries = self.rope(queries, offset=key_cache.shape[2])
keys = self.rope(keys, offset=key_cache.shape[2])
keys = mx.concatenate([key_cache, keys], axis=2)
values = mx.concatenate([value_cache, values], axis=2)
else:
queries = self.rope(queries)
keys = self.rope(keys)
scores = (queries * self.scale) @ keys.transpose(0, 1, 3, 2)
if mask is not None:
scores += mask
scores = mx.softmax(scores.astype(mx.float32),
axis=-1).astype(scores.dtype)
output = (scores @ values).transpose(0, 2, 1, 3).reshape(B, L, -1)
return self.wo(output), (keys, values)
class FeedForward(nn.Module):
"""
Applies a feed-forward neural network to the input data.
Args:
args (MistralArgs): The arguments for the model.
Example Usage:
args = MistralArgs(dim=512, hidden_dim=2048)
feed_forward = FeedForward(args)
output = feed_forward(input)
"""
def __init__(self, args: MistralArgs):
"""
Initializes the FeedForward class.
Args:
args (MistralArgs): The arguments for the model.
"""
super().__init__()
self.w1 = nn.Linear(args.dim, args.hidden_dim, bias=False)
self.w2 = nn.Linear(args.hidden_dim, args.dim, bias=False)
self.w3 = nn.Linear(args.dim, args.hidden_dim, bias=False)
def __call__(self, x) -> mx.array:
"""
Applies the feed-forward neural network to the input data.
Args:
x: The input data.
Returns:
mx.array: The output of the feed-forward neural network.
"""
return self.w2(nn.silu(self.w1(x)) * self.w3(x))
class TransformerBlock(nn.Module):
"""
Initializes the attributes of the TransformerBlock class and creates instances of other required classes.
Args:
args (MistralArgs): An instance of the MistralArgs class that contains the arguments for the transformer block.
Example Usage:
args = MistralArgs(dim=512, n_heads=8, norm_eps=1e-5)
block = TransformerBlock(args)
Flow:
1. Initialize the n_heads attribute of the TransformerBlock instance with the value from args.n_heads.
2. Initialize the dim attribute of the TransformerBlock instance with the value from args.dim.
3. Create an instance of the Attention class and assign it to the attention attribute of the TransformerBlock instance, passing args as an argument.
4. Create an instance of the FeedForward class and assign it to the feed_forward attribute of the TransformerBlock instance, passing args as an argument.
5. Create an instance of the RMSNorm class and assign it to the attention_norm attribute of the TransformerBlock instance, passing args.dim and args.norm_eps as arguments.
6. Create an instance of the RMSNorm class and assign it to the ffn_norm attribute of the TransformerBlock instance, passing args.dim and args.norm_eps as arguments.
7. Assign the args argument to the args attribute of the TransformerBlock instance.
Returns:
None
"""
def __init__(self, args: MistralArgs):
super().__init__()
self.n_heads = args.n_heads
self.dim = args.dim
self.attention = Attention(args)
self.feed_forward = FeedForward(args=args)
self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
self.args = args
def __call__(
self,
x: mx.array,
mask: Optional[mx.array] = None,
cache: Optional[Tuple[mx.array, mx.array]] = None,
) -> mx.array:
"""
Apply the TransformerBlock to the input array.
Args:
x (mx.array): The input array of shape (batch_size, sequence_length).
mask (Optional[mx.array]): An optional mask array of shape (batch_size, sequence_length) to mask certain elements in the input array.
cache (Optional[Tuple[mx.array, mx.array]]): An optional cache tuple containing two arrays of shape (batch_size, sequence_length, hidden_dim) to store intermediate results.
Returns:
out (mx.array): The final output array of shape (batch_size, sequence_length, hidden_dim).
cache (Optional[Tuple[mx.array, mx.array]]): The updated cache tuple containing two arrays of shape (batch_size, sequence_length, hidden_dim).
"""
r, cache = self.attention(self.attention_norm(x), mask, cache)
h = x + r
r = self.feed_forward(self.ffn_norm(h))
out = h + r
return out, cache
class Mistral(nn.Module):
    """
    A decoder-only transformer language model.

    Args:
        args (MistralArgs): The model arguments that define the dimensions and
            parameters of the language model.

    Attributes:
        args (MistralArgs): The model arguments.
        vocab_size (int): The size of the vocabulary.
        n_layers (int): The number of transformer blocks in the model.
        tok_embeddings (nn.Embedding): The token embedding layer.
        layers (List[TransformerBlock]): The stack of transformer blocks.
        norm (RMSNorm): The final RMS normalization layer.
        output (nn.Linear): The projection from hidden states to vocab logits.
    """

    def __init__(self, args: MistralArgs):
        super().__init__()
        self.args = args
        self.vocab_size = args.vocab_size
        self.n_layers = args.n_layers
        assert self.vocab_size > 0, "vocab_size must be positive"
        self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
        self.layers = [TransformerBlock(args=args) for _ in range(args.n_layers)]
        self.norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.output = nn.Linear(args.dim, args.vocab_size, bias=False)

    def __call__(self, inputs: mx.array, cache=None):
        """
        Run the model over a batch of token ids.

        Args:
            inputs (mx.array): Input token ids of shape (batch_size, sequence_length).
            cache (list, optional): Per-layer KV cache from a previous call.
                Default is None, which allocates a fresh (empty) cache.

        Returns:
            mx.array: Logits of shape (batch_size, sequence_length, vocab_size).
            list: The updated per-layer cache, one entry per transformer block.
        """
        h = self.tok_embeddings(inputs)

        # A causal mask is only needed when processing more than one token at
        # a time (i.e. the prompt); single-token decode steps rely on the cache.
        mask = None
        if h.shape[1] > 1:
            mask = nn.MultiHeadAttention.create_additive_causal_mask(h.shape[1])
            mask = mask.astype(h.dtype)

        if cache is None:
            cache = [None] * len(self.layers)

        for e, layer in enumerate(self.layers):
            h, cache[e] = layer(h, mask, cache[e])

        return self.output(self.norm(h)), cache
class Tokenizer:
    """
    Thin wrapper around a SentencePiece tokenizer model.

    Loads a SentencePiece model from a given file path and exposes encoding
    and decoding helpers plus the special-token ids used during generation.

    Args:
        model_path (str): The file path of the SentencePiece model.

    Raises:
        AssertionError: If the file specified by `model_path` does not exist.
        AssertionError: If the vocabulary size of the model does not match the
            number of pieces in the model.
    """

    def __init__(self, model_path: str):
        assert Path(model_path).exists(), model_path
        self._sp = SentencePieceProcessor(model_file=model_path)
        # SentencePiece's word-boundary marker (U+2581, "lower one eighth block").
        self._space = "▁"
        assert self._sp.vocab_size() == self._sp.get_piece_size()

    @property
    def eos_id(self) -> int:
        """Return the ID of the end-of-sentence token."""
        return self._sp.eos_id()

    @property
    def pad_id(self) -> int:
        """Return the ID of the padding token."""
        return self._sp.pad_id()

    def encode(self, s: str) -> List[int]:
        """Encode a string into token IDs, prepending the BOS token."""
        tokens = self._sp.encode(s)
        return [self._sp.bos_id()] + tokens

    def decode(self, t: List[int]) -> str:
        """
        Decode a list of token IDs into a string.

        Args:
            t (List[int]): Token IDs to decode.

        Returns:
            str: The decoded string; a leading space is restored when the
            first piece begins with the SentencePiece word-boundary marker.
        """
        text = self._sp.decode(t)
        if t and self._sp.id_to_piece(t[0])[0] == self._space:
            text = " " + text
        return text
def load_model(folder: str):
    """
    Load a pre-trained Mistral model and tokenizer from a specified folder.

    The folder is expected to contain ``tokenizer.model`` (SentencePiece),
    ``config.json`` (model hyperparameters) and ``weights.npz`` (weights).

    Args:
        folder (str): The path to the folder containing the pre-trained model.

    Returns:
        model (Mistral): The loaded (and possibly quantized) language model.
        tokenizer (Tokenizer): The initialized tokenizer.
    """
    model_path = Path(folder)
    tokenizer = Tokenizer(str(model_path / "tokenizer.model"))

    with open(model_path / "config.json", "r", encoding="utf-8") as f:
        config = json.load(f)  # parse directly from the file object
    # Drop config keys that MistralArgs does not accept.
    config.pop("sliding_window", None)
    config.pop("model_type", None)
    quantization = config.pop("quantization", None)
    model_args = MistralArgs(**config)

    weights = mx.load(str(model_path / "weights.npz"))
    weights = tree_unflatten(list(weights.items()))

    model = Mistral(model_args)
    if quantization is not None:
        # Swap in quantized layers before loading the quantized weights.
        nn.QuantizedLinear.quantize_module(model, **quantization)
    model.update(weights)
    mx.eval(model.parameters())
    return model, tokenizer
def infer(prompt: mx.array, model: Mistral, temp: Optional[float] = 0.0):
    """
    Lazily generate a sequence of tokens from a pre-trained language model.

    Args:
        prompt (mx.array): The initial prompt token ids for generation.
        model (Mistral): A pre-trained Mistral language model.
        temp (float, optional): Sampling temperature; 0.0 means greedy
            (argmax) decoding. Defaults to 0.0.

    Yields:
        mx.array: Generated tokens, one by one.

    Example:
        prompt = mx.array(tokenizer.encode("The cat"))
        model = Mistral(args)
        temp = 0.8
        for token in infer(prompt, model, temp):
            print(token)
    """

    def pick_token(logits):
        # Greedy decoding at temperature zero, temperature sampling otherwise.
        return (
            mx.argmax(logits, axis=-1)
            if temp == 0
            else mx.random.categorical(logits * (1 / temp))
        )

    # Prime the KV cache with the full prompt and emit the first token.
    logits, cache = model(prompt[None])
    token = pick_token(logits[:, -1, :])
    yield token

    # Then feed back one token at a time, reusing the cache.
    while True:
        logits, cache = model(token[:, None], cache)
        token = pick_token(logits.squeeze(1))
        yield token
def generate(payload):
"""
Generate a sequence of tokens using a pre-trained language model.
Args:
payload (dict): A dictionary containing the following keys:
- 'seed' (int): The random seed for reproducibility.
- 'model_path' (str): The path to the pre-trained model.
- 'prompt' (str): The initial prompt for generating the sequence.
- 'temp' (float): The temperature parameter for controlling the randomness of the generated sequence.
- 'max_tokens' (int): The maximum number of tokens to generate.
Returns:
str: The generated sequence of tokens decoded into a string.
"""
mx.random.seed(payload['seed']) | _f('wait', f"loading model from {payload['model_path']}") | 0 | 2023-12-12 14:11:21+00:00 | 8k |
happyapplehorse/ai-care | src/ai_care/ai_care.py | [
{
"identifier": "Ability",
"path": "src/ai_care/abilities.py",
"snippet": "class Ability:\n def __init__(self, ai_care: AICare):\n self.ai_care = ai_care\n self.abilities: dict = {}\n self._register_abilities()\n\n def _register_abilities(self) -> None:\n for name, meth... | import itertools
import logging
import time
import threading
from abc import ABCMeta, abstractmethod
from typing import Callable, Any, Generator, TypedDict, Literal, cast
from .abilities import Ability
from .choice_execute import choice_execute
from .parse_response import parse_response
from .render_prompt import render_basic_prompt | 4,682 | from __future__ import annotations
logger = logging.getLogger("ai_care")
ChatContext = Any
ConfigKey = Literal["delay", "ask_later_count_limit", "ask_depth", "n_chat_intervals"]
class AICare:
def __init__(self) -> None:
self.timers: dict[int, AICareTimer] = {}
self.detectors: dict[str, Detector] = {}
self.sensors: dict[str, dict] = {}
| from __future__ import annotations
logger = logging.getLogger("ai_care")
ChatContext = Any
ConfigKey = Literal["delay", "ask_later_count_limit", "ask_depth", "n_chat_intervals"]
class AICare:
def __init__(self) -> None:
self.timers: dict[int, AICareTimer] = {}
self.detectors: dict[str, Detector] = {}
self.sensors: dict[str, dict] = {} | self.ability: Ability = Ability(self) | 0 | 2023-12-08 05:45:07+00:00 | 8k |
neu-spiral/multi-label-emg | multi_label_emg/train.py | [
{
"identifier": "load_data_dict",
"path": "multi_label_emg/data.py",
"snippet": "def load_data_dict():\n \"\"\"\n Loads features and labels from subject folders into a single dictionary as described below.\n NOTE - preprocessing should be been done first to extract features from raw data (see R... | import sys
import numpy as np
import plotly.graph_objects as go
import argparse
from loguru import logger
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KernelDensity, KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.svm import SVC
from multi_label_emg.data import load_data_dict
from multi_label_emg.models import AvgPairs, ElementwiseMaxPairs, ParallelA, ParallelB
from multi_label_emg.utils import (
NO_DIR_IDX,
NO_MOD_IDX,
RESULTS_DIR,
canonical_coords,
confusion_matrix,
str2bool,
) | 6,869 | k_smallest_idx = np.argpartition(dists, n_per_class)[:n_per_class]
subset_idx = idx[k_smallest_idx]
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_doubles_spaced_quantiles(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, rank items by their distance to the class mean,
and take items with ranks 1, K+1, 2K+1.
The spacing K will be approx (class_size / n_per_class)
"""
# Find class means
class_means = {}
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_means[(d, m)] = np.mean(features_aug[idx], axis=0)
# Subset each class by taking items closest to mean
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
class_mean = class_means[(d, m)]
idx = np.where((labels_2d == (d, m)).all(-1))[0]
dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
spacing = int(np.floor(len(idx) / n_per_class))
# Since we use floor, we step slightly too little.
# In case this gives us extra items, we also truncate.
subset_idx = idx[ranked_distances[::spacing][:n_per_class]]
n_subset = len(subset_idx)
assert abs(n_subset - n_per_class) <= 1
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_dir_mod(
method: str, fraction_doubles_per_class: float, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray
):
# Should have 1-hot vector labels
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
# Figure out how many items we have per class
# Then use fraction_doubles_per_class to figure out how many doubles we want
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
n_per_class = int(np.round(fraction_doubles_per_class * np.mean(class_sizes)))
n_per_class = min(n_per_class, np.min(class_sizes))
logger.info(f"Initial class sizes: {class_sizes}, n_per_class: {n_per_class}")
# For each class, fit a multivariate gaussian and sample the requested number of points
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_mean = np.mean(features[idx], axis=0)
if method == "subsetInput_uniform":
subset_idx = np.random.choice(idx, n_per_class, replace=False)
elif method == "subsetInput_near_mean":
dists = np.linalg.norm(features[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
subset_idx = idx[ranked_distances[:n_per_class]]
elif method == "subsetInput_spaced_quantiles":
dists = np.linalg.norm(features[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
spacing = int(np.floor(len(idx) / n_per_class))
# Since we use floor, we step slightly too little.
# In case this gives us extra items, we also truncate.
subset_idx = idx[ranked_distances[::spacing][:n_per_class]]
n_subset = len(subset_idx)
assert abs(n_subset - n_per_class) <= 1
res_x.append(features[subset_idx])
res_y_dir.append(dir_labels[subset_idx])
res_y_mod.append(mod_labels[subset_idx])
res_x = np.concatenate(res_x)
res_y_dir = np.concatenate(res_y_dir)
res_y_mod = np.concatenate(res_y_mod)
labels_2d = np.stack([res_y_dir.argmax(-1), res_y_mod.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Class sizes after subset: {class_sizes}")
return res_x, res_y_dir, res_y_mod
def get_augmented_doubles(
method: str,
feature_combine_type: str,
fraction_doubles_per_class: float,
features: np.ndarray,
dir_labels: np.ndarray,
mod_labels: np.ndarray,
):
if feature_combine_type == "avg":
|
def get_name(
subject: str,
seed: int,
parallel_model_type: str,
clf_name: str,
doubles_method: str,
fraction_doubles_per_class: float,
singles_method: str,
rel_fraction_singles_per_class: float,
include_doubles_in_train: bool,
feature_combine_type: str,
):
return "__".join(
[
f"subj={subject}",
f"seed={seed}",
f"par={parallel_model_type}",
f"clf={clf_name}",
f"doubles={doubles_method}",
f"frac_doubles={fraction_doubles_per_class}",
f"singles={singles_method}",
f"frac_singles={rel_fraction_singles_per_class}",
f"incl_doubles={include_doubles_in_train}",
f"feat_type={feature_combine_type}",
]
)
def plot_confusion_matrix(data: np.ndarray):
def make_text(cm):
text = []
for v in cm.flatten():
text.append(f"{round(v, 2)}")
return np.array(text).reshape(cm.shape)
coords, coords_str = canonical_coords()
text = make_text(data)
fig = go.Figure()
fig.update_layout(
# margin=margin,
xaxis=dict(
title="Predicted",
tickangle=-45,
tickmode="array",
ticktext=coords_str,
tickvals=list(range(len(coords_str))),
constrain="domain",
),
yaxis=dict(
title="Actual",
tickmode="array",
ticktext=coords_str,
tickvals=list(range(len(coords_str))),
autorange="reversed",
scaleanchor="x",
scaleratio=1,
constrain="domain",
),
)
fig.add_trace(
go.Heatmap(z=data, text=text, texttemplate="%{text}", zmin=0, zmax=1, colorscale="Blues", showscale=False)
)
return fig
def subset_doubles_uniform(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, take n_per_class items uniformly at random"""
res_x, res_y_dir, res_y_mod = [], [], []
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
subset_idx = np.random.choice(idx, size=n_per_class, replace=False)
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_doubles_near_mean(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, take n_per_class items closest to the mean of these synthetic items"""
# Find class means
class_means = {}
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_means[(d, m)] = np.mean(features_aug[idx], axis=0)
# Subset each class by taking items closest to mean
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
class_mean = class_means[(d, m)]
idx = np.where((labels_2d == (d, m)).all(-1))[0]
dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1)
k_smallest_idx = np.argpartition(dists, n_per_class)[:n_per_class]
subset_idx = idx[k_smallest_idx]
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_doubles_spaced_quantiles(
n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray
):
"""For each class, rank items by their distance to the class mean,
and take items with ranks 1, K+1, 2K+1.
The spacing K will be approx (class_size / n_per_class)
"""
# Find class means
class_means = {}
labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1)
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_means[(d, m)] = np.mean(features_aug[idx], axis=0)
# Subset each class by taking items closest to mean
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
class_mean = class_means[(d, m)]
idx = np.where((labels_2d == (d, m)).all(-1))[0]
dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
spacing = int(np.floor(len(idx) / n_per_class))
# Since we use floor, we step slightly too little.
# In case this gives us extra items, we also truncate.
subset_idx = idx[ranked_distances[::spacing][:n_per_class]]
n_subset = len(subset_idx)
assert abs(n_subset - n_per_class) <= 1
res_x.append(features_aug[subset_idx])
res_y_dir.append(dir_labels_aug[subset_idx])
res_y_mod.append(mod_labels_aug[subset_idx])
features_aug = np.concatenate(res_x)
dir_labels_aug = np.concatenate(res_y_dir)
mod_labels_aug = np.concatenate(res_y_mod)
return features_aug, dir_labels_aug, mod_labels_aug
def subset_dir_mod(
method: str, fraction_doubles_per_class: float, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray
):
# Should have 1-hot vector labels
assert dir_labels.ndim == 2
assert mod_labels.ndim == 2
# check these are all singles
items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX
items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX
items_with_both = np.logical_and(items_with_dir, items_with_mod)
assert np.sum(items_with_both) == 0
labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1)
# Figure out how many items we have per class
# Then use fraction_doubles_per_class to figure out how many doubles we want
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
n_per_class = int(np.round(fraction_doubles_per_class * np.mean(class_sizes)))
n_per_class = min(n_per_class, np.min(class_sizes))
logger.info(f"Initial class sizes: {class_sizes}, n_per_class: {n_per_class}")
# For each class, fit a multivariate gaussian and sample the requested number of points
res_x, res_y_dir, res_y_mod = [], [], []
for d, m in np.unique(labels_2d, axis=0):
idx = np.where((labels_2d == (d, m)).all(-1))[0]
class_mean = np.mean(features[idx], axis=0)
if method == "subsetInput_uniform":
subset_idx = np.random.choice(idx, n_per_class, replace=False)
elif method == "subsetInput_near_mean":
dists = np.linalg.norm(features[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
subset_idx = idx[ranked_distances[:n_per_class]]
elif method == "subsetInput_spaced_quantiles":
dists = np.linalg.norm(features[idx] - class_mean, axis=-1)
ranked_distances = np.argsort(dists)
spacing = int(np.floor(len(idx) / n_per_class))
# Since we use floor, we step slightly too little.
# In case this gives us extra items, we also truncate.
subset_idx = idx[ranked_distances[::spacing][:n_per_class]]
n_subset = len(subset_idx)
assert abs(n_subset - n_per_class) <= 1
res_x.append(features[subset_idx])
res_y_dir.append(dir_labels[subset_idx])
res_y_mod.append(mod_labels[subset_idx])
res_x = np.concatenate(res_x)
res_y_dir = np.concatenate(res_y_dir)
res_y_mod = np.concatenate(res_y_mod)
labels_2d = np.stack([res_y_dir.argmax(-1), res_y_mod.argmax(-1)], axis=-1)
class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1]
logger.info(f"Class sizes after subset: {class_sizes}")
return res_x, res_y_dir, res_y_mod
def get_augmented_doubles(
method: str,
feature_combine_type: str,
fraction_doubles_per_class: float,
features: np.ndarray,
dir_labels: np.ndarray,
mod_labels: np.ndarray,
):
if feature_combine_type == "avg": | aug = AvgPairs(-1) | 1 | 2023-12-12 16:50:34+00:00 | 8k |
ebb-earl-co/tidal-wave | tidal_wave/track.py | [
{
"identifier": "manifester",
"path": "tidal_wave/dash.py",
"snippet": "class TidalManifestException(Exception):\nclass S:\nclass SegmentTimeline:\nclass JSONDASHManifest:\nclass XMLDASHManifest:\n def __post_init__(self):\n def __post_init__(self):\n def build_urls(self, session: Session) -> O... | from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Iterable, List, Optional
from mutagen.mp4 import MP4Cover
from requests import Session
from .dash import manifester, JSONDASHManifest, Manifest, XMLDASHManifest
from .media import af_aq, AudioFormat, TAG_MAPPING
from .models import (
AlbumsEndpointResponseJSON,
ArtistsBioResponseJSON,
TracksCreditsResponseJSON,
TracksEndpointResponseJSON,
TracksEndpointStreamResponseJSON,
TracksLyricsResponseJSON,
)
from .requesting import (
fetch_content_length,
http_request_range_headers,
request_albums,
request_artist_bio,
request_credits,
request_lyrics,
request_stream,
request_tracks,
)
from .utils import download_artist_image, download_cover_image, temporary_file
import json
import logging
import re
import shlex
import shutil
import subprocess
import sys
import mutagen
import ffmpeg | 6,260 | self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream(
session, self.track_id, aq
)
def set_manifest(self):
"""This method sets self.manifest and self.codec"""
self.manifest: Manifest = manifester(self.stream)
# https://dashif.org/codecs/audio/
if self.manifest.codecs == "flac":
self.codec = "flac"
elif self.manifest.codecs == "mqa":
self.codec = "flac"
elif self.manifest.codecs == "mha1": # Sony 360 Reality Audio
self.codec = "mka"
elif self.manifest.codecs == "mp4a.40.5": # HE-AAC
self.codec = "m4a"
elif self.manifest.codecs == "mp4a.40.29": # HE-AAC v2
self.codec = "m4a"
elif self.manifest.codecs == "mp4a.40.2": # AAC-LC
self.codec = "m4a"
elif self.manifest.codecs == "eac3": # Enhanced AC-3
self.codec = "m4a"
elif self.manifest.codecs == "mp4a.40.34": # MP3
self.codec = "mp3"
def set_album_dir(self, out_dir: Path):
"""This method sets self.album_dir, based on self.album and
out_dir. In particular, self.album_dir is a subdirectory of out_dir
based on the name of the album's artist"""
artist_substring: str = self.album.artist.name.replace("..", "")
album_substring: str = (
f"{self.album.name} " f"[{self.album.id}] [{self.album.release_date.year}]"
)
self.album_dir: Path = out_dir / artist_substring / album_substring
self.album_dir.mkdir(parents=True, exist_ok=True)
if self.album.number_of_volumes > 1:
volume_substring: str = f"Volume {self.metadata.volume_number}"
(self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True)
def set_filename(self, audio_format: AudioFormat):
"""This method sets self.filename. It's based on self.metadata
as well as audio_format. Additionally, if the available codecs in
self.manifest don't match audio_format, warnings are logged"""
_track_part: str = f"{self.metadata.track_number:02d} - {self.metadata.name}"
if audio_format == AudioFormat.low:
track_substring: str = f"{_track_part} [L]"
elif audio_format == AudioFormat.high:
track_substring: str = f"{_track_part} [H]"
elif audio_format == AudioFormat.lossless:
track_substring: str = f"{_track_part} [CD]"
elif audio_format == AudioFormat.mqa:
track_substring: str = f"{_track_part} [Q]"
elif audio_format == AudioFormat.hi_res:
track_substring: str = f"{_track_part} [HiRes]"
elif audio_format == AudioFormat.dolby_atmos:
track_substring: str = f"{_track_part} [A]"
elif audio_format == AudioFormat.sony_360_reality_audio:
track_substring: str = f"{_track_part} [360]"
else:
track_substring: str = _track_part
# Check for MQA masquerading as HiRes here
if audio_format == AudioFormat.hi_res:
if self.manifest.codecs == "mqa":
logger.warning(
"Even though HiRes audio format was requested, this track is only "
"available in MQA format. TIDAL regards this as 'HiRes' even though "
"it is probably only lossless; i.e. 16-bit 44.1 kHz quality. "
"Downloading of track will continue, but it will be marked as MQA."
)
self.filename: Optional[str] = f"{_track_part} [Q].{self.codec}"
elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100):
logger.warning(
"Even though HiRes audio format was requested, and TIDAL responded to "
"that request without error, this track is only available in lossless "
"format; i.e. 16-bit 44.1 kHz quality. Downloading of track will "
"continue, but it will be marked as Lossless ([CD])."
)
self.filename: Optional[str] = f"{_track_part} [CD].{self.codec}"
else:
self.filename: Optional[str] = f"{track_substring}.{self.codec}"
else:
self.filename: Optional[str] = f"{track_substring}.{self.codec}"
# for use in playlist file ordering
self.trackname: str = re.match(r"(?:\d{2,3} - )(.+?$)", self.filename).groups()[
0
]
def set_outfile(self):
"""Uses self.album_dir and self.metadata and self.filename
to craft the pathlib.Path object, self.outfile, that is a
reference to where the track will be written on disk."""
if self.album.number_of_volumes > 1:
self.outfile: Path = (
self.album_dir / f"Volume {self.metadata.volume_number}" / self.filename
)
self.absolute_outfile = str(self.outfile.absolute())
else:
self.outfile: Path = self.album_dir / self.filename
self.absolute_outfile = str(self.outfile.absolute())
if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):
logger.info(
f"Track {self.absolute_outfile} already exists "
"and therefore will not be overwritten"
)
return
else:
return self.outfile
def save_artist_image(self, session: Session):
"""This method writes a JPEG file with the name of each of
self.metadata.artists to self.album_dir"""
for a in self.metadata.artists:
track_artist_image: Path = (
self.album_dir / f"{a.name.replace('..', '')}.jpg"
)
if not track_artist_image.exists():
|
logger = logging.getLogger("__name__")
@dataclass
class Track:
track_id: int
def __post_init__(self):
self._has_lyrics: Optional[bool] = None
self.tags: dict = {}
self.album_cover_saved: bool = False
def get_metadata(self, session: Session):
self.metadata: Optional[TracksEndpointResponseJSON] = request_tracks(
session, self.track_id
)
def get_album(self, session: Session):
self.album: Optional[AlbumsEndpointResponseJSON] = request_albums(
session, self.metadata.album.id
)
def get_credits(self, session: Session):
self.credits: Optional[TracksCreditsResponseJSON] = request_credits(
session, self.track_id
)
def get_lyrics(self, session: Session):
if self._has_lyrics is None:
self.lyrics: Optional[TracksLyricsResponseJSON] = request_lyrics(
session, self.track_id
)
if self.lyrics is None:
self._has_lyrics = False
else:
self._has_lyrics = True
else:
return self.lyrics
def get_stream(self, session: Session, audio_format: AudioFormat):
"""Populates self.stream, self.manifest"""
aq: Optional[str] = af_aq.get(audio_format)
self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream(
session, self.track_id, aq
)
def set_manifest(self):
"""This method sets self.manifest and self.codec"""
self.manifest: Manifest = manifester(self.stream)
# https://dashif.org/codecs/audio/
if self.manifest.codecs == "flac":
self.codec = "flac"
elif self.manifest.codecs == "mqa":
self.codec = "flac"
elif self.manifest.codecs == "mha1": # Sony 360 Reality Audio
self.codec = "mka"
elif self.manifest.codecs == "mp4a.40.5": # HE-AAC
self.codec = "m4a"
elif self.manifest.codecs == "mp4a.40.29": # HE-AAC v2
self.codec = "m4a"
elif self.manifest.codecs == "mp4a.40.2": # AAC-LC
self.codec = "m4a"
elif self.manifest.codecs == "eac3": # Enhanced AC-3
self.codec = "m4a"
elif self.manifest.codecs == "mp4a.40.34": # MP3
self.codec = "mp3"
def set_album_dir(self, out_dir: Path):
"""This method sets self.album_dir, based on self.album and
out_dir. In particular, self.album_dir is a subdirectory of out_dir
based on the name of the album's artist"""
artist_substring: str = self.album.artist.name.replace("..", "")
album_substring: str = (
f"{self.album.name} " f"[{self.album.id}] [{self.album.release_date.year}]"
)
self.album_dir: Path = out_dir / artist_substring / album_substring
self.album_dir.mkdir(parents=True, exist_ok=True)
if self.album.number_of_volumes > 1:
volume_substring: str = f"Volume {self.metadata.volume_number}"
(self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True)
def set_filename(self, audio_format: AudioFormat):
"""This method sets self.filename. It's based on self.metadata
as well as audio_format. Additionally, if the available codecs in
self.manifest don't match audio_format, warnings are logged"""
_track_part: str = f"{self.metadata.track_number:02d} - {self.metadata.name}"
if audio_format == AudioFormat.low:
track_substring: str = f"{_track_part} [L]"
elif audio_format == AudioFormat.high:
track_substring: str = f"{_track_part} [H]"
elif audio_format == AudioFormat.lossless:
track_substring: str = f"{_track_part} [CD]"
elif audio_format == AudioFormat.mqa:
track_substring: str = f"{_track_part} [Q]"
elif audio_format == AudioFormat.hi_res:
track_substring: str = f"{_track_part} [HiRes]"
elif audio_format == AudioFormat.dolby_atmos:
track_substring: str = f"{_track_part} [A]"
elif audio_format == AudioFormat.sony_360_reality_audio:
track_substring: str = f"{_track_part} [360]"
else:
track_substring: str = _track_part
# Check for MQA masquerading as HiRes here
if audio_format == AudioFormat.hi_res:
if self.manifest.codecs == "mqa":
logger.warning(
"Even though HiRes audio format was requested, this track is only "
"available in MQA format. TIDAL regards this as 'HiRes' even though "
"it is probably only lossless; i.e. 16-bit 44.1 kHz quality. "
"Downloading of track will continue, but it will be marked as MQA."
)
self.filename: Optional[str] = f"{_track_part} [Q].{self.codec}"
elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100):
logger.warning(
"Even though HiRes audio format was requested, and TIDAL responded to "
"that request without error, this track is only available in lossless "
"format; i.e. 16-bit 44.1 kHz quality. Downloading of track will "
"continue, but it will be marked as Lossless ([CD])."
)
self.filename: Optional[str] = f"{_track_part} [CD].{self.codec}"
else:
self.filename: Optional[str] = f"{track_substring}.{self.codec}"
else:
self.filename: Optional[str] = f"{track_substring}.{self.codec}"
# for use in playlist file ordering
self.trackname: str = re.match(r"(?:\d{2,3} - )(.+?$)", self.filename).groups()[
0
]
def set_outfile(self):
"""Uses self.album_dir and self.metadata and self.filename
to craft the pathlib.Path object, self.outfile, that is a
reference to where the track will be written on disk."""
if self.album.number_of_volumes > 1:
self.outfile: Path = (
self.album_dir / f"Volume {self.metadata.volume_number}" / self.filename
)
self.absolute_outfile = str(self.outfile.absolute())
else:
self.outfile: Path = self.album_dir / self.filename
self.absolute_outfile = str(self.outfile.absolute())
if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):
logger.info(
f"Track {self.absolute_outfile} already exists "
"and therefore will not be overwritten"
)
return
else:
return self.outfile
def save_artist_image(self, session: Session):
"""This method writes a JPEG file with the name of each of
self.metadata.artists to self.album_dir"""
for a in self.metadata.artists:
track_artist_image: Path = (
self.album_dir / f"{a.name.replace('..', '')}.jpg"
)
if not track_artist_image.exists(): | download_artist_image(session, a, self.album_dir) | 16 | 2023-12-12 21:50:25+00:00 | 8k |
lbcb-sci/GNNome | inference.py | [
{
"identifier": "AssemblyGraphDataset",
"path": "graph_dataset.py",
"snippet": "class AssemblyGraphDataset(DGLDataset):\n def __init__(self, root, assembler, threads=32, generate=False):\n self.root = os.path.abspath(root)\n self.assembler = assembler\n self.threads = threads\n ... | import argparse
import os
import sys
import pickle
import random
import math
import collections
import time
import psutil
import torch
import torch.nn.functional as F
import dgl
import models
import evaluate
import utils
from tqdm import tqdm
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import Manager
from graph_dataset import AssemblyGraphDataset
from hyperparameters import get_hyperparameters | 4,905 |
if len_walk_it > 2:
meanLogProb_it = sumLogProb_it / (len_walk_it - 2) # len(walk_f) - 1 + len(walk_b) - 1 <-> starting edge is neglected
try:
meanLogProb_scaled_it = meanLogProb_it / math.sqrt(len_contig_it)
except ValueError:
print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12}')
print(f'Value error: something is wrong here!')
meanLogProb_scaled_it = 0
elif len_walk_it == 2:
meanLogProb_it = 0.0
try:
meanLogProb_scaled_it = meanLogProb_it / math.sqrt(len_contig_it)
except ValueError:
print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12}')
print(f'Value error: something is wrong here!')
meanLogProb_scaled_it = 0
else: # len_walk_it == 1 <-> SELF-LOOP!
len_contig_it = 0
sumLogProb_it = 0.0
meanLogProb_it = 0.0
meanLogprob_scaled_it = 0.0
print(f'SELF-LOOP!')
print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12} ' \
f'sumLogProb={sumLogProb_it:<12.3f} meanLogProb={meanLogProb_it:<12.4} meanLogProb_scaled={meanLogProb_scaled_it:<12.4}')
indx += 1
all_walks.append(walk_it)
all_visited_iter.append(visited_iter)
all_contig_lens.append(len_contig_it)
all_sumLogProbs.append(sumLogProb_it)
all_meanLogProbs.append(meanLogProb_it)
all_meanLogProbs_scaled.append(meanLogProb_scaled_it)
best = max(all_contig_lens)
idxx = all_contig_lens.index(best)
elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_candidates)
print(f'Elapsed time (get_candidates): {elapsed}')
best_walk = all_walks[idxx]
best_visited = all_visited_iter[idxx]
# Add all jumped-over nodes
time_start_get_visited = datetime.now()
trans = set()
for ss, dd in zip(best_walk[:-1], best_walk[1:]):
t1 = set(succs[ss]) & set(preds[dd])
t2 = {t^1 for t in t1}
trans = trans | t1 | t2
best_visited = best_visited | trans
best_contig_len = all_contig_lens[idxx]
best_sumLogProb = all_sumLogProbs[idxx]
best_meanLogProb = all_meanLogProbs[idxx]
best_meanLogProb_scaled = all_meanLogProbs_scaled[idxx]
elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_visited)
print(f'Elapsed time (get visited): {elapsed}')
print(f'\nChosen walk with index: {idxx}')
print(f'len_walk={len(best_walk):<8} len_contig={best_contig_len:<12} ' \
f'sumLogProb={best_sumLogProb:<12.3f} meanLogProb={best_meanLogProb:<12.4} meanLogProb_scaled={best_meanLogProb_scaled:<12.4}\n')
if best_contig_len < 70000:
break
all_contigs.append(best_walk)
visited |= best_visited
all_walks_len.append(len(best_walk))
all_contigs_len.append(best_contig_len)
print(f'All walks len: {all_walks_len}')
print(f'All contigs len: {all_contigs_len}\n')
if len(all_contigs) % 10 == 0:
checkpoint = {
'walks': all_contigs,
'visited': visited,
'all_walks_len': all_walks_len,
'all_contigs_len': all_contigs_len
}
if not DEBUG:
try:
pickle.dump(checkpoint, open(f'{checkpoint_dir}/checkpoint_tmp.pkl', 'wb'))
os.rename(f'{checkpoint_dir}/checkpoint_tmp.pkl', f'{checkpoint_dir}/checkpoint.pkl')
except OSError:
print(f'Checkpoint was not saved. Last available checkopint: {checkpoint_dir}/checkpoint.pkl')
raise
return all_contigs
def inference(data_path, model_path, assembler, savedir, device='cpu', dropout=None):
"""Using a pretrained model, get walks and contigs on new data."""
hyperparameters = get_hyperparameters()
seed = hyperparameters['seed']
num_gnn_layers = hyperparameters['num_gnn_layers']
hidden_features = hyperparameters['dim_latent']
nb_pos_enc = hyperparameters['nb_pos_enc']
batch_norm = hyperparameters['batch_norm']
node_features = hyperparameters['node_features']
edge_features = hyperparameters['edge_features']
hidden_edge_features = hyperparameters['hidden_edge_features']
hidden_edge_scores = hyperparameters['hidden_edge_scores']
strategy = hyperparameters['strategy']
B = hyperparameters['B']
nb_paths = hyperparameters['num_decoding_paths']
len_threshold = hyperparameters['len_threshold']
use_labels = hyperparameters['decode_with_labels']
load_checkpoint = hyperparameters['load_checkpoint']
threads = hyperparameters['num_threads']
# assembly_path = hyperparameters['asms_path']
device = 'cpu' # Hardcode, because we cannot do inference on a GPU - usually not enough memory to load the whole graph
utils.set_seed(seed)
time_start = datetime.now()
|
# Global debug switch for the decoding pipeline: when True, candidate
# decoding prints per-process timing/memory diagnostics and checkpoint
# writing is disabled (see get_contigs_greedy).
DEBUG = False
def get_contig_length(walk, graph):
    """Return the assembled length of ``walk``.

    Sums the 'prefix_length' of every traversed edge and adds the full
    'read_length' of the final node — i.e. the length of the contig that
    results from stitching the overlapping reads along the walk.
    Returns a tensor (callers typically call ``.item()`` on it).
    """
    sources, destinations = walk[:-1], walk[1:]
    prefix_lengths = graph.edges[sources, destinations].data['prefix_length']
    return prefix_lengths.sum().item() + graph.ndata['read_length'][walk[-1]]
def get_subgraph(g, visited, device):
    """Return the subgraph of ``g`` with all nodes in ``visited`` removed.

    Also returns the mapping from subgraph node ids back to the original
    graph's node ids (the dgl.NID field stored by node_subgraph).
    """
    removed = torch.LongTensor([node for node in visited])
    keep_mask = torch.ones(g.num_nodes())
    keep_mask[removed] = 0
    all_nodes = torch.arange(g.num_nodes())
    keep_node_idx = all_nodes[keep_mask == 1].int().to(device)

    sub_g = dgl.node_subgraph(g, keep_node_idx, store_ids=True)
    sub_g.ndata['idx_nodes'] = torch.arange(sub_g.num_nodes()).to(device)
    map_subg_to_g = sub_g.ndata[dgl.NID]
    return sub_g, map_subg_to_g
def sample_edges(prob_edges, nb_paths):
    """Sample ``nb_paths`` starting-edge indices via categorical sampling.

    Probabilities are clamped from below at 1e-9 and renormalized before
    sampling. Returns a 1-D LongTensor of ``nb_paths`` edge indices.
    """
    # torch.distributions.categorical.Categorical does not support
    # tensors longer than 2**24, so truncate beyond that.
    if prob_edges.shape[0] > 2 ** 24:
        prob_edges = prob_edges[: 2 ** 24]

    random_search = False  # flip to sample starting edges uniformly instead
    if random_search:
        return torch.randint(0, prob_edges.shape[0], (nb_paths,))

    clamped = prob_edges.masked_fill(prob_edges < 1e-9, 1e-9)
    normalized = clamped / clamped.sum()
    distribution = torch.distributions.categorical.Categorical(normalized.repeat(nb_paths, 1))
    return distribution.sample()
def greedy_forwards(start, logProbs, neighbors, predecessors, edges, visited_old):
    """Greedy walk forwards from ``start``.

    Repeatedly moves to the successor whose edge has the highest
    log-probability, never re-entering a node already visited globally
    (``visited_old``) or in this walk. Every visited node is recorded
    together with its reverse complement (``node ^ 1``).

    Parameters
    ----------
    start : int
        Node id where the walk begins.
    logProbs : torch.Tensor
        Per-edge log-probabilities, indexed by edge id.
    neighbors : dict[int, list[int]]
        Successor lists per node.
    predecessors : dict
        Unused; kept for signature symmetry with greedy_backwards_rc.
    edges : dict[tuple[int, int], int]
        Maps (src, dst) node pairs to edge ids.
    visited_old : set[int]
        Nodes claimed by previous walks; never re-entered.

    Returns
    -------
    (list[int], set[int], torch.Tensor)
        The walk, all visited nodes (incl. complements), and the summed
        log-probability of the traversed edges.
    """
    current = start
    walk = []
    visited = set()
    sumLogProb = torch.tensor([0.0])
    while True:
        walk.append(current)
        visited.add(current)
        visited.add(current ^ 1)  # also claim the reverse-complement node
        neighs_current = neighbors[current]
        if len(neighs_current) == 0:
            break
        if len(neighs_current) == 1:
            # Single successor: follow it unless it is already taken.
            neighbor = neighs_current[0]
            if neighbor in visited_old or neighbor in visited:
                break
            sumLogProb += logProbs[edges[current, neighbor]]
            current = neighbor
            continue
        # Multiple successors: pick the unvisited one with the best score.
        masked_neighbors = [n for n in neighs_current if not (n in visited_old or n in visited)]
        neighbor_edges = [edges[current, n] for n in masked_neighbors]
        if not neighbor_edges:
            break
        neighbor_p = logProbs[neighbor_edges]
        logProb, index = torch.topk(neighbor_p, k=1, dim=0)
        sumLogProb += logProb
        current = masked_neighbors[index]
    return walk, visited, sumLogProb
def greedy_backwards_rc(start, logProbs, predecessors, neighbors, edges, visited_old):
    """Greedy walk backwards from ``start`` via the reverse complement.

    Walks forwards from ``start ^ 1`` (the reverse-complement node) using
    the same greedy rule as greedy_forwards, then complements and reverses
    the result so it can be prepended to a forward walk.

    Parameters
    ----------
    start : int
        Node id the backward walk extends from.
    logProbs : torch.Tensor
        Per-edge log-probabilities, indexed by edge id.
    predecessors : dict
        Unused; kept for signature symmetry with greedy_forwards.
    neighbors : dict[int, list[int]]
        Successor lists per node (traversed on the complement strand).
    edges : dict[tuple[int, int], int]
        Maps (src, dst) node pairs to edge ids.
    visited_old : set[int]
        Nodes claimed by previous walks; never re-entered.

    Returns
    -------
    (list[int], set[int], torch.Tensor)
        The reversed, complemented walk; all visited nodes; the summed
        log-probability of the traversed edges.
    """
    current = start ^ 1
    walk = []
    visited = set()
    sumLogProb = torch.tensor([0.0])
    while True:
        walk.append(current)
        visited.add(current)
        visited.add(current ^ 1)  # also claim the reverse-complement node
        neighs_current = neighbors[current]
        if len(neighs_current) == 0:
            break
        if len(neighs_current) == 1:
            # Single successor: follow it unless it is already taken.
            neighbor = neighs_current[0]
            if neighbor in visited_old or neighbor in visited:
                break
            sumLogProb += logProbs[edges[current, neighbor]]
            current = neighbor
            continue
        # Multiple successors: pick the unvisited one with the best score.
        masked_neighbors = [n for n in neighs_current if not (n in visited_old or n in visited)]
        neighbor_edges = [edges[current, n] for n in masked_neighbors]
        if not neighbor_edges:
            break
        neighbor_p = logProbs[neighbor_edges]
        logProb, index = torch.topk(neighbor_p, k=1, dim=0)
        sumLogProb += logProb
        current = masked_neighbors[index]
    # Map back to the original strand: complement every node and reverse.
    walk = list(reversed([w ^ 1 for w in walk]))
    return walk, visited, sumLogProb
def run_greedy_both_ways(src, dst, logProbs, succs, preds, edges, visited):
    """Extend a candidate starting edge (src, dst) in both directions.

    Walks forwards from ``dst`` and backwards (via the reverse complement)
    from ``src``, so the caller can stitch the result as walk_b + walk_f.
    The backward walk additionally avoids nodes the forward walk claimed
    (``visited | visited_f``). Returns both walks, both visited sets, and
    both summed log-probabilities.
    """
    walk_f, visited_f, sumLogProb_f = greedy_forwards(dst, logProbs, succs, preds, edges, visited)
    walk_b, visited_b, sumLogProb_b = greedy_backwards_rc(src, logProbs, preds, succs, edges, visited | visited_f)
    return walk_f, walk_b, visited_f, visited_b, sumLogProb_f, sumLogProb_b
def get_contigs_greedy(g, succs, preds, edges, nb_paths=50, len_threshold=20, use_labels=False, checkpoint_dir=None, load_checkpoint=False, device='cpu', threads=32):
    """Iteratively search for contigs in a graph until the threshold is met.

    Each iteration samples ``nb_paths`` candidate starting edges from the
    not-yet-visited subgraph, greedily extends a walk in both directions
    from every candidate, and keeps the walk yielding the longest contig.
    That walk's nodes (plus reverse complements and jumped-over transitive
    nodes) are marked visited, and the loop repeats until the best contig
    of an iteration is shorter than 70 kbp or the subgraph has no edges.

    Parameters
    ----------
    g : dgl.DGLGraph
        Assembly graph; moved to CPU for decoding.
    succs, preds : dict[int, list[int]]
        Successor/predecessor lists per node.
    edges : dict[tuple[int, int], int]
        Maps (src, dst) node pairs to edge ids.
    nb_paths : int
        Number of candidate walks sampled per iteration.
    len_threshold : int
        Reported in the log output only; not used in the stopping rule.
    use_labels : bool
        If True, decode with ground-truth edge labels ('y') instead of
        predicted scores (debugging aid).
    checkpoint_dir : str
        Directory for saving/loading decoding checkpoints.
    load_checkpoint : bool
        If True, resume from an existing checkpoint in ``checkpoint_dir``.
    device, threads
        Kept for interface compatibility; decoding runs on CPU.

    Returns
    -------
    list[list[int]]
        All contigs found, each as a list of node ids.
    """
    g = g.to('cpu')
    all_contigs = []
    all_walks_len = []
    all_contigs_len = []
    visited = set()
    idx_contig = -1
    if use_labels:
        scores = g.edata['y'].to('cpu')
        scores = scores.masked_fill(scores<1e-9, 1e-9)
        logProbs = torch.log(scores)
    else:
        logProbs = torch.log(torch.sigmoid(g.edata['score'].to('cpu')))
    print(f'Starting to decode with greedy...')
    print(f'num_candidates: {nb_paths}, len_threshold: {len_threshold}\n')
    ckpt_file = os.path.join(checkpoint_dir, 'checkpoint.pkl')
    if load_checkpoint and os.path.isfile(ckpt_file):
        print(f'Loading checkpoint from: {checkpoint_dir}\n')
        checkpoint = pickle.load(open(f'{checkpoint_dir}/checkpoint.pkl', 'rb'))
        all_contigs = checkpoint['walks']
        visited = checkpoint['visited']
        idx_contig = len(all_contigs) - 1
        all_walks_len = checkpoint['all_walks_len']
        all_contigs_len = checkpoint['all_contigs_len']
    while True:
        idx_contig += 1
        time_start_sample_edges = datetime.now()
        sub_g, map_subg_to_g = get_subgraph(g, visited, 'cpu')
        if sub_g.num_edges() == 0:
            break
        if use_labels:  # Debugging
            prob_edges = sub_g.edata['y']
        else:
            prob_edges = torch.sigmoid(sub_g.edata['score']).squeeze()
        idx_edges = sample_edges(prob_edges, nb_paths)
        elapsed = utils.timedelta_to_str(datetime.now() - time_start_sample_edges)
        print(f'Elapsed time (sample edges): {elapsed}')
        all_walks = []
        all_visited_iter = []
        all_contig_lens = []
        all_sumLogProbs = []
        all_meanLogProbs = []
        all_meanLogProbs_scaled = []
        print(f'\nidx_contig: {idx_contig}, nb_processed_nodes: {len(visited)}, ' \
              f'nb_remaining_nodes: {g.num_nodes() - len(visited)}, nb_original_nodes: {g.num_nodes()}')
        # Get nb_paths paths for a single iteration, then take the longest one
        time_start_get_candidates = datetime.now()
        with ThreadPoolExecutor(1) as executor:
            if DEBUG:
                print(f'Starting with greedy for one candidate', flush=True)
                all_cand_time = datetime.now()
            results = {}
            start_times = {}
            for e, idx in enumerate(idx_edges):
                # Map the sampled subgraph edge back to original node ids.
                src_init_edges = map_subg_to_g[sub_g.edges()[0][idx]].item()
                dst_init_edges = map_subg_to_g[sub_g.edges()[1][idx]].item()
                start_times[e] = datetime.now()
                if DEBUG:
                    print(f'About to submit job - decoding from edge {e}: {src_init_edges, dst_init_edges}', flush=True)
                future = executor.submit(run_greedy_both_ways, src_init_edges, dst_init_edges, logProbs, succs, preds, edges, visited)
                results[(src_init_edges, dst_init_edges)] = (future, e)
            if DEBUG:
                process = psutil.Process(os.getpid())
                children = process.children(recursive=True)
                print(f'Processes ran: {e+1}\n' \
                      f'Time needed: {utils.timedelta_to_str(datetime.now() - all_cand_time)}\n' \
                      f'Current process ID: {os.getpid()}\n' \
                      f'Total memory used (MB): {process.memory_info().rss / 1024 ** 2}', flush=True)
                if len(children) == 0:
                    print(f'Process has no children!')
                for child in children:
                    print(f'Child pid is {child.pid}', flush=True)
                print()
            indx = 0
            # k = (src, dst) candidate edge; f = future; e = candidate index
            for k, (f, e) in results.items():
                walk_f, walk_b, visited_f, visited_b, sumLogProb_f, sumLogProb_b = f.result()
                if DEBUG:
                    print(f'Finished with candidate {e}: {k}\t' \
                          f'Time needed: {utils.timedelta_to_str(datetime.now() - start_times[e])}')
                walk_it = walk_b + walk_f
                visited_iter = visited_f | visited_b
                sumLogProb_it = sumLogProb_f.item() + sumLogProb_b.item()
                len_walk_it = len(walk_it)
                len_contig_it = get_contig_length(walk_it, g).item()
                if k[0] == k[1]:
                    len_walk_it = 1
                if len_walk_it > 2:
                    meanLogProb_it = sumLogProb_it / (len_walk_it - 2)  # len(walk_f) - 1 + len(walk_b) - 1 <-> starting edge is neglected
                    try:
                        meanLogProb_scaled_it = meanLogProb_it / math.sqrt(len_contig_it)
                    except ValueError:
                        print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12}')
                        print(f'Value error: something is wrong here!')
                        meanLogProb_scaled_it = 0
                elif len_walk_it == 2:
                    meanLogProb_it = 0.0
                    try:
                        meanLogProb_scaled_it = meanLogProb_it / math.sqrt(len_contig_it)
                    except ValueError:
                        print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12}')
                        print(f'Value error: something is wrong here!')
                        meanLogProb_scaled_it = 0
                else:  # len_walk_it == 1 <-> SELF-LOOP!
                    len_contig_it = 0
                    sumLogProb_it = 0.0
                    meanLogProb_it = 0.0
                    # BUGFIX: was 'meanLogprob_scaled_it' (typo), leaving
                    # meanLogProb_scaled_it undefined/stale for self-loops.
                    meanLogProb_scaled_it = 0.0
                    print(f'SELF-LOOP!')
                print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12} ' \
                      f'sumLogProb={sumLogProb_it:<12.3f} meanLogProb={meanLogProb_it:<12.4} meanLogProb_scaled={meanLogProb_scaled_it:<12.4}')
                indx += 1
                all_walks.append(walk_it)
                all_visited_iter.append(visited_iter)
                all_contig_lens.append(len_contig_it)
                all_sumLogProbs.append(sumLogProb_it)
                all_meanLogProbs.append(meanLogProb_it)
                all_meanLogProbs_scaled.append(meanLogProb_scaled_it)
        # Keep the candidate that produced the longest contig.
        best = max(all_contig_lens)
        idxx = all_contig_lens.index(best)
        elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_candidates)
        print(f'Elapsed time (get_candidates): {elapsed}')
        best_walk = all_walks[idxx]
        best_visited = all_visited_iter[idxx]
        # Add all jumped-over nodes
        time_start_get_visited = datetime.now()
        trans = set()
        for ss, dd in zip(best_walk[:-1], best_walk[1:]):
            t1 = set(succs[ss]) & set(preds[dd])
            t2 = {t^1 for t in t1}
            trans = trans | t1 | t2
        best_visited = best_visited | trans
        best_contig_len = all_contig_lens[idxx]
        best_sumLogProb = all_sumLogProbs[idxx]
        best_meanLogProb = all_meanLogProbs[idxx]
        best_meanLogProb_scaled = all_meanLogProbs_scaled[idxx]
        elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_visited)
        print(f'Elapsed time (get visited): {elapsed}')
        print(f'\nChosen walk with index: {idxx}')
        print(f'len_walk={len(best_walk):<8} len_contig={best_contig_len:<12} ' \
              f'sumLogProb={best_sumLogProb:<12.3f} meanLogProb={best_meanLogProb:<12.4} meanLogProb_scaled={best_meanLogProb_scaled:<12.4}\n')
        # Stop once even the best candidate falls under 70 kbp.
        if best_contig_len < 70000:
            break
        all_contigs.append(best_walk)
        visited |= best_visited
        all_walks_len.append(len(best_walk))
        all_contigs_len.append(best_contig_len)
        print(f'All walks len: {all_walks_len}')
        print(f'All contigs len: {all_contigs_len}\n')
        if len(all_contigs) % 10 == 0:
            checkpoint = {
                'walks': all_contigs,
                'visited': visited,
                'all_walks_len': all_walks_len,
                'all_contigs_len': all_contigs_len
            }
            if not DEBUG:
                try:
                    # Write atomically: dump to a temp file, then rename over
                    # the real checkpoint so a crash never corrupts it.
                    pickle.dump(checkpoint, open(f'{checkpoint_dir}/checkpoint_tmp.pkl', 'wb'))
                    os.rename(f'{checkpoint_dir}/checkpoint_tmp.pkl', f'{checkpoint_dir}/checkpoint.pkl')
                except OSError:
                    print(f'Checkpoint was not saved. Last available checkpoint: {checkpoint_dir}/checkpoint.pkl')
                    raise
    return all_contigs
def inference(data_path, model_path, assembler, savedir, device='cpu', dropout=None):
"""Using a pretrained model, get walks and contigs on new data."""
hyperparameters = get_hyperparameters()
seed = hyperparameters['seed']
num_gnn_layers = hyperparameters['num_gnn_layers']
hidden_features = hyperparameters['dim_latent']
nb_pos_enc = hyperparameters['nb_pos_enc']
batch_norm = hyperparameters['batch_norm']
node_features = hyperparameters['node_features']
edge_features = hyperparameters['edge_features']
hidden_edge_features = hyperparameters['hidden_edge_features']
hidden_edge_scores = hyperparameters['hidden_edge_scores']
strategy = hyperparameters['strategy']
B = hyperparameters['B']
nb_paths = hyperparameters['num_decoding_paths']
len_threshold = hyperparameters['len_threshold']
use_labels = hyperparameters['decode_with_labels']
load_checkpoint = hyperparameters['load_checkpoint']
threads = hyperparameters['num_threads']
# assembly_path = hyperparameters['asms_path']
device = 'cpu' # Hardcode, because we cannot do inference on a GPU - usually not enough memory to load the whole graph
utils.set_seed(seed)
time_start = datetime.now()
| ds = AssemblyGraphDataset(data_path, assembler) | 0 | 2023-12-08 04:45:45+00:00 | 8k |
SusheelThapa/C-DOTS | app.py | [
{
"identifier": "CodeDocumenter",
"path": "features/documenter.py",
"snippet": "class CodeDocumenter(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n self.typing_timer = QTimer(self)\n self.typing_timer.timeout.connect(self.type_next_character)\n ... | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QTabWidget, QWidget, QTabBar
from features.documenter import CodeDocumenter
from features.optimizer import CodeOptimizer
from features.summarizer import CodeSummarizer
from features.translator import CodeTranslator | 4,645 |
class StretchedTabBar(QTabBar):
    """Tab bar whose tabs split the parent widget's full width equally."""

    def __init__(self, parent=None):
        super().__init__(parent)

    def tabSizeHint(self, index):
        # Divide the parent's width evenly across all tabs so the bar spans
        # the whole window; keep the default hint when there are no tabs
        # yet (also avoids division by zero).
        size = super().tabSizeHint(index)
        if self.count() > 0:
            size.setWidth(self.parent().width() // self.count())
        return size
class CDOTSApp(QMainWindow):
    def __init__(self):
        """Initialize the Qt main-window base class, then build the UI."""
        super().__init__()
        self.init_ui()
def init_ui(self):
self.setWindowTitle("C-DOTS: Your Coding Assistance")
self.setGeometry(100, 100, 1600, 900)
# Create the tab widget with a stretched tab bar
tab_widget = QTabWidget()
tab_widget.setTabBar(StretchedTabBar(tab_widget))
tab_widget.setStyleSheet(
"""
QTabBar::tab {
background-color: #333333;
color: #CCCCCC;
padding: 15px;
font-size:20px;
font-weight:500;
}
QTabBar::tab:selected {
background: #007BFF;
color: #FFFFFF;
}
QTabBar::tab:hover {
background: #555555;
color:#FFFFFF;
}
"""
)
# Add tabs
|
class StretchedTabBar(QTabBar):
    """QTabBar subclass that stretches its tabs to fill the parent's width."""

    def __init__(self, parent=None):
        super().__init__(parent)

    def tabSizeHint(self, index):
        # Share the parent widget's width evenly among all tabs; with no
        # tabs, fall back to the default hint (and avoid dividing by zero).
        hint = super().tabSizeHint(index)
        tab_count = self.count()
        if tab_count > 0:
            hint.setWidth(self.parent().width() // tab_count)
        return hint
class CDOTSApp(QMainWindow):
    def __init__(self):
        """Initialize the Qt main-window base class, then build the UI."""
        super().__init__()
        self.init_ui()
def init_ui(self):
self.setWindowTitle("C-DOTS: Your Coding Assistance")
self.setGeometry(100, 100, 1600, 900)
# Create the tab widget with a stretched tab bar
tab_widget = QTabWidget()
tab_widget.setTabBar(StretchedTabBar(tab_widget))
tab_widget.setStyleSheet(
"""
QTabBar::tab {
background-color: #333333;
color: #CCCCCC;
padding: 15px;
font-size:20px;
font-weight:500;
}
QTabBar::tab:selected {
background: #007BFF;
color: #FFFFFF;
}
QTabBar::tab:hover {
background: #555555;
color:#FFFFFF;
}
"""
)
# Add tabs | tab_widget.addTab(CodeDocumenter(), "Documenter") | 0 | 2023-12-07 10:48:28+00:00 | 8k |
amadad/agentcy3 | agency_swarm/agency/agency.py | [
{
"identifier": "Agent",
"path": "agency_swarm/agents/agent.py",
"snippet": "class Agent():\n @property\n def assistant(self):\n if self._assistant is None:\n raise Exception(\"Assistant is not initialized. Please run init_oai() first.\")\n return self._assistant\n\n @a... | import inspect
import os
import uuid
import gradio as gr
from enum import Enum
from typing import List
from pydantic import Field, field_validator
from agency_swarm.agents import Agent
from agency_swarm.threads import Thread
from agency_swarm.tools import BaseTool
from agency_swarm.user import User | 5,343 | self._read_instructions(os.path.join(self.get_class_folder_path(), shared_instructions))
elif os.path.isfile(shared_instructions):
self._read_instructions(shared_instructions)
else:
self.shared_instructions = shared_instructions
self._parse_agency_chart(agency_chart)
self._create_send_message_tools()
self._init_agents()
self._init_threads()
self.user = User()
self.main_thread = Thread(self.user, self.ceo)
def get_completion(self, message: str, yield_messages=True):
"""
Retrieves the completion for a given message from the main thread.
Parameters:
message (str): The message for which completion is to be retrieved.
yield_messages (bool, optional): Flag to determine if intermediate messages should be yielded. Defaults to True.
Returns:
Generator or final response: Depending on the 'yield_messages' flag, this method returns either a generator yielding intermediate messages or the final response from the main thread.
"""
gen = self.main_thread.get_completion(message=message, yield_messages=yield_messages)
if not yield_messages:
while True:
try:
next(gen)
except StopIteration as e:
return e.value
return gen
def demo_gradio(self, height=600):
"""
Launches a Gradio-based demo interface for the agency chatbot.
Parameters:
height (int, optional): The height of the chatbot widget in the Gradio interface. Default is 600.
This method sets up and runs a Gradio interface, allowing users to interact with the agency's chatbot. It includes a text input for the user's messages and a chatbot interface for displaying the conversation. The method handles user input and chatbot responses, updating the interface dynamically.
"""
try:
except ImportError:
raise Exception("Please install gradio: pip install gradio")
with gr.Blocks() as demo:
chatbot = gr.Chatbot(height=height)
msg = gr.Textbox()
def user(user_message, history):
# Append the user message with a placeholder for bot response
user_message = "👤 User: " + user_message.strip()
return "", history + [[user_message, None]]
def bot(history):
# Replace this with your actual chatbot logic
gen = self.get_completion(message=history[-1][0])
try:
# Yield each message from the generator
for bot_message in gen:
if bot_message.sender_name.lower() == "user":
continue
message = bot_message.get_sender_emoji() + " " + bot_message.get_formatted_content()
history.append((None, message))
yield history
except StopIteration:
# Handle the end of the conversation if necessary
pass
# Chain the events
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
bot, chatbot, chatbot
)
# Enable queuing for streaming intermediate outputs
demo.queue()
# Launch the demo
demo.launch()
def run_demo(self):
"""
Runs a demonstration of the agency's capabilities in an interactive command line interface.
This function continuously prompts the user for input and displays responses from the agency's main thread. It leverages the generator pattern for asynchronous message processing.
Output:
Outputs the responses from the agency's main thread to the command line.
"""
while True:
text = input("USER: ")
try:
gen = self.main_thread.get_completion(message=text)
while True:
message = next(gen)
message.cprint()
except StopIteration as e:
pass
def _parse_agency_chart(self, agency_chart):
"""
Parses the provided agency chart to initialize and organize agents within the agency.
Parameters:
agency_chart: A structure representing the hierarchical organization of agents within the agency.
It can contain Agent objects and lists of Agent objects.
This method iterates through each node in the agency chart. If a node is an Agent, it is set as the CEO if not already assigned.
If a node is a list, it iterates through the agents in the list, adding them to the agency and establishing communication
threads between them. It raises an exception if the agency chart is invalid or if multiple CEOs are defined.
"""
for node in agency_chart:
|
class Agency:
    def __init__(self, agency_chart, shared_instructions=""):
        """
        Initializes the Agency object, setting up agents, threads, and core functionalities.

        Parameters:
            agency_chart: The structure defining the hierarchy and interaction
                of agents within the agency (Agent nodes and lists of Agents).
            shared_instructions (str, optional): A path to a file containing
                shared instructions for all agents, or the instruction text
                itself if no such file exists. Defaults to an empty string.

        Parses the agency chart, creates the inter-agent messaging tools,
        initializes every agent and the communication threads between them,
        and prepares the main thread used for user interactions.
        """
        self.ceo = None
        self.agents = []
        self.agents_and_threads = {}
        # shared_instructions may be a path relative to this class's folder,
        # an absolute/cwd-relative path, or the literal instruction text.
        if os.path.isfile(os.path.join(self.get_class_folder_path(), shared_instructions)):
            self._read_instructions(os.path.join(self.get_class_folder_path(), shared_instructions))
        elif os.path.isfile(shared_instructions):
            self._read_instructions(shared_instructions)
        else:
            self.shared_instructions = shared_instructions
        # Order matters: the chart must be parsed before messaging tools,
        # agents, and threads can be created from it.
        self._parse_agency_chart(agency_chart)
        self._create_send_message_tools()
        self._init_agents()
        self._init_threads()
        self.user = User()
        self.main_thread = Thread(self.user, self.ceo)
def get_completion(self, message: str, yield_messages=True):
"""
Retrieves the completion for a given message from the main thread.
Parameters:
message (str): The message for which completion is to be retrieved.
yield_messages (bool, optional): Flag to determine if intermediate messages should be yielded. Defaults to True.
Returns:
Generator or final response: Depending on the 'yield_messages' flag, this method returns either a generator yielding intermediate messages or the final response from the main thread.
"""
gen = self.main_thread.get_completion(message=message, yield_messages=yield_messages)
if not yield_messages:
while True:
try:
next(gen)
except StopIteration as e:
return e.value
return gen
def demo_gradio(self, height=600):
"""
Launches a Gradio-based demo interface for the agency chatbot.
Parameters:
height (int, optional): The height of the chatbot widget in the Gradio interface. Default is 600.
This method sets up and runs a Gradio interface, allowing users to interact with the agency's chatbot. It includes a text input for the user's messages and a chatbot interface for displaying the conversation. The method handles user input and chatbot responses, updating the interface dynamically.
"""
try:
except ImportError:
raise Exception("Please install gradio: pip install gradio")
with gr.Blocks() as demo:
chatbot = gr.Chatbot(height=height)
msg = gr.Textbox()
def user(user_message, history):
# Append the user message with a placeholder for bot response
user_message = "👤 User: " + user_message.strip()
return "", history + [[user_message, None]]
def bot(history):
# Replace this with your actual chatbot logic
gen = self.get_completion(message=history[-1][0])
try:
# Yield each message from the generator
for bot_message in gen:
if bot_message.sender_name.lower() == "user":
continue
message = bot_message.get_sender_emoji() + " " + bot_message.get_formatted_content()
history.append((None, message))
yield history
except StopIteration:
# Handle the end of the conversation if necessary
pass
# Chain the events
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
bot, chatbot, chatbot
)
# Enable queuing for streaming intermediate outputs
demo.queue()
# Launch the demo
demo.launch()
def run_demo(self):
"""
Runs a demonstration of the agency's capabilities in an interactive command line interface.
This function continuously prompts the user for input and displays responses from the agency's main thread. It leverages the generator pattern for asynchronous message processing.
Output:
Outputs the responses from the agency's main thread to the command line.
"""
while True:
text = input("USER: ")
try:
gen = self.main_thread.get_completion(message=text)
while True:
message = next(gen)
message.cprint()
except StopIteration as e:
pass
def _parse_agency_chart(self, agency_chart):
"""
Parses the provided agency chart to initialize and organize agents within the agency.
Parameters:
agency_chart: A structure representing the hierarchical organization of agents within the agency.
It can contain Agent objects and lists of Agent objects.
This method iterates through each node in the agency chart. If a node is an Agent, it is set as the CEO if not already assigned.
If a node is a list, it iterates through the agents in the list, adding them to the agency and establishing communication
threads between them. It raises an exception if the agency chart is invalid or if multiple CEOs are defined.
"""
for node in agency_chart: | if isinstance(node, Agent): | 0 | 2023-12-14 01:40:32+00:00 | 8k |
Deltares/imod-python | imod/mf6/evt.py | [
{
"identifier": "add_periodic_auxiliary_variable",
"path": "imod/mf6/auxiliary_variables.py",
"snippet": "def add_periodic_auxiliary_variable(package):\n if hasattr(package, \"_auxiliary_data\"):\n for aux_var_name, aux_var_dimensions in package._auxiliary_data.items():\n aux_coords... | from typing import Dict, List
from imod.mf6.auxiliary_variables import add_periodic_auxiliary_variable
from imod.mf6.boundary_condition import BoundaryCondition
from imod.mf6.regridding_utils import RegridderType
from imod.mf6.validation import BOUNDARY_DIMS_SCHEMA
from imod.schemata import (
AllInsideNoDataSchema,
AllNoDataSchema,
AllValueSchema,
CoordsSchema,
DimsSchema,
DTypeSchema,
IdentityNoDataSchema,
IndexesSchema,
OtherCoordsSchema,
)
from imod.util import unstack_dim_into_variable
import numpy as np | 6,021 | | DimsSchema("segment", "time", "layer", "{face_dim}")
| DimsSchema("segment", "layer", "{face_dim}")
# Layer dim not necessary, as long as there is a layer coordinate present.
| DimsSchema("segment", "time", "y", "x")
| DimsSchema("segment", "y", "x")
| DimsSchema("segment", "time", "{face_dim}")
| DimsSchema("segment", "{face_dim}")
)
class Evapotranspiration(BoundaryCondition):
"""
Evapotranspiration (EVT) Package.
Any number of EVT Packages can be specified for a single groundwater flow
model. All single-valued variables are free format.
https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=86
Parameters
----------
surface: array of floats (xr.DataArray)
is the elevation of the ET surface (L). A time-series name may be
specified.
rate: array of floats (xr.DataArray)
is the maximum ET flux rate (LT −1). A time-series name may be
specified.
depth: array of floats (xr.DataArray)
is the ET extinction depth (L). A time-series name may be specified.
proportion_rate: array of floats (xr.DataArray)
is the proportion of the maximum ET flux rate at the bottom of a segment
(dimensionless). A time-series name may be specified. (petm)
proportion_depth: array of floats (xr.DataArray)
is the proportion of the ET extinction depth at the bottom of a segment
(dimensionless). A timeseries name may be specified. (pxdp)
concentration: array of floats (xr.DataArray, optional)
if this flow package is used in simulations also involving transport, then this array is used
as the concentration for inflow over this boundary.
concentration_boundary_type: ({"AUX", "AUXMIXED"}, optional)
if this flow package is used in simulations also involving transport, then this keyword specifies
how outflow over this boundary is computed.
fixed_cell: array of floats (xr.DataArray)
indicates that evapotranspiration will not be reassigned to a cell
underlying the cell specified in the list if the specified cell is
inactive.
print_input: ({True, False}, optional)
keyword to indicate that the list of evapotranspiration information will
be written to the listing file immediately after it is read.
Default is False.
print_flows: ({True, False}, optional)
Indicates that the list of evapotranspiration flow rates will be printed
to the listing file for every stress period time step in which "BUDGET
PRINT" is specified in Output Control. If there is no Output Control
option and PRINT FLOWS is specified, then flow rates are printed for the
last time step of each stress period.
Default is False.
save_flows: ({True, False}, optional)
Indicates that evapotranspiration flow terms will be written to the file
specified with "BUDGET FILEOUT" in Output Control.
Default is False.
observations: [Not yet supported.]
Default is None.
validate: {True, False}
Flag to indicate whether the package should be validated upon
initialization. This raises a ValidationError if package input is
provided in the wrong manner. Defaults to True.
repeat_stress: Optional[xr.DataArray] of datetimes
Used to repeat data for e.g. repeating stress periods such as
seasonality without duplicating the values. The DataArray should have
dimensions ``("repeat", "repeat_items")``. The ``repeat_items``
dimension should have size 2: the first value is the "key", the second
value is the "value". For the "key" datetime, the data of the "value"
datetime will be used. Can also be set with a dictionary using the
``set_repeat_stress`` method.
"""
_pkg_id = "evt"
_init_schemata = {
"surface": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
BOUNDARY_DIMS_SCHEMA,
],
"rate": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
BOUNDARY_DIMS_SCHEMA,
],
"depth": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
BOUNDARY_DIMS_SCHEMA,
],
"proportion_rate": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
SEGMENT_BOUNDARY_DIMS_SCHEMA,
],
"proportion_depth": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
SEGMENT_BOUNDARY_DIMS_SCHEMA,
],
"print_flows": [DTypeSchema(np.bool_), DimsSchema()],
"save_flows": [DTypeSchema(np.bool_), DimsSchema()],
}
_write_schemata = {
"surface": [
OtherCoordsSchema("idomain"),
AllNoDataSchema(), # Check for all nan, can occur while clipping
AllInsideNoDataSchema(other="idomain", is_other_notnull=(">", 0)),
],
"rate": [IdentityNoDataSchema("surface")],
"depth": [IdentityNoDataSchema("surface")],
"proportion_rate": [IdentityNoDataSchema("surface")],
"proportion_depth": [
IdentityNoDataSchema("surface"),
|
SEGMENT_BOUNDARY_DIMS_SCHEMA = (
BOUNDARY_DIMS_SCHEMA
| DimsSchema("segment", "time", "layer", "y", "x")
| DimsSchema("segment", "layer", "y", "x")
| DimsSchema("segment", "time", "layer", "{face_dim}")
| DimsSchema("segment", "layer", "{face_dim}")
# Layer dim not necessary, as long as there is a layer coordinate present.
| DimsSchema("segment", "time", "y", "x")
| DimsSchema("segment", "y", "x")
| DimsSchema("segment", "time", "{face_dim}")
| DimsSchema("segment", "{face_dim}")
)
class Evapotranspiration(BoundaryCondition):
"""
Evapotranspiration (EVT) Package.
Any number of EVT Packages can be specified for a single groundwater flow
model. All single-valued variables are free format.
https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=86
Parameters
----------
surface: array of floats (xr.DataArray)
is the elevation of the ET surface (L). A time-series name may be
specified.
rate: array of floats (xr.DataArray)
is the maximum ET flux rate (LT −1). A time-series name may be
specified.
depth: array of floats (xr.DataArray)
is the ET extinction depth (L). A time-series name may be specified.
proportion_rate: array of floats (xr.DataArray)
is the proportion of the maximum ET flux rate at the bottom of a segment
(dimensionless). A time-series name may be specified. (petm)
proportion_depth: array of floats (xr.DataArray)
is the proportion of the ET extinction depth at the bottom of a segment
(dimensionless). A timeseries name may be specified. (pxdp)
concentration: array of floats (xr.DataArray, optional)
if this flow package is used in simulations also involving transport, then this array is used
as the concentration for inflow over this boundary.
concentration_boundary_type: ({"AUX", "AUXMIXED"}, optional)
if this flow package is used in simulations also involving transport, then this keyword specifies
how outflow over this boundary is computed.
fixed_cell: array of floats (xr.DataArray)
indicates that evapotranspiration will not be reassigned to a cell
underlying the cell specified in the list if the specified cell is
inactive.
print_input: ({True, False}, optional)
keyword to indicate that the list of evapotranspiration information will
be written to the listing file immediately after it is read.
Default is False.
print_flows: ({True, False}, optional)
Indicates that the list of evapotranspiration flow rates will be printed
to the listing file for every stress period time step in which "BUDGET
PRINT" is specified in Output Control. If there is no Output Control
option and PRINT FLOWS is specified, then flow rates are printed for the
last time step of each stress period.
Default is False.
save_flows: ({True, False}, optional)
Indicates that evapotranspiration flow terms will be written to the file
specified with "BUDGET FILEOUT" in Output Control.
Default is False.
observations: [Not yet supported.]
Default is None.
validate: {True, False}
Flag to indicate whether the package should be validated upon
initialization. This raises a ValidationError if package input is
provided in the wrong manner. Defaults to True.
repeat_stress: Optional[xr.DataArray] of datetimes
Used to repeat data for e.g. repeating stress periods such as
seasonality without duplicating the values. The DataArray should have
dimensions ``("repeat", "repeat_items")``. The ``repeat_items``
dimension should have size 2: the first value is the "key", the second
value is the "value". For the "key" datetime, the data of the "value"
datetime will be used. Can also be set with a dictionary using the
``set_repeat_stress`` method.
"""
_pkg_id = "evt"
_init_schemata = {
"surface": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
BOUNDARY_DIMS_SCHEMA,
],
"rate": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
BOUNDARY_DIMS_SCHEMA,
],
"depth": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
BOUNDARY_DIMS_SCHEMA,
],
"proportion_rate": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
SEGMENT_BOUNDARY_DIMS_SCHEMA,
],
"proportion_depth": [
DTypeSchema(np.floating),
IndexesSchema(),
CoordsSchema(("layer",)),
SEGMENT_BOUNDARY_DIMS_SCHEMA,
],
"print_flows": [DTypeSchema(np.bool_), DimsSchema()],
"save_flows": [DTypeSchema(np.bool_), DimsSchema()],
}
_write_schemata = {
"surface": [
OtherCoordsSchema("idomain"),
AllNoDataSchema(), # Check for all nan, can occur while clipping
AllInsideNoDataSchema(other="idomain", is_other_notnull=(">", 0)),
],
"rate": [IdentityNoDataSchema("surface")],
"depth": [IdentityNoDataSchema("surface")],
"proportion_rate": [IdentityNoDataSchema("surface")],
"proportion_depth": [
IdentityNoDataSchema("surface"), | AllValueSchema(">=", 0.0), | 6 | 2023-12-08 13:57:59+00:00 | 8k |
Dong142857/Live3DPortrait | models/eg3d/triplane.py | [
{
"identifier": "persistence",
"path": "torch_utils/persistence.py",
"snippet": "def persistent_class(orig_class):\n def __init__(self, *args, **kwargs):\n def init_args(self):\n def init_kwargs(self):\n def __reduce__(self):\ndef is_persistent(obj):\ndef import_hook(hook):\n... | import torch
import dnnlib
from torch_utils import persistence
from models.eg3d.networks_stylegan2 import Generator as StyleGAN2Backbone
from models.eg3d.volumetric_rendering.renderer import ImportanceRenderer
from models.eg3d.volumetric_rendering.ray_sampler import RaySampler
from models.eg3d.superresolution import SuperresolutionHybrid8XDC
from models.eg3d.networks_stylegan2 import FullyConnectedLayer | 4,998 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
@persistence.persistent_class
class TriPlaneGenerator(torch.nn.Module):
def __init__(self,
z_dim, # Input latent (Z) dimensionality.
c_dim, # Conditioning label (C) dimensionality.
w_dim, # Intermediate latent (W) dimensionality.
img_resolution, # Output resolution.
img_channels, # Number of output color channels.
sr_num_fp16_res = 0,
mapping_kwargs = {}, # Arguments for MappingNetwork.
rendering_kwargs = {},
sr_kwargs = {},
**synthesis_kwargs, # Arguments for SynthesisNetwork.
):
super().__init__()
self.z_dim=z_dim
self.c_dim=c_dim
self.w_dim=w_dim
self.img_resolution=img_resolution
self.img_channels=img_channels
self.renderer = ImportanceRenderer()
self.ray_sampler = RaySampler()
| # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
@persistence.persistent_class
class TriPlaneGenerator(torch.nn.Module):
def __init__(self,
z_dim, # Input latent (Z) dimensionality.
c_dim, # Conditioning label (C) dimensionality.
w_dim, # Intermediate latent (W) dimensionality.
img_resolution, # Output resolution.
img_channels, # Number of output color channels.
sr_num_fp16_res = 0,
mapping_kwargs = {}, # Arguments for MappingNetwork.
rendering_kwargs = {},
sr_kwargs = {},
**synthesis_kwargs, # Arguments for SynthesisNetwork.
):
super().__init__()
self.z_dim=z_dim
self.c_dim=c_dim
self.w_dim=w_dim
self.img_resolution=img_resolution
self.img_channels=img_channels
self.renderer = ImportanceRenderer()
self.ray_sampler = RaySampler() | self.backbone = StyleGAN2Backbone(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs) | 0 | 2023-12-09 15:18:53+00:00 | 8k |
blaise-tk/RVC_CLI | main.py | [
{
"identifier": "Config",
"path": "rvc/configs/config.py",
"snippet": "class Config:\n def __init__(self):\n self.device = \"cuda:0\"\n self.is_half = True\n self.use_jit = False\n self.n_cpu = 0\n self.gpu_name = None\n self.json_config = self.load_config_js... | import os
import sys
import argparse
import subprocess
from rvc.configs.config import Config
from rvc.lib.tools.validators import (
validate_sampling_rate,
validate_f0up_key,
validate_f0method,
validate_true_false,
validate_tts_voices,
)
from rvc.train.extract.preparing_files import generate_config, generate_filelist
from rvc.lib.tools.pretrained_selector import pretrained_selector
from rvc.lib.process.model_fusion import model_fusion
from rvc.lib.process.model_information import model_information | 5,141 | model_path = os.path.join(logs_path, str(model_name))
extract_f0_script_path = os.path.join(
"rvc", "train", "extract", "extract_f0_print.py"
)
extract_feature_script_path = os.path.join(
"rvc", "train", "extract", "extract_feature_print.py"
)
command_1 = [
"python",
extract_f0_script_path,
model_path,
f0method,
str(hop_length),
]
command_2 = [
"python",
extract_feature_script_path,
config.device,
"1",
"0",
"0",
model_path,
rvc_version,
"True",
]
subprocess.run(command_1)
subprocess.run(command_2)
generate_config(rvc_version, sampling_rate, model_path)
generate_filelist(f0method, model_path, rvc_version, sampling_rate)
return f"Model {model_name} extracted successfully."
# Train
def run_train_script(
model_name,
rvc_version,
save_every_epoch,
save_only_latest,
save_every_weights,
total_epoch,
sampling_rate,
batch_size,
gpu,
pitch_guidance,
pretrained,
custom_pretrained,
g_pretrained_path=None,
d_pretrained_path=None,
):
f0 = 1 if pitch_guidance == "True" else 0
latest = 1 if save_only_latest == "True" else 0
save_every = 1 if save_every_weights == "True" else 0
if pretrained == "True":
if custom_pretrained == "False":
pg, pd = pretrained_selector(f0)[rvc_version][sampling_rate]
else:
if g_pretrained_path is None or d_pretrained_path is None:
raise ValueError(
"Please provide the path to the pretrained G and D models."
)
pg, pd = g_pretrained_path, d_pretrained_path
else:
pg, pd = "", ""
train_script_path = os.path.join("rvc", "train", "train.py")
command = [
"python",
train_script_path,
"-se",
str(save_every_epoch),
"-te",
str(total_epoch),
"-pg",
pg,
"-pd",
pd,
"-sr",
str(sampling_rate),
"-bs",
str(batch_size),
"-g",
gpu,
"-e",
os.path.join(logs_path, str(model_name)),
"-v",
rvc_version,
"-l",
str(latest),
"-c",
"0",
"-sw",
str(save_every),
"-f0",
str(f0),
]
subprocess.run(command)
run_index_script(model_name, rvc_version)
return f"Model {model_name} trained successfully."
# Index
def run_index_script(model_name, rvc_version):
index_script_path = os.path.join("rvc", "train", "index_generator.py")
command = [
"python",
index_script_path,
os.path.join(logs_path, str(model_name)),
rvc_version,
]
subprocess.run(command)
return f"Index file for {model_name} generated successfully."
# Model information
def run_model_information_script(pth_path):
|
config = Config()
current_script_directory = os.path.dirname(os.path.realpath(__file__))
logs_path = os.path.join(current_script_directory, "logs")
subprocess.run(
["python", os.path.join("rvc", "lib", "tools", "prerequisites_download.py")]
)
# Infer
def run_infer_script(
f0up_key,
filter_radius,
index_rate,
hop_length,
f0method,
input_path,
output_path,
pth_file,
index_path,
):
infer_script_path = os.path.join("rvc", "infer", "infer.py")
command = [
"python",
infer_script_path,
str(f0up_key),
str(filter_radius),
str(index_rate),
str(hop_length),
f0method,
input_path,
output_path,
pth_file,
index_path,
]
subprocess.run(command)
return f"File {input_path} inferred successfully."
# Batch infer
def run_batch_infer_script(
f0up_key,
filter_radius,
index_rate,
hop_length,
f0method,
input_folder,
output_folder,
pth_file,
index_path,
):
infer_script_path = os.path.join("rvc", "infer", "infer.py")
audio_files = [
f for f in os.listdir(input_folder) if f.endswith((".mp3", ".wav", ".flac"))
]
print(f"Detected {len(audio_files)} audio files for inference.")
for audio_file in audio_files:
if "_output" in audio_file:
pass
else:
input_path = os.path.join(input_folder, audio_file)
output_file_name = os.path.splitext(os.path.basename(audio_file))[0]
output_path = os.path.join(
output_folder,
f"{output_file_name}_output{os.path.splitext(audio_file)[1]}",
)
print(f"Inferring {input_path}...")
command = [
"python",
infer_script_path,
str(f0up_key),
str(filter_radius),
str(index_rate),
str(hop_length),
f0method,
input_path,
output_path,
pth_file,
index_path,
]
subprocess.run(command)
return f"Files from {input_folder} inferred successfully."
# TTS
def run_tts_script(
tts_text,
tts_voice,
f0up_key,
filter_radius,
index_rate,
hop_length,
f0method,
output_tts_path,
output_rvc_path,
pth_file,
index_path,
):
tts_script_path = os.path.join("rvc", "lib", "tools", "tts.py")
infer_script_path = os.path.join("rvc", "infer", "infer.py")
if os.path.exists(output_tts_path):
os.remove(output_tts_path)
command_tts = [
"python",
tts_script_path,
tts_text,
tts_voice,
output_tts_path,
]
command_infer = [
"python",
infer_script_path,
str(f0up_key),
str(filter_radius),
str(index_rate),
str(hop_length),
f0method,
output_tts_path,
output_rvc_path,
pth_file,
index_path,
]
subprocess.run(command_tts)
subprocess.run(command_infer)
return f"Text {tts_text} synthesized successfully.", output_rvc_path
# Preprocess
def run_preprocess_script(model_name, dataset_path, sampling_rate):
per = 3.0 if config.is_half else 3.7
preprocess_script_path = os.path.join("rvc", "train", "preprocess", "preprocess.py")
command = [
"python",
preprocess_script_path,
os.path.join(logs_path, str(model_name)),
dataset_path,
str(sampling_rate),
str(per),
]
os.mkdir(os.path.join(logs_path, str(model_name)))
subprocess.run(command)
return f"Model {model_name} preprocessed successfully."
# Extract
def run_extract_script(model_name, rvc_version, f0method, hop_length, sampling_rate):
model_path = os.path.join(logs_path, str(model_name))
extract_f0_script_path = os.path.join(
"rvc", "train", "extract", "extract_f0_print.py"
)
extract_feature_script_path = os.path.join(
"rvc", "train", "extract", "extract_feature_print.py"
)
command_1 = [
"python",
extract_f0_script_path,
model_path,
f0method,
str(hop_length),
]
command_2 = [
"python",
extract_feature_script_path,
config.device,
"1",
"0",
"0",
model_path,
rvc_version,
"True",
]
subprocess.run(command_1)
subprocess.run(command_2)
generate_config(rvc_version, sampling_rate, model_path)
generate_filelist(f0method, model_path, rvc_version, sampling_rate)
return f"Model {model_name} extracted successfully."
# Train
def run_train_script(
model_name,
rvc_version,
save_every_epoch,
save_only_latest,
save_every_weights,
total_epoch,
sampling_rate,
batch_size,
gpu,
pitch_guidance,
pretrained,
custom_pretrained,
g_pretrained_path=None,
d_pretrained_path=None,
):
f0 = 1 if pitch_guidance == "True" else 0
latest = 1 if save_only_latest == "True" else 0
save_every = 1 if save_every_weights == "True" else 0
if pretrained == "True":
if custom_pretrained == "False":
pg, pd = pretrained_selector(f0)[rvc_version][sampling_rate]
else:
if g_pretrained_path is None or d_pretrained_path is None:
raise ValueError(
"Please provide the path to the pretrained G and D models."
)
pg, pd = g_pretrained_path, d_pretrained_path
else:
pg, pd = "", ""
train_script_path = os.path.join("rvc", "train", "train.py")
command = [
"python",
train_script_path,
"-se",
str(save_every_epoch),
"-te",
str(total_epoch),
"-pg",
pg,
"-pd",
pd,
"-sr",
str(sampling_rate),
"-bs",
str(batch_size),
"-g",
gpu,
"-e",
os.path.join(logs_path, str(model_name)),
"-v",
rvc_version,
"-l",
str(latest),
"-c",
"0",
"-sw",
str(save_every),
"-f0",
str(f0),
]
subprocess.run(command)
run_index_script(model_name, rvc_version)
return f"Model {model_name} trained successfully."
# Index
def run_index_script(model_name, rvc_version):
index_script_path = os.path.join("rvc", "train", "index_generator.py")
command = [
"python",
index_script_path,
os.path.join(logs_path, str(model_name)),
rvc_version,
]
subprocess.run(command)
return f"Index file for {model_name} generated successfully."
# Model information
def run_model_information_script(pth_path): | print(model_information(pth_path)) | 10 | 2023-12-10 21:09:41+00:00 | 8k |
SubConv/SubConv | api.py | [
{
"identifier": "pack",
"path": "modules/pack.py",
"snippet": "async def pack(url: list, urlstandalone: list, urlstandby:list, urlstandbystandalone: list, content: str, interval, domain, short):\n regionDict, total, providerProxyNames = await parse.mkList(content, urlstandalone) # regions available ... | from modules import pack
from modules import parse
from modules.convert import converter
from fastapi import FastAPI, HTTPException
from fastapi.requests import Request
from fastapi.responses import FileResponse, Response
from fastapi.staticfiles import StaticFiles
from urllib.parse import urlencode, unquote
from pathlib import Path
import uvicorn
import httpx
import argparse
import re | 3,821 | # coding=utf-8
def length(sth):
if sth is None:
return 0
else:
return len(sth)
app = FastAPI()
# mainpage
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/")
async def mainpage():
return FileResponse("static/index.html")
# subscription to proxy-provider
@app.get("/provider")
async def provider(request: Request):
headers = {'Content-Type': 'text/yaml;charset=utf-8'}
url = request.query_params.get("url")
async with httpx.AsyncClient() as client:
resp = await client.get(url, headers={'User-Agent':'clash'})
if resp.status_code < 200 or resp.status_code >= 300:
raise HTTPException(status_code=resp.status_code, detail=resp.text)
result = await parse.parseSubs(resp.text)
return Response(content=result, headers=headers)
# subscription converter api
@app.get("/sub")
async def sub(request: Request):
args = request.query_params
# get interval
if "interval" in args:
interval = args["interval"]
else:
interval = "1800"
short = args.get("short")
# get the url of original subscription
url = args.get("url")
url = re.split(r"[|\n]", url)
# remove empty lines
tmp = list(filter(lambda x: x!="", url))
url = []
urlstandalone = []
for i in tmp:
if (i.startswith("http://") or i.startswith("https://")) and not i.startswith("https://t.me/"):
url.append(i)
else:
urlstandalone.append(i)
urlstandalone = "\n".join(urlstandalone)
if len(url) == 0:
url = None
if len(urlstandalone) == 0:
urlstandalone = None
urlstandby = args.get("urlstandby")
urlstandbystandalone = None
if urlstandby:
urlstandby = re.split(r"[|\n]", urlstandby)
tmp = list(filter(lambda x: x!="", urlstandby))
urlstandby = []
urlstandbystandalone = []
for i in tmp:
if (i.startswith("http://") or i.startswith("https://")) and not i.startswith("https://t.me/"):
urlstandby.append(i)
else:
urlstandbystandalone.append(i)
urlstandbystandalone = "\n".join(urlstandbystandalone)
if len(urlstandby) == 0:
urlstandby = None
if len(urlstandbystandalone) == 0:
urlstandbystandalone = None
if urlstandalone:
urlstandalone = await converter.ConvertsV2Ray(urlstandalone)
if urlstandbystandalone:
urlstandbystandalone = await converter.ConvertsV2Ray(urlstandbystandalone)
async with httpx.AsyncClient() as client:
# get original headers
headers = {'Content-Type': 'text/yaml;charset=utf-8'}
# if there's only one subscription, return userinfo
if length(url) == 1:
resp = await client.head(url[0], headers={'User-Agent':'clash'})
if resp.status_code < 200 or resp.status_code >= 300:
raise HTTPException(status_code=resp.status_code, detail=resp.text)
originalHeaders = resp.headers
if 'subscription-userinfo' in originalHeaders: # containing info about ramaining flow
headers['subscription-userinfo'] = originalHeaders['subscription-userinfo']
if 'Content-Disposition' in originalHeaders: # containing filename
headers['Content-Disposition'] = originalHeaders['Content-Disposition'].replace("attachment", "inline")
content = [] # the proxies of original subscriptions
if url is not None:
for i in range(len(url)):
# the test of response
respText = (await client.get(url[i], headers={'User-Agent':'clash'})).text
content.append(await parse.parseSubs(respText))
url[i] = "{}provider?{}".format(request.base_url, urlencode({"url": url[i]}))
if len(content) == 0:
content = None
if urlstandby:
for i in range(len(urlstandby)):
urlstandby[i] = "{}provider?{}".format(request.base_url, urlencode({"url": urlstandby[i]}))
# get the domain or ip of this api to add rule for this
domain = re.search(r"([^:]+)(:\d{1,5})?", request.url.hostname).group(1)
# generate the subscription
| #!/usr/bin/env python3
# coding=utf-8
def length(sth):
if sth is None:
return 0
else:
return len(sth)
app = FastAPI()
# mainpage
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/")
async def mainpage():
return FileResponse("static/index.html")
# subscription to proxy-provider
@app.get("/provider")
async def provider(request: Request):
headers = {'Content-Type': 'text/yaml;charset=utf-8'}
url = request.query_params.get("url")
async with httpx.AsyncClient() as client:
resp = await client.get(url, headers={'User-Agent':'clash'})
if resp.status_code < 200 or resp.status_code >= 300:
raise HTTPException(status_code=resp.status_code, detail=resp.text)
result = await parse.parseSubs(resp.text)
return Response(content=result, headers=headers)
# subscription converter api
@app.get("/sub")
async def sub(request: Request):
args = request.query_params
# get interval
if "interval" in args:
interval = args["interval"]
else:
interval = "1800"
short = args.get("short")
# get the url of original subscription
url = args.get("url")
url = re.split(r"[|\n]", url)
# remove empty lines
tmp = list(filter(lambda x: x!="", url))
url = []
urlstandalone = []
for i in tmp:
if (i.startswith("http://") or i.startswith("https://")) and not i.startswith("https://t.me/"):
url.append(i)
else:
urlstandalone.append(i)
urlstandalone = "\n".join(urlstandalone)
if len(url) == 0:
url = None
if len(urlstandalone) == 0:
urlstandalone = None
urlstandby = args.get("urlstandby")
urlstandbystandalone = None
if urlstandby:
urlstandby = re.split(r"[|\n]", urlstandby)
tmp = list(filter(lambda x: x!="", urlstandby))
urlstandby = []
urlstandbystandalone = []
for i in tmp:
if (i.startswith("http://") or i.startswith("https://")) and not i.startswith("https://t.me/"):
urlstandby.append(i)
else:
urlstandbystandalone.append(i)
urlstandbystandalone = "\n".join(urlstandbystandalone)
if len(urlstandby) == 0:
urlstandby = None
if len(urlstandbystandalone) == 0:
urlstandbystandalone = None
if urlstandalone:
urlstandalone = await converter.ConvertsV2Ray(urlstandalone)
if urlstandbystandalone:
urlstandbystandalone = await converter.ConvertsV2Ray(urlstandbystandalone)
async with httpx.AsyncClient() as client:
# get original headers
headers = {'Content-Type': 'text/yaml;charset=utf-8'}
# if there's only one subscription, return userinfo
if length(url) == 1:
resp = await client.head(url[0], headers={'User-Agent':'clash'})
if resp.status_code < 200 or resp.status_code >= 300:
raise HTTPException(status_code=resp.status_code, detail=resp.text)
originalHeaders = resp.headers
if 'subscription-userinfo' in originalHeaders: # containing info about ramaining flow
headers['subscription-userinfo'] = originalHeaders['subscription-userinfo']
if 'Content-Disposition' in originalHeaders: # containing filename
headers['Content-Disposition'] = originalHeaders['Content-Disposition'].replace("attachment", "inline")
content = [] # the proxies of original subscriptions
if url is not None:
for i in range(len(url)):
# the test of response
respText = (await client.get(url[i], headers={'User-Agent':'clash'})).text
content.append(await parse.parseSubs(respText))
url[i] = "{}provider?{}".format(request.base_url, urlencode({"url": url[i]}))
if len(content) == 0:
content = None
if urlstandby:
for i in range(len(urlstandby)):
urlstandby[i] = "{}provider?{}".format(request.base_url, urlencode({"url": urlstandby[i]}))
# get the domain or ip of this api to add rule for this
domain = re.search(r"([^:]+)(:\d{1,5})?", request.url.hostname).group(1)
# generate the subscription | result = await pack.pack(url=url, urlstandalone=urlstandalone, urlstandby=urlstandby,urlstandbystandalone=urlstandbystandalone, content=content, interval=interval, domain=domain, short=short) | 0 | 2023-12-06 12:57:11+00:00 | 8k |
Opt-Mucca/PySCIPOpt-ML | src/pyscipopt_ml/modelling/neuralnet/layers.py | [
{
"identifier": "ParameterError",
"path": "src/pyscipopt_ml/exceptions.py",
"snippet": "class ParameterError(Exception):\n \"\"\"Wrong parameter to a function.\"\"\"\n\n def __init__(self, message):\n super().__init__(message)"
},
{
"identifier": "AbstractPredictorConstr",
"path... | from ...exceptions import ParameterError
from ..base_predictor_constraint import AbstractPredictorConstr
from ..var_utils import create_vars
from .activations import (
add_identity_activation_constraint_layer,
add_relu_activation_constraint_layer,
add_sigmoid_activation_constraint_layer,
add_tanh_activation_constraint_layer,
) | 6,148 | raise AssertionError("Cannot compute the error of an individual layer")
class ActivationLayer(AbstractNNLayer):
"""Class to build one activation layer of a neural network."""
def __init__(
self,
scip_model,
output_vars,
input_vars,
activation_function,
unique_naming_prefix,
**kwargs,
):
super().__init__(
scip_model,
input_vars,
output_vars,
activation_function,
unique_naming_prefix,
**kwargs,
)
def _create_output_vars(self, input_vars):
output_vars = create_vars(
input_vars.shape,
vtype="C",
lb=None,
ub=None,
name_prefix=self.unique_naming_prefix + "output",
)
return output_vars
def _mip_model(self, **kwargs):
"""Add the layer to model."""
if self.activation == "relu":
slack = create_vars(
self.scip_model,
(self.input.shape[0], self.output.shape[-1]),
vtype="C",
lb=0.0,
ub=None,
name_prefix=self.unique_naming_prefix + "slack",
)
affine_slack_cons, sos_cons = add_relu_activation_constraint_layer(
self, slack, activation_only=True
)
self._created_vars.append(slack)
self._created_cons.append(affine_slack_cons)
self._created_cons.append(sos_cons)
elif self.activation == "logistic":
sigmoid_cons = add_sigmoid_activation_constraint_layer(self, activation_only=True)
self._created_cons.append(sigmoid_cons)
elif self.activation == "tanh":
tanh_cons = add_tanh_activation_constraint_layer(self, activation_only=True)
self._created_cons.append(tanh_cons)
else:
raise ParameterError(f"Activation layer of type {self.activation} shouldn't exist")
class DenseLayer(AbstractNNLayer):
"""Class to build one layer of a neural network."""
def __init__(
self,
scip_model,
input_vars,
layer_coefs,
layer_intercept,
output_vars,
activation_function,
unique_naming_prefix,
**kwargs,
):
self.coefs = layer_coefs
self.intercept = layer_intercept
super().__init__(
scip_model,
input_vars,
output_vars,
activation_function,
unique_naming_prefix,
**kwargs,
)
def _create_output_vars(self, input_vars):
output_vars = create_vars(
(input_vars.shape[0], self.coefs.shape[-1]),
vtype="C",
lb=None,
ub=None,
name_prefix=self.unique_naming_prefix + "output",
)
return output_vars
def _mip_model(self, **kwargs):
"""Add the layer to model."""
if self.activation == "relu":
slack = create_vars(
self.scip_model,
(self.input.shape[0], self.output.shape[-1]),
vtype="C",
lb=0.0,
ub=None,
name_prefix=self.unique_naming_prefix + "slack",
)
affine_slack_cons, sos_cons = add_relu_activation_constraint_layer(
self, slack, activation_only=False
)
self._created_vars.append(slack)
self._created_cons.append(affine_slack_cons)
self._created_cons.append(sos_cons)
elif self.activation == "logistic":
sigmoid_cons = add_sigmoid_activation_constraint_layer(self, activation_only=False)
self._created_cons.append(sigmoid_cons)
elif self.activation == "tanh":
tanh_cons = add_tanh_activation_constraint_layer(self, activation_only=False)
self._created_cons.append(tanh_cons)
elif self.activation == "identity":
| """Bases classes for modeling neural network layers."""
class AbstractNNLayer(AbstractPredictorConstr):
"""Abstract class for NN layers."""
def __init__(
self,
scip_model,
input_vars,
output_vars,
activation,
unique_naming_prefix,
**kwargs,
):
self.activation = activation
AbstractPredictorConstr.__init__(
self, scip_model, input_vars, output_vars, unique_naming_prefix, **kwargs
)
def get_error(self, eps=None):
# We can't compute externally the error of a layer
raise AssertionError("Cannot compute the error of an individual layer")
class ActivationLayer(AbstractNNLayer):
"""Class to build one activation layer of a neural network."""
def __init__(
self,
scip_model,
output_vars,
input_vars,
activation_function,
unique_naming_prefix,
**kwargs,
):
super().__init__(
scip_model,
input_vars,
output_vars,
activation_function,
unique_naming_prefix,
**kwargs,
)
def _create_output_vars(self, input_vars):
output_vars = create_vars(
input_vars.shape,
vtype="C",
lb=None,
ub=None,
name_prefix=self.unique_naming_prefix + "output",
)
return output_vars
def _mip_model(self, **kwargs):
"""Add the layer to model."""
if self.activation == "relu":
slack = create_vars(
self.scip_model,
(self.input.shape[0], self.output.shape[-1]),
vtype="C",
lb=0.0,
ub=None,
name_prefix=self.unique_naming_prefix + "slack",
)
affine_slack_cons, sos_cons = add_relu_activation_constraint_layer(
self, slack, activation_only=True
)
self._created_vars.append(slack)
self._created_cons.append(affine_slack_cons)
self._created_cons.append(sos_cons)
elif self.activation == "logistic":
sigmoid_cons = add_sigmoid_activation_constraint_layer(self, activation_only=True)
self._created_cons.append(sigmoid_cons)
elif self.activation == "tanh":
tanh_cons = add_tanh_activation_constraint_layer(self, activation_only=True)
self._created_cons.append(tanh_cons)
else:
raise ParameterError(f"Activation layer of type {self.activation} shouldn't exist")
class DenseLayer(AbstractNNLayer):
"""Class to build one layer of a neural network."""
def __init__(
self,
scip_model,
input_vars,
layer_coefs,
layer_intercept,
output_vars,
activation_function,
unique_naming_prefix,
**kwargs,
):
self.coefs = layer_coefs
self.intercept = layer_intercept
super().__init__(
scip_model,
input_vars,
output_vars,
activation_function,
unique_naming_prefix,
**kwargs,
)
def _create_output_vars(self, input_vars):
output_vars = create_vars(
(input_vars.shape[0], self.coefs.shape[-1]),
vtype="C",
lb=None,
ub=None,
name_prefix=self.unique_naming_prefix + "output",
)
return output_vars
def _mip_model(self, **kwargs):
"""Add the layer to model."""
if self.activation == "relu":
slack = create_vars(
self.scip_model,
(self.input.shape[0], self.output.shape[-1]),
vtype="C",
lb=0.0,
ub=None,
name_prefix=self.unique_naming_prefix + "slack",
)
affine_slack_cons, sos_cons = add_relu_activation_constraint_layer(
self, slack, activation_only=False
)
self._created_vars.append(slack)
self._created_cons.append(affine_slack_cons)
self._created_cons.append(sos_cons)
elif self.activation == "logistic":
sigmoid_cons = add_sigmoid_activation_constraint_layer(self, activation_only=False)
self._created_cons.append(sigmoid_cons)
elif self.activation == "tanh":
tanh_cons = add_tanh_activation_constraint_layer(self, activation_only=False)
self._created_cons.append(tanh_cons)
elif self.activation == "identity": | affine_cons = add_identity_activation_constraint_layer(self) | 3 | 2023-12-10 20:28:22+00:00 | 8k |
Yanyutin753/CowAndPandoraNext | plugins/godcmd/godcmd.py | [
{
"identifier": "Bridge",
"path": "bridge/bridge.py",
"snippet": "class Bridge(object):\n def __init__(self):\n self.btype = {\n \"chat\": const.CHATGPT,\n \"voice_to_text\": conf().get(\"voice_to_text\", \"openai\"),\n \"text_to_voice\": conf().get(\"text_to_v... | import random
import string
import plugins
from typing import Tuple
from bridge.bridge import Bridge
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common import const
from config import load_config, global_config
from plugins import *
from auto_share_token import Share_token_config
from custom_instructions import get_messages, modify_messages_user, modify_messages_model | 4,688 | reply = Reply()
reply.type = ReplyType.ERROR
reply.content = f"空指令,输入#help查看指令列表\n"
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
return
# msg = e_context['context']['msg']
channel = e_context["channel"]
user = e_context["context"]["receiver"]
session_id = e_context["context"]["session_id"]
isgroup = e_context["context"].get("isgroup", False)
bottype = Bridge().get_bot_type("chat")
bot = Bridge().get_bot("chat")
# 将命令和参数分割
content = content[1:].strip() # 移除前缀
command_parts = content.split(maxsplit=1)
cmd = command_parts[0]
# 检查是否有参数
if len(command_parts) > 1:
if cmd == 'update_ci_user' or cmd == 'update_ci_model':
args = [command_parts[1]] # 使用剩余的内容作为参数
else:
args = command_parts[1].split() # 用空格分割参数
else:
args = [] # 没有参数
isadmin = False
if user in self.admin_users:
isadmin = True
ok = False
result = "string"
if any(cmd in info["alias"] for info in COMMANDS.values()):
cmd = next(c for c, info in COMMANDS.items() if cmd in info["alias"])
if cmd == "auth":
ok, result = self.authenticate(user, args, isadmin, isgroup)
elif cmd == "help" or cmd == "helpp":
if len(args) == 0:
ok, result = True, get_help_text(isadmin, isgroup)
else:
# This can replace the helpp command
plugins = PluginManager().list_plugins()
query_name = args[0].upper()
# search name and namecn
for name, plugincls in plugins.items():
if not plugincls.enabled:
continue
if query_name == name or query_name == plugincls.namecn:
ok, result = True, PluginManager().instances[name].get_help_text(isgroup=isgroup, isadmin=isadmin, verbose=True)
break
if not ok:
result = "插件不存在或未启用"
elif cmd == "model":
if not isadmin and not self.is_admin_in_group(e_context["context"]):
ok, result = False, "需要管理员权限执行"
elif len(args) == 0:
ok, result = True, "当前模型为: " + str(conf().get("model"))
elif len(args) == 1:
if args[0] not in const.MODEL_LIST:
ok, result = False, "模型名称不存在"
else:
conf()["model"] = args[0]
Bridge().reset_bot()
ok, result = True, "模型设置为: " + str(conf().get("model"))
elif cmd == "id":
ok, result = True, user
elif cmd == "set_openai_api_key":
if len(args) == 1:
user_data = conf().get_user_data(user)
user_data["openai_api_key"] = args[0]
ok, result = True, "你的OpenAI私有api_key已设置为" + args[0]
else:
ok, result = False, "请提供一个api_key"
elif cmd == "reset_openai_api_key":
try:
user_data = conf().get_user_data(user)
user_data.pop("openai_api_key")
ok, result = True, "你的OpenAI私有api_key已清除"
except Exception as e:
ok, result = False, "你没有设置私有api_key"
elif cmd == "set_gpt_model":
if len(args) == 1:
user_data = conf().get_user_data(user)
user_data["gpt_model"] = args[0]
ok, result = True, "你的GPT模型已设置为" + args[0]
else:
ok, result = False, "请提供一个GPT模型"
elif cmd == "gpt_model":
user_data = conf().get_user_data(user)
model = conf().get("model")
if "gpt_model" in user_data:
model = user_data["gpt_model"]
ok, result = True, "你的GPT模型为" + str(model)
elif cmd == "reset_gpt_model":
try:
user_data = conf().get_user_data(user)
user_data.pop("gpt_model")
ok, result = True, "你的GPT模型已重置"
except Exception as e:
ok, result = False, "你没有设置私有GPT模型"
elif cmd == "reset":
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI]:
bot.sessions.clear_session(session_id)
channel.cancel_session(session_id)
ok, result = True, "会话已重置"
else:
ok, result = False, "当前对话机器人不支持重置会话"
logger.debug("[Godcmd] command: %s by %s" % (cmd, user))
elif any(cmd in info["alias"] for info in ADMIN_COMMANDS.values()):
if isadmin:
if isgroup:
ok, result = False, "群聊不可执行管理员指令"
else:
cmd = next(c for c, info in ADMIN_COMMANDS.items() if cmd in info["alias"])
if cmd == "stop":
self.isrunning = False
ok, result = True, "服务已暂停"
elif cmd == "resume":
self.isrunning = True
ok, result = True, "服务已恢复"
elif cmd == "update_token":
print("开始修改分享token")
| # encoding:utf-8
# 定义指令集
COMMANDS = {
"help": {
"alias": ["help", "帮助"],
"desc": "回复此帮助",
},
"helpp": {
"alias": ["help", "帮助"], # 与help指令共用别名,根据参数数量区分
"args": ["插件名"],
"desc": "回复指定插件的详细帮助",
},
"auth": {
"alias": ["auth", "认证"],
"args": ["口令"],
"desc": "管理员认证",
},
"model": {
"alias": ["model", "模型"],
"desc": "查看和设置全局模型",
},
"set_openai_api_key": {
"alias": ["set_openai_api_key"],
"args": ["api_key"],
"desc": "设置你的OpenAI私有api_key",
},
"reset_openai_api_key": {
"alias": ["reset_openai_api_key"],
"desc": "重置为默认的api_key",
},
"set_gpt_model": {
"alias": ["set_gpt_model"],
"desc": "设置你的私有模型",
},
"reset_gpt_model": {
"alias": ["reset_gpt_model"],
"desc": "重置你的私有模型",
},
"gpt_model": {
"alias": ["gpt_model"],
"desc": "查询你使用的模型",
},
"id": {
"alias": ["id", "用户"],
"desc": "获取用户id", # wechaty和wechatmp的用户id不会变化,可用于绑定管理员
},
"reset": {
"alias": ["reset", "重置会话"],
"desc": "重置会话",
},
}
ADMIN_COMMANDS = {
"resume": {
"alias": ["resume", "恢复服务"],
"desc": "恢复服务",
},
"update_token": {
"alias": ["update_token", "更新chatgpt"],
"desc": "更新chatgpt",
},
"stop": {
"alias": ["stop", "暂停服务"],
"desc": "暂停服务",
},
"reconf": {
"alias": ["reconf", "重载配置"],
"desc": "重载配置(不包含插件配置)",
},
"resetall": {
"alias": ["resetall", "重置所有会话"],
"desc": "重置所有会话",
},
"scanp": {
"alias": ["scanp", "扫描插件"],
"desc": "扫描插件目录是否有新插件",
},
"plist": {
"alias": ["plist", "插件"],
"desc": "打印当前插件列表",
},
"setpri": {
"alias": ["setpri", "设置插件优先级"],
"args": ["插件名", "优先级"],
"desc": "设置指定插件的优先级,越大越优先",
},
"reloadp": {
"alias": ["reloadp", "重载插件"],
"args": ["插件名"],
"desc": "重载指定插件配置",
},
"enablep": {
"alias": ["enablep", "启用插件"],
"args": ["插件名"],
"desc": "启用指定插件",
},
"disablep": {
"alias": ["disablep", "禁用插件"],
"args": ["插件名"],
"desc": "禁用指定插件",
},
"installp": {
"alias": ["installp", "安装插件"],
"args": ["仓库地址或插件名"],
"desc": "安装指定插件",
},
"uninstallp": {
"alias": ["uninstallp", "卸载插件"],
"args": ["插件名"],
"desc": "卸载指定插件",
},
"updatep": {
"alias": ["updatep", "更新插件"],
"args": ["插件名"],
"desc": "更新指定插件",
},
"debug": {
"alias": ["debug", "调试模式", "DEBUG"],
"desc": "开启机器调试日志",
},
}
# 定义帮助函数
def get_help_text(isadmin, isgroup):
help_text = "通用指令:\n"
for cmd, info in COMMANDS.items():
if cmd == "auth": # 不提示认证指令
continue
if cmd == "id" and conf().get("channel_type", "wx") not in ["wxy", "wechatmp"]:
continue
alias = ["#" + a for a in info["alias"][:1]]
help_text += f"{','.join(alias)} "
if "args" in info:
args = [a for a in info["args"]]
help_text += f"{' '.join(args)}"
help_text += f": {info['desc']}\n"
# 插件指令
plugins = PluginManager().list_plugins()
help_text += "\n目前可用插件有:"
for plugin in plugins:
if plugins[plugin].enabled and not plugins[plugin].hidden:
namecn = plugins[plugin].namecn
help_text += "\n%s:" % namecn
help_text += PluginManager().instances[plugin].get_help_text(verbose=False).strip()
if ADMIN_COMMANDS and isadmin:
help_text += "\n\n管理员指令:\n"
for cmd, info in ADMIN_COMMANDS.items():
alias = ["#" + a for a in info["alias"][:1]]
help_text += f"{','.join(alias)} "
if "args" in info:
args = [a for a in info["args"]]
help_text += f"{' '.join(args)}"
help_text += f": {info['desc']}\n"
return help_text
@plugins.register(
name="Godcmd",
desire_priority=999,
hidden=True,
desc="为你的机器人添加指令集,有用户和管理员两种角色,加载顺序请放在首位,初次运行后插件目录会生成配置文件, 填充管理员密码后即可认证",
version="1.0",
author="lanvent",
)
class Godcmd(Plugin):
def __init__(self):
super().__init__()
config_path = os.path.join(os.path.dirname(__file__), "config.json")
gconf = super().load_config()
if not gconf:
if not os.path.exists(config_path):
gconf = {"password": "", "admin_users": []}
with open(config_path, "w") as f:
json.dump(gconf, f, indent=4)
if gconf["password"] == "":
self.temp_password = "".join(random.sample(string.digits, 4))
logger.info("[Godcmd] 因未设置口令,本次的临时口令为%s。" % self.temp_password)
else:
self.temp_password = None
custom_commands = conf().get("clear_memory_commands", [])
for custom_command in custom_commands:
if custom_command and custom_command.startswith("#"):
custom_command = custom_command[1:]
if custom_command and custom_command not in COMMANDS["reset"]["alias"]:
COMMANDS["reset"]["alias"].append(custom_command)
self.password = gconf["password"]
self.admin_users = gconf["admin_users"] # 预存的管理员账号,这些账号不需要认证。itchat的用户名每次都会变,不可用
self.isrunning = True # 机器人是否运行中
self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
logger.info("[Godcmd] inited")
def on_handle_context(self, e_context: EventContext):
context_type = e_context["context"].type
if context_type != ContextType.TEXT:
if not self.isrunning:
e_context.action = EventAction.BREAK_PASS
return
content = e_context["context"].content
logger.debug("[Godcmd] on_handle_context. content: %s" % content)
if content.startswith("#"):
if len(content) == 1:
reply = Reply()
reply.type = ReplyType.ERROR
reply.content = f"空指令,输入#help查看指令列表\n"
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
return
# msg = e_context['context']['msg']
channel = e_context["channel"]
user = e_context["context"]["receiver"]
session_id = e_context["context"]["session_id"]
isgroup = e_context["context"].get("isgroup", False)
bottype = Bridge().get_bot_type("chat")
bot = Bridge().get_bot("chat")
# 将命令和参数分割
content = content[1:].strip() # 移除前缀
command_parts = content.split(maxsplit=1)
cmd = command_parts[0]
# 检查是否有参数
if len(command_parts) > 1:
if cmd == 'update_ci_user' or cmd == 'update_ci_model':
args = [command_parts[1]] # 使用剩余的内容作为参数
else:
args = command_parts[1].split() # 用空格分割参数
else:
args = [] # 没有参数
isadmin = False
if user in self.admin_users:
isadmin = True
ok = False
result = "string"
if any(cmd in info["alias"] for info in COMMANDS.values()):
cmd = next(c for c, info in COMMANDS.items() if cmd in info["alias"])
if cmd == "auth":
ok, result = self.authenticate(user, args, isadmin, isgroup)
elif cmd == "help" or cmd == "helpp":
if len(args) == 0:
ok, result = True, get_help_text(isadmin, isgroup)
else:
# This can replace the helpp command
plugins = PluginManager().list_plugins()
query_name = args[0].upper()
# search name and namecn
for name, plugincls in plugins.items():
if not plugincls.enabled:
continue
if query_name == name or query_name == plugincls.namecn:
ok, result = True, PluginManager().instances[name].get_help_text(isgroup=isgroup, isadmin=isadmin, verbose=True)
break
if not ok:
result = "插件不存在或未启用"
elif cmd == "model":
if not isadmin and not self.is_admin_in_group(e_context["context"]):
ok, result = False, "需要管理员权限执行"
elif len(args) == 0:
ok, result = True, "当前模型为: " + str(conf().get("model"))
elif len(args) == 1:
if args[0] not in const.MODEL_LIST:
ok, result = False, "模型名称不存在"
else:
conf()["model"] = args[0]
Bridge().reset_bot()
ok, result = True, "模型设置为: " + str(conf().get("model"))
elif cmd == "id":
ok, result = True, user
elif cmd == "set_openai_api_key":
if len(args) == 1:
user_data = conf().get_user_data(user)
user_data["openai_api_key"] = args[0]
ok, result = True, "你的OpenAI私有api_key已设置为" + args[0]
else:
ok, result = False, "请提供一个api_key"
elif cmd == "reset_openai_api_key":
try:
user_data = conf().get_user_data(user)
user_data.pop("openai_api_key")
ok, result = True, "你的OpenAI私有api_key已清除"
except Exception as e:
ok, result = False, "你没有设置私有api_key"
elif cmd == "set_gpt_model":
if len(args) == 1:
user_data = conf().get_user_data(user)
user_data["gpt_model"] = args[0]
ok, result = True, "你的GPT模型已设置为" + args[0]
else:
ok, result = False, "请提供一个GPT模型"
elif cmd == "gpt_model":
user_data = conf().get_user_data(user)
model = conf().get("model")
if "gpt_model" in user_data:
model = user_data["gpt_model"]
ok, result = True, "你的GPT模型为" + str(model)
elif cmd == "reset_gpt_model":
try:
user_data = conf().get_user_data(user)
user_data.pop("gpt_model")
ok, result = True, "你的GPT模型已重置"
except Exception as e:
ok, result = False, "你没有设置私有GPT模型"
elif cmd == "reset":
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI]:
bot.sessions.clear_session(session_id)
channel.cancel_session(session_id)
ok, result = True, "会话已重置"
else:
ok, result = False, "当前对话机器人不支持重置会话"
logger.debug("[Godcmd] command: %s by %s" % (cmd, user))
elif any(cmd in info["alias"] for info in ADMIN_COMMANDS.values()):
if isadmin:
if isgroup:
ok, result = False, "群聊不可执行管理员指令"
else:
cmd = next(c for c, info in ADMIN_COMMANDS.items() if cmd in info["alias"])
if cmd == "stop":
self.isrunning = False
ok, result = True, "服务已暂停"
elif cmd == "resume":
self.isrunning = True
ok, result = True, "服务已恢复"
elif cmd == "update_token":
print("开始修改分享token") | share_token = Share_token_config() | 6 | 2023-12-14 15:21:17+00:00 | 8k |
nerdslab/bams | mouse_triplets.py | [
{
"identifier": "Dataset",
"path": "bams/data/dataset.py",
"snippet": "class Dataset(CachedDataset):\n r\"\"\"Dataset for holding time series data, with input and target features.\n\n Caching is possible if you need to avoid processing the data every time you run\n the script. The cache file wi... | import os
import numpy as np
import argparse
import torch
import torch.nn.functional as F
from datetime import datetime
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from bams.data import Dataset
from bams.data.utils import diff, to_polar_coordinates, angle_clip
from bams.models import BAMS
from bams import HoALoss | 5,464 | skip_frames = 100
view_1_id = (
torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
)
view_2_id = (
torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
)
view_1 = byol_preds["long_term"][torch.arange(batch_size), view_1_id]
view_2 = embs["long_term"][torch.arange(batch_size), view_2_id]
byol_loss_long_term = (
1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean()
)
# backprop
loss = 5e2 * hoa_loss + 0.5 * byol_loss_short_term + 0.5 * byol_loss_long_term
loss.backward()
optimizer.step()
step += 1
if step % log_every_step == 0:
writer.add_scalar("train/hoa_loss", hoa_loss.item(), step)
writer.add_scalar(
"train/byol_loss_short_term", byol_loss_short_term.item(), step
)
writer.add_scalar(
"train/byol_loss_long_term", byol_loss_long_term.item(), step
)
writer.add_scalar("train/total_loss", loss.item(), step)
return step
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--job",
default="train",
const="train",
nargs="?",
choices=["train", "compute_representations"],
help="select task",
)
parser.add_argument("--data_root", type=str, default="./data/mabe")
parser.add_argument("--cache_path", type=str, default="./data/mabe/mouse_triplet")
parser.add_argument("--hoa_bins", type=int, default=32)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--num_workers", type=int, default=16)
parser.add_argument("--epochs", type=int, default=500)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--weight_decay", type=float, default=4e-5)
parser.add_argument("--log_every_step", type=int, default=50)
parser.add_argument("--ckpt_path", type=str, default=None)
args = parser.parse_args()
if args.job == "train":
train(args)
elif args.job == "compute_representations":
compute_representations(args)
def train(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# dataset
if not Dataset.cache_is_available(args.cache_path, args.hoa_bins):
print("Processing data...")
keypoints, split_mask, batch = load_mice_triplet(args.data_root)
input_feats, target_feats, ignore_frames = mouse_feature_extractor(keypoints)
else:
print("No need to process data")
input_feats = target_feats = ignore_frames = None
dataset = Dataset(
input_feats=input_feats,
target_feats=target_feats,
ignore_frames=ignore_frames,
cache_path=args.cache_path,
cache=True,
hoa_bins=args.hoa_bins,
hoa_window=30,
)
print("Number of sequences:", len(dataset))
# prepare dataloaders
train_loader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_workers=args.num_workers,
pin_memory=True,
)
# build model
model = BAMS(
input_size=dataset.input_size,
short_term=dict(num_channels=(64, 64, 32, 32), kernel_size=3),
long_term=dict(num_channels=(64, 64, 64, 32, 32), kernel_size=3, dilation=4),
predictor=dict(
hidden_layers=(-1, 256, 512, 512, dataset.target_size * args.hoa_bins),
), # frame rate = 30, 6 steps = 200ms
).to(device)
model_name = f"bams-mouse-triplet-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
writer = SummaryWriter("runs/" + model_name)
main_params = [p for name, p in model.named_parameters() if "byol" not in name]
byol_params = list(model.byol_predictors.parameters())
optimizer = optim.AdamW(
[{"params": main_params}, {"params": byol_params, "lr": args.lr * 10}],
lr=args.lr,
weight_decay=args.weight_decay,
)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200], gamma=0.1)
|
#############
# Load data #
#############
def load_mice_triplet(path):
# load raw train data (with annotations for 2 tasks)
data_train = np.load(
os.path.join(path, "mouse_triplet_train.npy"), allow_pickle=True
).item()
sequence_ids_train, sequence_data_train = zip(*data_train["sequences"].items())
keypoints_train = np.stack([data["keypoints"] for data in sequence_data_train])
# load submission data (no annoations)
data_submission = np.load(
os.path.join(path, "mouse_triplet_test.npy"), allow_pickle=True
).item()
sequence_ids_submission, sequence_data_submission = zip(
*data_submission["sequences"].items()
)
keypoints_submission = np.stack(
[data["keypoints"] for data in sequence_data_submission]
)
# concatenate train and submission data
sequence_ids = np.concatenate([sequence_ids_train, sequence_ids_submission], axis=0)
keypoints = np.concatenate([keypoints_train, keypoints_submission], axis=0)
split_mask = np.ones(len(sequence_ids), dtype=bool)
split_mask[-len(sequence_ids_submission) :] = False
# treat each mouse independently, keep track of which video each mouse came from
num_samples, sequence_length, num_mice, num_keypoints, _ = keypoints.shape
keypoints = keypoints.transpose((0, 2, 1, 3, 4))
keypoints = keypoints.reshape((-1, sequence_length, num_keypoints, 2))
batch = np.repeat(np.arange(num_samples), num_mice)
return keypoints, split_mask, batch
################
# Process data #
################
def mouse_feature_extractor(keypoints, noise_thresh=3e-3):
# compute state features
# body part 1: head, keypoints 0, 1, 2, 3
head_center = keypoints[..., 3, :]
head_orientation = np.arctan2(
keypoints[..., 0, 1] - keypoints[..., 3, 1],
keypoints[..., 0, 0] - keypoints[..., 3, 0],
)
# body part 2: forepaws, keypoints 3, 4, 5
# use keypoint 3 as center
left_forepaw = keypoints[..., 4, :] - keypoints[..., 3, :]
right_forepaw = keypoints[..., 5, :] - keypoints[..., 3, :]
left_forepaw_r, left_forepaw_theta = to_polar_coordinates(left_forepaw)
right_forepaw_r, right_forepaw_theta = to_polar_coordinates(right_forepaw)
forepaws_theta = angle_clip(right_forepaw_theta - left_forepaw_theta)
# connection body parts 2-3
spine = keypoints[..., 6, :] - keypoints[..., 3, :]
spine_r, spine_theta = to_polar_coordinates(spine)
# body part 3: bottom, keypoints 6, 7, 8, 9
bottom_center = keypoints[..., 6, :]
# center
bottom = keypoints[..., 7:, :] - bottom_center[..., np.newaxis, :]
bottom_orientation = np.arctan2(
keypoints[..., 6, 1] - keypoints[..., 9, 1],
keypoints[..., 6, 0] - keypoints[..., 9, 0],
)
bottom_rotation = np.array(
[
[np.cos(-bottom_orientation), -np.sin(-bottom_orientation)],
[np.sin(-bottom_orientation), np.cos(-bottom_orientation)],
]
)
# rotate
bottom = np.einsum("ijkp,lpij->ijkl", bottom, bottom_rotation)
left_hindpaw_r, left_hindpaw_theta = to_polar_coordinates(bottom[..., 0, :])
left_hindpaw_theta = left_hindpaw_theta
right_hindpaw_r, right_hindpaw_theta = to_polar_coordinates(bottom[..., 1, :])
right_hindpaw_theta = right_hindpaw_theta
center_to_tail_r, _ = to_polar_coordinates(bottom[..., 2, :])
_, tail_theta_1 = to_polar_coordinates(bottom[..., 3, :] - bottom[..., 2, :])
tail_theta_1 = tail_theta_1
_, tail_theta_2 = to_polar_coordinates(bottom[..., 4, :] - bottom[..., 3, :])
tail_theta_2 = tail_theta_2
# compute action features
### body part 1: head
head_vx = diff(head_center[..., 0])
head_vy = diff(head_center[..., 0])
head_vr, head_vtheta = to_polar_coordinates(np.stack([head_vx, head_vy], axis=-1))
head_vtheta[head_vr < noise_thresh] = 0.0
head_vr[head_vr < noise_thresh] = 0.0
head_dvtheta = angle_clip(diff(head_vtheta))
# orientation
head_orientation_dtheta = angle_clip(diff(head_orientation))
### body part 2: forepaws
# left forepaw
left_forepaw_dr = diff(left_forepaw_r)
left_forepaw_dtheta = angle_clip(diff(left_forepaw_theta))
# right forepaw
right_forepaw_dr = diff(left_forepaw_r)
right_forepaw_dtheta = angle_clip(diff(right_forepaw_theta))
# angle between forepaws
forepaws_dtheta = angle_clip(diff(forepaws_theta))
# body part 3: bottom
# velocity
bottom_vx = diff(bottom_center[..., 0])
bottom_vy = diff(bottom_center[..., 1])
bottom_vr, bottom_vtheta = to_polar_coordinates(
np.stack([bottom_vx, bottom_vy], axis=-1)
)
bottom_vtheta[bottom_vr < noise_thresh] = 0.0
bottom_vr[bottom_vr < noise_thresh] = 0.0
bottom_dvtheta = angle_clip(diff(bottom_vtheta))
# orientation
bottom_orientation_dtheta = angle_clip(diff(bottom_orientation))
# left hindpaw
left_hindpaw_dr = diff(left_hindpaw_r)
left_hindpaw_dtheta = angle_clip(diff(left_hindpaw_theta))
# right hindpaw
right_hindpaw_dr = diff(right_hindpaw_r)
right_hindpaw_dtheta = angle_clip(diff(right_hindpaw_theta))
# body part 4: tail
tail_dtheta_1 = angle_clip(diff(tail_theta_1))
tail_dtheta_2 = angle_clip(diff(tail_theta_2))
# connections between body parts
center_to_tail_dr = diff(center_to_tail_r)
spine_dr = diff(spine_r)
spine_dtheta = angle_clip(diff(spine_theta))
ignore_frames = np.any(keypoints[..., 0] == 0, axis=-1)
ignore_frames[:, 1:] = np.logical_or(ignore_frames[:, 1:], ignore_frames[:, :-1])
input_features = np.stack(
[
head_center[..., 0],
head_center[..., 1],
np.cos(head_orientation),
np.sin(head_orientation),
left_forepaw_r,
np.cos(left_forepaw_theta),
np.sin(left_forepaw_theta),
right_forepaw_r,
np.cos(right_forepaw_theta),
np.sin(right_forepaw_theta),
np.cos(forepaws_theta),
np.sin(forepaws_theta),
bottom_center[..., 0],
bottom_center[..., 1],
np.cos(bottom_orientation),
np.sin(bottom_orientation),
left_hindpaw_r,
np.cos(left_hindpaw_theta),
np.sin(left_hindpaw_theta),
right_hindpaw_r,
np.cos(right_hindpaw_theta),
np.sin(right_hindpaw_theta),
center_to_tail_r,
np.cos(tail_theta_1),
np.sin(tail_theta_1),
np.cos(tail_theta_2),
np.sin(tail_theta_2),
spine_r,
np.cos(spine_theta),
np.sin(spine_theta),
head_vr,
np.cos(head_vtheta),
np.sin(head_vtheta),
np.cos(head_dvtheta),
np.sin(head_dvtheta),
np.cos(head_orientation_dtheta),
np.sin(head_orientation_dtheta),
left_forepaw_dr,
np.cos(left_forepaw_dtheta),
np.sin(left_forepaw_dtheta),
right_forepaw_dr,
np.cos(right_forepaw_dtheta),
np.sin(right_forepaw_dtheta),
np.cos(forepaws_dtheta),
np.sin(forepaws_dtheta),
bottom_vr,
np.cos(bottom_vtheta),
np.sin(bottom_vtheta),
np.cos(bottom_dvtheta),
np.sin(bottom_dvtheta),
np.cos(bottom_orientation_dtheta),
np.sin(bottom_orientation_dtheta),
left_hindpaw_dr,
np.cos(left_hindpaw_dtheta),
np.sin(left_hindpaw_dtheta),
right_hindpaw_dr,
np.cos(right_hindpaw_dtheta),
np.sin(right_hindpaw_dtheta),
np.cos(tail_dtheta_1),
np.sin(tail_dtheta_1),
np.cos(tail_dtheta_2),
np.sin(tail_dtheta_2),
center_to_tail_dr,
spine_dr,
np.cos(spine_dtheta),
np.sin(spine_dtheta),
ignore_frames,
],
axis=-1,
)
target_feats = np.stack(
[
head_vr,
head_vtheta,
head_dvtheta,
head_orientation_dtheta,
bottom_vr,
bottom_vtheta,
bottom_dvtheta,
bottom_orientation_dtheta,
spine_dr,
],
axis=-1,
)
return input_features, target_feats, ignore_frames
#################
# Training loop #
#################
def train_loop(
model, device, loader, optimizer, criterion, writer, step, log_every_step
):
model.train()
for data in tqdm(loader, position=1, leave=False):
# todo convert to float
input = data["input"].float().to(device) # (B, N, L)
target = data["target_hist"].float().to(device)
ignore_weights = data["ignore_weights"].to(device)
# forward pass
optimizer.zero_grad()
embs, hoa_pred, byol_preds = model(input)
# prediction task
hoa_loss = criterion(target, hoa_pred, ignore_weights)
# contrastive loss: short term
batch_size, sequence_length, emb_dim = embs["short_term"].size()
skip_frames, delta = 60, 5
view_1_id = (
torch.randint(sequence_length - skip_frames - delta, (batch_size,))
+ skip_frames
)
view_2_id = torch.randint(delta + 1, (batch_size,)) + view_1_id
view_2_id = torch.clip(view_2_id, 0, sequence_length)
view_1 = byol_preds["short_term"][torch.arange(batch_size), view_1_id]
view_2 = embs["short_term"][torch.arange(batch_size), view_2_id]
byol_loss_short_term = (
1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean()
)
# contrastive loss: long term
batch_size, sequence_length, emb_dim = embs["long_term"].size()
skip_frames = 100
view_1_id = (
torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
)
view_2_id = (
torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
)
view_1 = byol_preds["long_term"][torch.arange(batch_size), view_1_id]
view_2 = embs["long_term"][torch.arange(batch_size), view_2_id]
byol_loss_long_term = (
1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean()
)
# backprop
loss = 5e2 * hoa_loss + 0.5 * byol_loss_short_term + 0.5 * byol_loss_long_term
loss.backward()
optimizer.step()
step += 1
if step % log_every_step == 0:
writer.add_scalar("train/hoa_loss", hoa_loss.item(), step)
writer.add_scalar(
"train/byol_loss_short_term", byol_loss_short_term.item(), step
)
writer.add_scalar(
"train/byol_loss_long_term", byol_loss_long_term.item(), step
)
writer.add_scalar("train/total_loss", loss.item(), step)
return step
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--job",
default="train",
const="train",
nargs="?",
choices=["train", "compute_representations"],
help="select task",
)
parser.add_argument("--data_root", type=str, default="./data/mabe")
parser.add_argument("--cache_path", type=str, default="./data/mabe/mouse_triplet")
parser.add_argument("--hoa_bins", type=int, default=32)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--num_workers", type=int, default=16)
parser.add_argument("--epochs", type=int, default=500)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--weight_decay", type=float, default=4e-5)
parser.add_argument("--log_every_step", type=int, default=50)
parser.add_argument("--ckpt_path", type=str, default=None)
args = parser.parse_args()
if args.job == "train":
train(args)
elif args.job == "compute_representations":
compute_representations(args)
def train(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# dataset
if not Dataset.cache_is_available(args.cache_path, args.hoa_bins):
print("Processing data...")
keypoints, split_mask, batch = load_mice_triplet(args.data_root)
input_feats, target_feats, ignore_frames = mouse_feature_extractor(keypoints)
else:
print("No need to process data")
input_feats = target_feats = ignore_frames = None
dataset = Dataset(
input_feats=input_feats,
target_feats=target_feats,
ignore_frames=ignore_frames,
cache_path=args.cache_path,
cache=True,
hoa_bins=args.hoa_bins,
hoa_window=30,
)
print("Number of sequences:", len(dataset))
# prepare dataloaders
train_loader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_workers=args.num_workers,
pin_memory=True,
)
# build model
model = BAMS(
input_size=dataset.input_size,
short_term=dict(num_channels=(64, 64, 32, 32), kernel_size=3),
long_term=dict(num_channels=(64, 64, 64, 32, 32), kernel_size=3, dilation=4),
predictor=dict(
hidden_layers=(-1, 256, 512, 512, dataset.target_size * args.hoa_bins),
), # frame rate = 30, 6 steps = 200ms
).to(device)
model_name = f"bams-mouse-triplet-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
writer = SummaryWriter("runs/" + model_name)
main_params = [p for name, p in model.named_parameters() if "byol" not in name]
byol_params = list(model.byol_predictors.parameters())
optimizer = optim.AdamW(
[{"params": main_params}, {"params": byol_params, "lr": args.lr * 10}],
lr=args.lr,
weight_decay=args.weight_decay,
)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200], gamma=0.1) | criterion = HoALoss(hoa_bins=args.hoa_bins, skip_frames=60) | 5 | 2023-12-05 16:26:57+00:00 | 8k |
janmartchouk/vidgen | main.py | [
{
"identifier": "ContentGetter",
"path": "src/content_getter.py",
"snippet": "class ContentGetter:\n def __init__(self, loglevel = logging.INFO):\n self.logger = setup_logger(__name__, loglevel, emoji='🌍')\n\n # Get a list of Reddit Posts from an RSS feed\n def from_subreddit(self, subr... | import logging
import time
import argparse
import concurrent.futures
import json
import os
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
from utils.youtube_uploader import YouTubeUploader
from src.content_getter import ContentGetter
from config.dicts import SUBREDDITS
from config.structure import VIDEO_DIR
from src.db import DB
from utils.logger import setup_logger
from src.audio_generator import AudioGenerator
from src.subtitler import Subtitler
from src.composer import Composer
from utils.text import shorten_string | 4,838 |
def update_db(logger, db: DB):
    """
    Refresh the DB with posts scraped from every configured subreddit.

    Each post's hash is checked against the DB first, so already-known
    posts are never inserted twice. In quick mode (args.quick) the scan
    stops as soon as args.quick_limit new posts have been stored.

    :param logger: logger used for progress reporting
    :param db: DB instance the posts are written to
    """
    started = time.time()
    logger.info("Updating DB")
    getter = ContentGetter(loglevel=logging.INFO)
    new_insertions = 0
    with logging_redirect_tqdm(loggers=[logger, getter.logger, db.logger]):
        for subreddit in tqdm(SUBREDDITS, desc="Subreddits", leave=False):
            for post in tqdm(getter.from_subreddit(subreddit), desc="Posts", leave=False):
                # Skip anything we already stored (hash is the identity).
                if db.get_post_by_hash(post.hash):
                    continue
                db.insert_post(post)
                new_insertions += 1
                if args.quick and new_insertions >= args.quick_limit:
                    logger.debug(f"Quick mode: Stopping after {new_insertions} new insertions")
                    break
            if args.quick and new_insertions >= args.quick_limit:
                break
    finished = time.time()
    logger.info(f"DB Update complete. Inserted {new_insertions} new Posts. Finished in {finished - started} seconds")
def generate_audio(logger, db: DB, num_threads=16):
    """
    Generate audio for Posts in the DB using a thread pool.

    Posts that already carry audio are treated as successes without doing
    any work; posts whose generation fails are removed from the DB.

    :param logger: logger used for progress and error reporting
    :param db: DB instance holding the posts
    :param num_threads: worker threads for the (I/O-bound) generation
    """
    start = time.time()
    logger.info("Generating audio")
    ag = AudioGenerator(loglevel=logging.INFO)
    failed_number = 0
    successes = 0
    all_posts = db.get_all_posts()
    if args.quick:
        all_posts = all_posts[:args.quick_limit]  # only work on quick_limit posts in quick mode
    num_posts = len(all_posts)
    bar = tqdm(total=num_posts, desc="Audios", leave=False)
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor, logging_redirect_tqdm(loggers=[logger, ag.logger, db.logger]):
        future_to_post = {executor.submit(process_individual_post, post, ag, post.audio): post for post in all_posts}
        for future in concurrent.futures.as_completed(future_to_post):
            post = future_to_post[future]
            bar.set_postfix_str(post.short_hash)  # show current post in the progress bar
            try:
                result = future.result()
                if result:
                    post.audio = True
                    db.update_post(post)  # TODO
                    successes += 1
                    if args.quick and successes >= args.quick_limit:
                        logger.debug(f"Quick mode: Stopping after {successes} successes")
                        # Cancel everything not yet started; otherwise the
                        # executor shutdown at the end of the with-block keeps
                        # processing posts we no longer want (cancel() is a
                        # no-op on futures that already ran).
                        for pending in future_to_post:
                            pending.cancel()
                        break
                else:
                    failed_number += 1
                    logger.debug(f"Failed to generate audio for post {post.short_hash} -- Deleting from DB")
                    db.delete_post(post)  # TODO
            except Exception as exc:
                # A crashed worker counts as neither success nor failure;
                # the post stays in the DB for a later retry.
                logger.error(f"Error processing post {post.short_hash}: {exc}")
            finally:
                bar.update(1)  # update progressbar
    end = time.time()
    bar.close()
    elapsed = end - start
    # successes can be 0 (every generation failed or every worker crashed);
    # the old code divided by it unconditionally and raised ZeroDivisionError
    # right at the summary log line.
    per_post = elapsed / successes if successes else float("nan")
    logger.info(f"Generated audio for {successes} Posts ({failed_number} failed). Finished in {elapsed} seconds ({per_post} seconds per Post)")
def process_individual_post(post, generator, property):
    """
    Ensure a post has the asset tracked by *property*.

    When *property* is already truthy the post needs no work and True is
    returned without touching the generator. Otherwise the generator is
    asked to produce the asset; its truthiness becomes the result.

    :return: True on success or no-op, False when generation failed
    """
    # NOTE(review): `property` shadows the builtin -- name kept so the
    # existing call sites stay compatible.
    if property:
        return True
    return bool(generator.from_post(post))
def generate_subtitles(logger, db: DB):
"""
Generate subtitles from Posts in the DB.
"""
### We cannot multithread this well since Subtitler uses a
### full machine learning model loaded into RAM in the background.
### For multiple threads, we would need to load it multiple times. bad idea.
### If you implement Subtitler() to, i.e., use a server such as the whisper API,
### then you can multithread this
start = time.time()
logger.info("Generating subtitles")
|
def update_db(logger, db: DB):
    """
    Refresh the DB with posts scraped from every configured subreddit.

    Each post's hash is checked against the DB first, so already-known
    posts are never inserted twice. In quick mode (args.quick) the scan
    stops as soon as args.quick_limit new posts have been stored.

    :param logger: logger used for progress reporting
    :param db: DB instance the posts are written to
    """
    started = time.time()
    logger.info("Updating DB")
    getter = ContentGetter(loglevel=logging.INFO)
    new_insertions = 0
    with logging_redirect_tqdm(loggers=[logger, getter.logger, db.logger]):
        for subreddit in tqdm(SUBREDDITS, desc="Subreddits", leave=False):
            for post in tqdm(getter.from_subreddit(subreddit), desc="Posts", leave=False):
                # Skip anything we already stored (hash is the identity).
                if db.get_post_by_hash(post.hash):
                    continue
                db.insert_post(post)
                new_insertions += 1
                if args.quick and new_insertions >= args.quick_limit:
                    logger.debug(f"Quick mode: Stopping after {new_insertions} new insertions")
                    break
            if args.quick and new_insertions >= args.quick_limit:
                break
    finished = time.time()
    logger.info(f"DB Update complete. Inserted {new_insertions} new Posts. Finished in {finished - started} seconds")
def generate_audio(logger, db: DB, num_threads=16):
    """
    Generate audio for Posts in the DB using a thread pool.

    Posts that already carry audio are treated as successes without doing
    any work; posts whose generation fails are removed from the DB.

    :param logger: logger used for progress and error reporting
    :param db: DB instance holding the posts
    :param num_threads: worker threads for the (I/O-bound) generation
    """
    start = time.time()
    logger.info("Generating audio")
    ag = AudioGenerator(loglevel=logging.INFO)
    failed_number = 0
    successes = 0
    all_posts = db.get_all_posts()
    if args.quick:
        all_posts = all_posts[:args.quick_limit]  # only work on quick_limit posts in quick mode
    num_posts = len(all_posts)
    bar = tqdm(total=num_posts, desc="Audios", leave=False)
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor, logging_redirect_tqdm(loggers=[logger, ag.logger, db.logger]):
        future_to_post = {executor.submit(process_individual_post, post, ag, post.audio): post for post in all_posts}
        for future in concurrent.futures.as_completed(future_to_post):
            post = future_to_post[future]
            bar.set_postfix_str(post.short_hash)  # show current post in the progress bar
            try:
                result = future.result()
                if result:
                    post.audio = True
                    db.update_post(post)  # TODO
                    successes += 1
                    if args.quick and successes >= args.quick_limit:
                        logger.debug(f"Quick mode: Stopping after {successes} successes")
                        # Cancel everything not yet started; otherwise the
                        # executor shutdown at the end of the with-block keeps
                        # processing posts we no longer want (cancel() is a
                        # no-op on futures that already ran).
                        for pending in future_to_post:
                            pending.cancel()
                        break
                else:
                    failed_number += 1
                    logger.debug(f"Failed to generate audio for post {post.short_hash} -- Deleting from DB")
                    db.delete_post(post)  # TODO
            except Exception as exc:
                # A crashed worker counts as neither success nor failure;
                # the post stays in the DB for a later retry.
                logger.error(f"Error processing post {post.short_hash}: {exc}")
            finally:
                bar.update(1)  # update progressbar
    end = time.time()
    bar.close()
    elapsed = end - start
    # successes can be 0 (every generation failed or every worker crashed);
    # the old code divided by it unconditionally and raised ZeroDivisionError
    # right at the summary log line.
    per_post = elapsed / successes if successes else float("nan")
    logger.info(f"Generated audio for {successes} Posts ({failed_number} failed). Finished in {elapsed} seconds ({per_post} seconds per Post)")
def process_individual_post(post, generator, property):
    """
    Ensure a post has the asset tracked by *property*.

    When *property* is already truthy the post needs no work and True is
    returned without touching the generator. Otherwise the generator is
    asked to produce the asset; its truthiness becomes the result.

    :return: True on success or no-op, False when generation failed
    """
    # NOTE(review): `property` shadows the builtin -- name kept so the
    # existing call sites stay compatible.
    if property:
        return True
    return bool(generator.from_post(post))
def generate_subtitles(logger, db: DB):
"""
Generate subtitles from Posts in the DB.
"""
### We cannot multithread this well since Subtitler uses a
### full machine learning model loaded into RAM in the background.
### For multiple threads, we would need to load it multiple times. bad idea.
### If you implement Subtitler() to, i.e., use a server such as the whisper API,
### then you can multithread this
start = time.time()
logger.info("Generating subtitles") | st = Subtitler(loglevel=logging.INFO) | 6 | 2023-12-14 13:00:22+00:00 | 8k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all seven distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.