Dataset schema (column name, dtype, and the value-length/range statistics reported by the dataset viewer):

| column             | dtype         | min | max      |
|--------------------|---------------|-----|----------|
| repo_name          | stringlengths | 7   | 71       |
| file_path          | stringlengths | 5   | 118      |
| context            | list          |     |          |
| import_statement   | stringlengths | 45  | 12.5k    |
| token_num          | int64         | 641 | 99.4k    |
| cropped_code       | stringlengths | 44  | 17k      |
| all_code           | stringlengths | 43  | 754k     |
| next_line          | stringlengths | 2   | 330      |
| gold_snippet_index | int64         | 0   | 68       |
| created_at         | stringlengths | 25  | 25       |
| level              | stringclasses | 9 values |     |
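Each record pairs an in-file prefix (`cropped_code`) with retrieved cross-file `context` snippets and the ground-truth `next_line`; `gold_snippet_index` marks which context snippet holds the needed definition, and `level` appears to bucket context length (e.g. "24k"). A minimal sketch of loading and inspecting one record with the `datasets` library, assuming the data is hosted on the Hugging Face Hub (the dataset id below is a placeholder, not the real one):

```python
from datasets import load_dataset

# Placeholder dataset id -- substitute the actual Hub id this card belongs to.
ds = load_dataset("org/repo-level-completion", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"])  # provenance + context-length bucket
print(len(row["context"]), row["gold_snippet_index"])    # retrieved snippets and the gold one
print(row["next_line"])                                  # the single line to be predicted
```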
repo_name: camenduru/FreeInit-hf
file_path: app.py
context: [ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in...
import_statement:

```python
import os
import torch
import random
import gradio as gr
from glob import glob
from omegaconf import OmegaConf
from safetensors import safe_open
from diffusers import AutoencoderKL
from diffusers import EulerDiscreteScheduler, DDIMScheduler
from diffusers.utils.import_utils import is_xformers_available
from transformers import CLIPTextModel, CLIPTokenizer
from animatediff.models.unet import UNet3DConditionModel
from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline
from animatediff.utils.util import save_videos_grid
from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
from diffusers.training_utils import set_seed
from animatediff.utils.freeinit_utils import get_freq_filter
from collections import namedtuple
```
token_num: 14,844
cropped_code:

```python
        self.selected_base_model = None
        self.selected_motion_module = None
        self.selected_filter_type = None
        self.set_width = None
        self.set_height = None
        self.set_d_s = None
        self.set_d_t = None

        self.refresh_motion_module()
        self.refresh_personalized_model()

        # config models
        self.inference_config = OmegaConf.load(inference_config_path)
        self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
        self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda()
        self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
        self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
        self.freq_filter = None

        self.update_base_model(self.base_model_list[-2])
        self.update_motion_module(self.motion_module_list[0])
        self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25)

    def refresh_motion_module(self):
        motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt"))
        self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list])

    def refresh_personalized_model(self):
        base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors"))
        self.base_model_list = sorted([os.path.basename(p) for p in base_model_list])

    def update_base_model(self, base_model_dropdown):
        self.selected_base_model = base_model_dropdown

        base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown)
        base_model_state_dict = {}
        with safe_open(base_model_dropdown, framework="pt", device="cpu") as f:
            for key in f.keys():
                base_model_state_dict[key] = f.get_tensor(key)

        converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config)
        self.vae.load_state_dict(converted_vae_checkpoint)

        converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config)
        self.unet.load_state_dict(converted_unet_checkpoint, strict=False)

        self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict)
        return gr.Dropdown.update()

    def update_motion_module(self, motion_module_dropdown):
        self.selected_motion_module = motion_module_dropdown

        motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown)
        motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu")
        _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False)
        assert len(unexpected) == 0
        return gr.Dropdown.update()

    # def update_filter(self, shape, method, n, d_s, d_t):
    def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider):
        self.set_width = width_slider
        self.set_height = height_slider
        self.selected_filter_type = filter_type_dropdown
        self.set_d_s = d_s_slider
        self.set_d_t = d_t_slider

        vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor]
        self.freq_filter = get_freq_filter(
            shape,
            device="cuda",
            filter_type=self.selected_filter_type,
            n=4,
            d_s=self.set_d_s,
            d_t=self.set_d_t
        )

    def animate(
        self,
        base_model_dropdown,
        motion_module_dropdown,
        prompt_textbox,
        negative_prompt_textbox,
        width_slider,
        height_slider,
        seed_textbox,
        # freeinit params
        filter_type_dropdown,
        d_s_slider,
        d_t_slider,
        num_iters_slider,
        # speed up
        speed_up_options
    ):
        # set global seed
        set_seed(42)

        d_s = float(d_s_slider)
        d_t = float(d_t_slider)
        num_iters = int(num_iters_slider)

        if self.selected_base_model != base_model_dropdown:
            self.update_base_model(base_model_dropdown)
        if self.selected_motion_module != motion_module_dropdown:
            self.update_motion_module(motion_module_dropdown)
        self.set_width = width_slider
        self.set_height = height_slider
        self.selected_filter_type = filter_type_dropdown
        self.set_d_s = d_s
        self.set_d_t = d_t
        if self.set_width != width_slider or self.set_height != height_slider or self.selected_filter_type != filter_type_dropdown or self.set_d_s != d_s or self.set_d_t != d_t:
            self.update_filter(width_slider, height_slider, filter_type_dropdown, d_s, d_t)

        if is_xformers_available():
            self.unet.enable_xformers_memory_efficient_attention()
```
all_code:

```python
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5"
inference_config_path = "configs/inference/inference-v1.yaml"

css = """
.toolbutton {
    margin-buttom: 0em 0em 0em 0em;
    max-width: 2.5em;
    min-width: 2.5em !important;
    height: 2.5em;
}
"""

examples = [
    # 0-RealisticVision
    [
        "realisticVisionV51_v20Novae.safetensors",
        "mm_sd_v14.ckpt",
        "A panda standing on a surfboard in the ocean under moonlight.",
        "worst quality, low quality, nsfw, logo",
        512, 512, "2005563494988190",
        "butterworth", 0.25, 0.25, 3,
        ["use_fp16"]
    ],
    # 1-ToonYou
    [
        "toonyou_beta3.safetensors",
        "mm_sd_v14.ckpt",
        "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress",
        "(worst quality, low quality)",
        512, 512, "478028150728261",
        "butterworth", 0.25, 0.25, 3,
        ["use_fp16"]
    ],
    # 2-Lyriel
    [
        "lyriel_v16.safetensors",
        "mm_sd_v14.ckpt",
        "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space",
        "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo",
        512, 512, "1566149281915957",
        "butterworth", 0.25, 0.25, 3,
        ["use_fp16"]
    ],
    # 3-RCNZ
    [
        "rcnzCartoon3d_v10.safetensors",
        "mm_sd_v14.ckpt",
        "A cute raccoon playing guitar in a boat on the ocean",
        "worst quality, low quality, nsfw, logo",
        512, 512, "1566149281915957",
        "butterworth", 0.25, 0.25, 3,
        ["use_fp16"]
    ],
    # 4-MajicMix
    [
        "majicmixRealistic_v5Preview.safetensors",
        "mm_sd_v14.ckpt",
        "1girl, reading book",
        "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles",
        512, 512, "2005563494988190",
        "butterworth", 0.25, 0.25, 3,
        ["use_fp16"]
    ],
    # # 5-RealisticVision
    # [
    #     "realisticVisionV51_v20Novae.safetensors",
    #     "mm_sd_v14.ckpt",
    #     "A panda standing on a surfboard in the ocean in sunset.",
    #     "worst quality, low quality, nsfw, logo",
    #     512, 512, "2005563494988190",
    #     "butterworth", 0.25, 0.25, 3,
    #     ["use_fp16"]
    # ]
]

# clean unrelated ckpts
# ckpts = [
#     "realisticVisionV40_v20Novae.safetensors",
#     "majicmixRealistic_v5Preview.safetensors",
#     "rcnzCartoon3d_v10.safetensors",
#     "lyriel_v16.safetensors",
#     "toonyou_beta3.safetensors"
# ]
# for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")):
#     for ckpt in ckpts:
#         if path.endswith(ckpt): break
#     else:
#         print(f"### Cleaning {path} ...")
#         os.system(f"rm -rf {path}")

# os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}")

# os.system(f"bash download_bashscripts/1-ToonYou.sh")
# os.system(f"bash download_bashscripts/2-Lyriel.sh")
# os.system(f"bash download_bashscripts/3-RcnzCartoon.sh")
# os.system(f"bash download_bashscripts/4-MajicMix.sh")
# os.system(f"bash download_bashscripts/5-RealisticVision.sh")

# # clean Gradio cache
# print(f"### Cleaning cached examples ...")
# os.system(f"rm -rf gradio_cached_examples/")


class AnimateController:
    def __init__(self):
        # config dirs
        self.basedir = os.getcwd()
        self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion")
        self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module")
        self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA")
        self.savedir = os.path.join(self.basedir, "samples")
        os.makedirs(self.savedir, exist_ok=True)

        self.base_model_list = []
        self.motion_module_list = []
        self.filter_type_list = [
            "butterworth",
            "gaussian",
            "box",
            "ideal"
        ]

        self.selected_base_model = None
        self.selected_motion_module = None
        self.selected_filter_type = None
        self.set_width = None
        self.set_height = None
        self.set_d_s = None
        self.set_d_t = None

        self.refresh_motion_module()
        self.refresh_personalized_model()

        # config models
        self.inference_config = OmegaConf.load(inference_config_path)
        self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
        self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda()
        self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
        self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
        self.freq_filter = None

        self.update_base_model(self.base_model_list[-2])
        self.update_motion_module(self.motion_module_list[0])
        self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25)

    def refresh_motion_module(self):
        motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt"))
        self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list])

    def refresh_personalized_model(self):
        base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors"))
        self.base_model_list = sorted([os.path.basename(p) for p in base_model_list])

    def update_base_model(self, base_model_dropdown):
        self.selected_base_model = base_model_dropdown

        base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown)
        base_model_state_dict = {}
        with safe_open(base_model_dropdown, framework="pt", device="cpu") as f:
            for key in f.keys():
                base_model_state_dict[key] = f.get_tensor(key)

        converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config)
        self.vae.load_state_dict(converted_vae_checkpoint)

        converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config)
        self.unet.load_state_dict(converted_unet_checkpoint, strict=False)

        self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict)
        return gr.Dropdown.update()

    def update_motion_module(self, motion_module_dropdown):
        self.selected_motion_module = motion_module_dropdown

        motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown)
        motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu")
        _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False)
        assert len(unexpected) == 0
        return gr.Dropdown.update()

    # def update_filter(self, shape, method, n, d_s, d_t):
    def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider):
        self.set_width = width_slider
        self.set_height = height_slider
        self.selected_filter_type = filter_type_dropdown
        self.set_d_s = d_s_slider
        self.set_d_t = d_t_slider

        vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor]
        self.freq_filter = get_freq_filter(
            shape,
            device="cuda",
            filter_type=self.selected_filter_type,
            n=4,
            d_s=self.set_d_s,
            d_t=self.set_d_t
        )

    def animate(
        self,
        base_model_dropdown,
        motion_module_dropdown,
        prompt_textbox,
        negative_prompt_textbox,
        width_slider,
        height_slider,
        seed_textbox,
        # freeinit params
        filter_type_dropdown,
        d_s_slider,
        d_t_slider,
        num_iters_slider,
        # speed up
        speed_up_options
    ):
        # set global seed
        set_seed(42)

        d_s = float(d_s_slider)
        d_t = float(d_t_slider)
        num_iters = int(num_iters_slider)

        if self.selected_base_model != base_model_dropdown:
            self.update_base_model(base_model_dropdown)
        if self.selected_motion_module != motion_module_dropdown:
            self.update_motion_module(motion_module_dropdown)
        self.set_width = width_slider
        self.set_height = height_slider
        self.selected_filter_type = filter_type_dropdown
        self.set_d_s = d_s
        self.set_d_t = d_t
        if self.set_width != width_slider or self.set_height != height_slider or self.selected_filter_type != filter_type_dropdown or self.set_d_s != d_s or self.set_d_t != d_t:
            self.update_filter(width_slider, height_slider, filter_type_dropdown, d_s, d_t)

        if is_xformers_available():
            self.unet.enable_xformers_memory_efficient_attention()
```
next_line: pipeline = AnimationFreeInitPipeline(
gold_snippet_index: 1
created_at: 2023-12-19 21:06:32+00:00
level: 24k
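A record like the one above is consumed as a next-line prediction task: the model sees the cross-file `context` snippets plus the in-file prefix `cropped_code` and must produce `next_line`. A minimal sketch of scoring one record under that assumption (`generate_one_line` is a hypothetical stand-in for whatever model is being evaluated):

```python
def exact_match(record: dict, generate_one_line) -> bool:
    # Assemble the prompt the way the fields suggest: retrieved cross-file
    # snippets first, then the in-file prefix cropped just before the target.
    snippets = [c["snippet"] for c in record["context"]]
    prompt = "\n".join(snippets) + "\n" + record["cropped_code"]
    prediction = generate_one_line(prompt)  # hypothetical model call
    return prediction.strip() == record["next_line"].strip()
```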
repo_name: m-abr/FCPCodebase
file_path: world/World.py
context: [ { "identifier": "Logger", "path": "logs/Logger.py", "snippet": "class Logger():\n _folder = None\n\n def __init__(self, is_enabled:bool, topic:str) -> None:\n self.no_of_entries = 0 \n self.enabled = is_enabled\n self.topic = topic\n\n def write(self, msg:str, timestamp:boo...
import_statement:

```python
from collections import deque
from cpp.ball_predictor import ball_predictor
from cpp.localization import localization
from logs.Logger import Logger
from math import atan2, pi
from math_ops.Matrix_4x4 import Matrix_4x4
from world.commons.Draw import Draw
from world.commons.Other_Robot import Other_Robot
from world.Robot import Robot
import numpy as np
```
token_num: 20,401
cropped_code:

```python
        ----------
        history_steps : int
            number of history steps to consider [1,20]

        Examples
        --------
        get_ball_rel_vel(1) is equivalent to (current rel pos - last rel pos) / 0.04
        get_ball_rel_vel(2) is equivalent to (current rel pos - rel pos 0.08s ago) / 0.08
        get_ball_rel_vel(3) is equivalent to (current rel pos - rel pos 0.12s ago) / 0.12
        '''
        assert 1 <= history_steps <= 20, "Argument 'history_steps' must be in range [1,20]"

        if len(self.ball_rel_torso_cart_pos_history) == 0:
            return np.zeros(3)

        h_step = min(history_steps, len(self.ball_rel_torso_cart_pos_history))
        t = h_step * World.VISUALSTEP

        return (self.ball_rel_torso_cart_pos - self.ball_rel_torso_cart_pos_history[h_step-1]) / t

    def get_ball_abs_vel(self, history_steps:int):
        '''
        Get ball absolute velocity (m/s)

        Parameters
        ----------
        history_steps : int
            number of history steps to consider [1,20]

        Examples
        --------
        get_ball_abs_vel(1) is equivalent to (current abs pos - last abs pos) / 0.04
        get_ball_abs_vel(2) is equivalent to (current abs pos - abs pos 0.08s ago) / 0.08
        get_ball_abs_vel(3) is equivalent to (current abs pos - abs pos 0.12s ago) / 0.12
        '''
        assert 1 <= history_steps <= 20, "Argument 'history_steps' must be in range [1,20]"

        if len(self.ball_abs_pos_history) == 0:
            return np.zeros(3)

        h_step = min(history_steps, len(self.ball_abs_pos_history))
        t = h_step * World.VISUALSTEP

        return (self.ball_abs_pos - self.ball_abs_pos_history[h_step-1]) / t

    def get_predicted_ball_pos(self, max_speed):
        '''
        Get predicted 2D ball position when its predicted speed is equal to or less than `max_speed`
        In case that position exceeds the prediction horizon, the last available prediction is returned

        Parameters
        ----------
        max_speed : float
            maximum speed at which the ball will be moving at returned future position
        '''
        b_sp = self.ball_2d_pred_spd
        index = len(b_sp) - max(1, np.searchsorted(b_sp[::-1], max_speed, side='right'))
        return self.ball_2d_pred_pos[index]

    def get_intersection_point_with_ball(self, player_speed):
        '''
        Get 2D intersection point with moving ball, based on `self.ball_2d_pred_pos`

        Parameters
        ----------
        player_speed : float
            average speed at which the robot will chase the ball

        Returns
        -------
        2D intersection point : ndarray
            2D intersection point with moving ball, assuming the robot moves at an avg. speed of `player_speed`
        intersection distance : float
            distance between current robot position and intersection point
        '''
        params = np.array([*self.robot.loc_head_position[:2], player_speed*0.02, *self.ball_2d_pred_pos.flat], np.float32)
        pred_ret = ball_predictor.get_intersection(params)
        return pred_ret[:2], pred_ret[2]

    def update(self):
        r = self.robot
        PM = self.play_mode
        W = World

        # reset variables
        r.loc_is_up_to_date = False
        r.loc_head_z_is_up_to_date = False

        # update play mode groups
        if PM in (W.M_PLAY_ON, W.M_GAME_OVER):  # most common group
            self.play_mode_group = W.MG_OTHER
        elif PM in (W.M_OUR_KICKOFF, W.M_OUR_KICK_IN, W.M_OUR_CORNER_KICK, W.M_OUR_GOAL_KICK,
                    W.M_OUR_OFFSIDE, W.M_OUR_PASS, W.M_OUR_DIR_FREE_KICK, W.M_OUR_FREE_KICK):
            self.play_mode_group = W.MG_OUR_KICK
        elif PM in (W.M_THEIR_KICK_IN, W.M_THEIR_CORNER_KICK, W.M_THEIR_GOAL_KICK, W.M_THEIR_OFFSIDE,
                    W.M_THEIR_PASS, W.M_THEIR_DIR_FREE_KICK, W.M_THEIR_FREE_KICK, W.M_THEIR_KICKOFF):
            self.play_mode_group = W.MG_THEIR_KICK
        elif PM in (W.M_BEFORE_KICKOFF, W.M_THEIR_GOAL):
            self.play_mode_group = W.MG_ACTIVE_BEAM
        elif PM in (W.M_OUR_GOAL,):
            self.play_mode_group = W.MG_PASSIVE_BEAM
        elif PM is not None:
            raise ValueError(f'Unexpected play mode ID: {PM}')

        r.update_pose()  # update forward kinematics

        if self.ball_is_visible:
            # Compute ball position, relative to torso
            self.ball_rel_torso_cart_pos = r.head_to_body_part_transform("torso", self.ball_rel_head_cart_pos)

        if self.vision_is_up_to_date:  # update vision based localization
            # Prepare all variables for localization
            feet_contact = np.zeros(6)

            lf_contact = r.frp.get('lf', None)
            rf_contact = r.frp.get('rf', None)
            if lf_contact is not None:
```
all_code:

```python
class World():
    STEPTIME = 0.02    # Fixed step time
    STEPTIME_MS = 20   # Fixed step time in milliseconds
    VISUALSTEP = 0.04  # Fixed visual step time
    VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds

    # play modes in our favor
    M_OUR_KICKOFF = 0
    M_OUR_KICK_IN = 1
    M_OUR_CORNER_KICK = 2
    M_OUR_GOAL_KICK = 3
    M_OUR_FREE_KICK = 4
    M_OUR_PASS = 5
    M_OUR_DIR_FREE_KICK = 6
    M_OUR_GOAL = 7
    M_OUR_OFFSIDE = 8

    # play modes in their favor
    M_THEIR_KICKOFF = 9
    M_THEIR_KICK_IN = 10
    M_THEIR_CORNER_KICK = 11
    M_THEIR_GOAL_KICK = 12
    M_THEIR_FREE_KICK = 13
    M_THEIR_PASS = 14
    M_THEIR_DIR_FREE_KICK = 15
    M_THEIR_GOAL = 16
    M_THEIR_OFFSIDE = 17

    # neutral play modes
    M_BEFORE_KICKOFF = 18
    M_GAME_OVER = 19
    M_PLAY_ON = 20

    # play mode groups
    MG_OUR_KICK = 0
    MG_THEIR_KICK = 1
    MG_ACTIVE_BEAM = 2
    MG_PASSIVE_BEAM = 3
    MG_OTHER = 4  # play on, game over

    FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0))
    FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8))

    def __init__(self, robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool,
                 enable_draw:bool, logger:Logger, host:str) -> None:

        self.team_name = team_name                                    # Name of our team
        self.team_name_opponent : str = None                          # Name of opponent team
        self.apply_play_mode_correction = apply_play_mode_correction  # True to adjust ball position according to play mode
        self.step = 0           # Total number of received simulation steps (always in sync with self.time_local_ms)
        self.time_server = 0.0  # Time, in seconds, as indicated by the server (this time is NOT reliable, use only for synchronization between agents)
        self.time_local_ms = 0  # Reliable simulation time in milliseconds, use this when possible (it is incremented 20ms for every TCP message)
        self.time_game = 0.0    # Game time, in seconds, as indicated by the server
        self.goals_scored = 0   # Goals score by our team
        self.goals_conceded = 0 # Goals conceded by our team
        self.team_side_is_left : bool = None  # True if our team plays on the left side (this value is later changed by the world parser)
        self.play_mode = None        # Play mode of the soccer game, provided by the server
        self.play_mode_group = None  # Certain play modes share characteristics, so it makes sense to group them
        self.flags_corners : dict = None  # corner flags, key=(x,y,z), always assume we play on the left side
        self.flags_posts : dict = None    # goal posts, key=(x,y,z), always assume we play on the left side
        self.ball_rel_head_sph_pos = np.zeros(3)    # Ball position relative to head (spherical coordinates) (m, deg, deg)
        self.ball_rel_head_cart_pos = np.zeros(3)   # Ball position relative to head (cartesian coordinates) (m)
        self.ball_rel_torso_cart_pos = np.zeros(3)  # Ball position relative to torso (cartesian coordinates) (m)
        self.ball_rel_torso_cart_pos_history = deque(maxlen=20)  # Ball position relative to torso history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position)
        self.ball_abs_pos = np.zeros(3)  # Ball absolute position (up to date if self.ball_is_visible and self.robot.loc_is_up_to_date) (m)
        self.ball_abs_pos_history = deque(maxlen=20)  # Ball absolute position history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position)
        self.ball_abs_pos_last_update = 0  # World.time_local_ms when self.ball_abs_pos was last updated by vision or radio
        self.ball_abs_vel = np.zeros(3)  # Ball velocity vector based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead get_ball_abs_vel)
        self.ball_abs_speed = 0  # Ball scalar speed based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead ||get_ball_abs_vel||)
        self.ball_is_visible = False  # True if the last server message contained vision information related to the ball
        self.is_ball_abs_pos_from_vision = False  # True if ball_abs_pos originated from vision, False if it originated from radio
        self.ball_last_seen = 0  # World.time_local_ms when ball was last seen (note: may be different from self.ball_abs_pos_last_update)
        self.ball_cheat_abs_pos = np.zeros(3)  # Absolute ball position provided by the server as cheat (m)
        self.ball_cheat_abs_vel = np.zeros(3)  # Absolute velocity vector based on the last 2 values of self.ball_cheat_abs_pos (m/s)
        self.ball_2d_pred_pos = np.zeros((1,2))  # prediction of current and future 2D ball positions*
        self.ball_2d_pred_vel = np.zeros((1,2))  # prediction of current and future 2D ball velocities*
        self.ball_2d_pred_spd = np.zeros(1)      # prediction of current and future 2D ball linear speeds*
        # *at intervals of 0.02 s until ball comes to a stop or gets out of bounds (according to prediction)
        self.lines = np.zeros((30,6))  # Position of visible lines, relative to head, start_pos+end_pos (spherical coordinates) (m, deg, deg, m, deg, deg)
        self.line_count = 0            # Number of visible lines
        self.vision_last_update = 0    # World.time_local_ms when last vision update was received
        self.vision_is_up_to_date = False  # True if the last server message contained vision information
        self.teammates = [Other_Robot(i, True ) for i in range(1,12)]  # List of teammates, ordered by unum
        self.opponents = [Other_Robot(i, False) for i in range(1,12)]  # List of opponents, ordered by unum
        self.teammates[unum-1].is_self = True  # This teammate is self
        self.draw = Draw(enable_draw, unum, host, 32769)    # Draw object for current player
        self.team_draw = Draw(enable_draw, 0, host, 32769)  # Draw object shared with teammates
        self.logger = logger
        self.robot = Robot(unum, robot_type)

    def log(self, msg:str):
        '''
        Shortcut for: self.logger.write(msg, True, self.step)

        Parameters
        ----------
        msg : str
            message to be written after the simulation step
        '''
        self.logger.write(msg, True, self.step)

    def get_ball_rel_vel(self, history_steps:int):
        '''
        Get ball velocity, relative to torso (m/s)

        Parameters
        ----------
        history_steps : int
            number of history steps to consider [1,20]

        Examples
        --------
        get_ball_rel_vel(1) is equivalent to (current rel pos - last rel pos) / 0.04
        get_ball_rel_vel(2) is equivalent to (current rel pos - rel pos 0.08s ago) / 0.08
        get_ball_rel_vel(3) is equivalent to (current rel pos - rel pos 0.12s ago) / 0.12
        '''
        assert 1 <= history_steps <= 20, "Argument 'history_steps' must be in range [1,20]"

        if len(self.ball_rel_torso_cart_pos_history) == 0:
            return np.zeros(3)

        h_step = min(history_steps, len(self.ball_rel_torso_cart_pos_history))
        t = h_step * World.VISUALSTEP

        return (self.ball_rel_torso_cart_pos - self.ball_rel_torso_cart_pos_history[h_step-1]) / t

    def get_ball_abs_vel(self, history_steps:int):
        '''
        Get ball absolute velocity (m/s)

        Parameters
        ----------
        history_steps : int
            number of history steps to consider [1,20]

        Examples
        --------
        get_ball_abs_vel(1) is equivalent to (current abs pos - last abs pos) / 0.04
        get_ball_abs_vel(2) is equivalent to (current abs pos - abs pos 0.08s ago) / 0.08
        get_ball_abs_vel(3) is equivalent to (current abs pos - abs pos 0.12s ago) / 0.12
        '''
        assert 1 <= history_steps <= 20, "Argument 'history_steps' must be in range [1,20]"

        if len(self.ball_abs_pos_history) == 0:
            return np.zeros(3)

        h_step = min(history_steps, len(self.ball_abs_pos_history))
        t = h_step * World.VISUALSTEP

        return (self.ball_abs_pos - self.ball_abs_pos_history[h_step-1]) / t

    def get_predicted_ball_pos(self, max_speed):
        '''
        Get predicted 2D ball position when its predicted speed is equal to or less than `max_speed`
        In case that position exceeds the prediction horizon, the last available prediction is returned

        Parameters
        ----------
        max_speed : float
            maximum speed at which the ball will be moving at returned future position
        '''
        b_sp = self.ball_2d_pred_spd
        index = len(b_sp) - max(1, np.searchsorted(b_sp[::-1], max_speed, side='right'))
        return self.ball_2d_pred_pos[index]

    def get_intersection_point_with_ball(self, player_speed):
        '''
        Get 2D intersection point with moving ball, based on `self.ball_2d_pred_pos`

        Parameters
        ----------
        player_speed : float
            average speed at which the robot will chase the ball

        Returns
        -------
        2D intersection point : ndarray
            2D intersection point with moving ball, assuming the robot moves at an avg. speed of `player_speed`
        intersection distance : float
            distance between current robot position and intersection point
        '''
        params = np.array([*self.robot.loc_head_position[:2], player_speed*0.02, *self.ball_2d_pred_pos.flat], np.float32)
        pred_ret = ball_predictor.get_intersection(params)
        return pred_ret[:2], pred_ret[2]

    def update(self):
        r = self.robot
        PM = self.play_mode
        W = World

        # reset variables
        r.loc_is_up_to_date = False
        r.loc_head_z_is_up_to_date = False

        # update play mode groups
        if PM in (W.M_PLAY_ON, W.M_GAME_OVER):  # most common group
            self.play_mode_group = W.MG_OTHER
        elif PM in (W.M_OUR_KICKOFF, W.M_OUR_KICK_IN, W.M_OUR_CORNER_KICK, W.M_OUR_GOAL_KICK,
                    W.M_OUR_OFFSIDE, W.M_OUR_PASS, W.M_OUR_DIR_FREE_KICK, W.M_OUR_FREE_KICK):
            self.play_mode_group = W.MG_OUR_KICK
        elif PM in (W.M_THEIR_KICK_IN, W.M_THEIR_CORNER_KICK, W.M_THEIR_GOAL_KICK, W.M_THEIR_OFFSIDE,
                    W.M_THEIR_PASS, W.M_THEIR_DIR_FREE_KICK, W.M_THEIR_FREE_KICK, W.M_THEIR_KICKOFF):
            self.play_mode_group = W.MG_THEIR_KICK
        elif PM in (W.M_BEFORE_KICKOFF, W.M_THEIR_GOAL):
            self.play_mode_group = W.MG_ACTIVE_BEAM
        elif PM in (W.M_OUR_GOAL,):
            self.play_mode_group = W.MG_PASSIVE_BEAM
        elif PM is not None:
            raise ValueError(f'Unexpected play mode ID: {PM}')

        r.update_pose()  # update forward kinematics

        if self.ball_is_visible:
            # Compute ball position, relative to torso
            self.ball_rel_torso_cart_pos = r.head_to_body_part_transform("torso", self.ball_rel_head_cart_pos)

        if self.vision_is_up_to_date:  # update vision based localization
            # Prepare all variables for localization
            feet_contact = np.zeros(6)

            lf_contact = r.frp.get('lf', None)
            rf_contact = r.frp.get('rf', None)
            if lf_contact is not None:
```
next_line: feet_contact[0:3] = Matrix_4x4( r.body_parts["lfoot"].transform ).translate( lf_contact[0:3] , True).get_translation()
gold_snippet_index: 1
created_at: 2023-12-16 23:40:23+00:00
level: 24k
repo_name: Sam-Izdat/tinycio
file_path: src/tinycio/balance.py
context: [ { "identifier": "ColorSpace", "path": "src/tinycio/colorspace.py", "snippet": "class ColorSpace:\n \"\"\"\n Color space conversion. Applies OETFs and EOTFs as needed but omits tonemapping. Cylindrical transformations are \n treated as distinct color spaces. Example:\n\n .. highlight:: python...
import_statement:

```python
import typing
import torch
import numpy as np
from typing import Union
from enum import IntEnum

from .colorspace import ColorSpace
from .numerics import Float2, matmul_tl as mm
```
token_num: 20,571
cropped_code:

```python
        elif ilm == cls.Illuminant.F10:      return Float2(0.34609, 0.35986)
        elif ilm == cls.Illuminant.F11:      return Float2(0.38052, 0.37713)
        elif ilm == cls.Illuminant.F12:      return Float2(0.43695, 0.40441)
        elif ilm == cls.Illuminant.LED_B1:   return Float2(0.4560, 0.4078)
        elif ilm == cls.Illuminant.LED_B2:   return Float2(0.4357, 0.4012)
        elif ilm == cls.Illuminant.LED_B3:   return Float2(0.3756, 0.3723)
        elif ilm == cls.Illuminant.LED_B4:   return Float2(0.3422, 0.3502)
        elif ilm == cls.Illuminant.LED_B5:   return Float2(0.3118, 0.3236)
        elif ilm == cls.Illuminant.LED_BH1:  return Float2(0.4474, 0.4066)
        elif ilm == cls.Illuminant.LED_RGB1: return Float2(0.4557, 0.4211)
        elif ilm == cls.Illuminant.LED_V1:   return Float2(0.4560, 0.4548)
        elif ilm == cls.Illuminant.LED_V2:   return Float2(0.3781, 0.3775)
        else: raise Exception(f"Invalid illuminant: {illuminant.name}")

    @staticmethod
    def wp_from_cct(cct:int) -> Float2:
        """
        Compute CIE xy chromaticity coordinates (white point) from correlated colour temperature.

        :param cct: Correlated colour temperature in range [4000, 25000]
        :return: White point coordinates (CIE xy)
        """
        # https://github.com/colour-science/colour/blob/develop/colour/temperature/cie_d.py
        # BSD-3-Clause license:
        # Copyright 2013 Colour Developers
        # Redistribution and use in source and binary forms, with or without modification,
        # are permitted provided that the following conditions are met:
        # 1. Redistributions of source code must retain the above copyright notice, this
        # list of conditions and the following disclaimer.
        # 2. Redistributions in binary form must reproduce the above copyright notice,
        # this list of conditions and the following disclaimer in the documentation and/or
        # other materials provided with the distribution.
        # 3. Neither the name of the copyright holder nor the names of its contributors
        # may be used to endorse or promote products derived from this software without
        # specific prior written permission.
        # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
        # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
        # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
        # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
        # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
        # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
        # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
        # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
        # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
        # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
        assert 4000 <= cct <= 25000, "Correlated color temperature must be in range [4000, 25000]"
        cct = float(cct)
        cct_3 = cct**3
        cct_2 = cct**2

        x = 0.
        if cct <= 7000.:
            x = -4.607 * 10**9 / cct_3 \
                + 2.9678 * 10**6 / cct_2 \
                + 0.09911 * 10**3 / cct \
                + 0.244063
        else:
            x = -2.0064 * 10**9 / cct_3 \
                + 1.9018 * 10**6 / cct_2 \
                + 0.24748 * 10**3 / cct \
                + 0.23704

        y = -3.000 * x**2 + 2.870 * x - 0.275
        return Float2(x, y)

    @staticmethod
    def wp_from_image(im_xyz:Union[torch.Tensor, ColorImage]) -> Float2:
        """
        Estimate the dominant illuminant of an environment map or a target image directly
        and return its approximate CIE xy chromaticity coordinates (white point).

        .. warning::

            This is a lazy method that just averages the pixels in the image tensor.
            There is no spherical mapping, nor PCA, nor any serious attempt to analyze the image.

        :param im_xyz: Image tensor in CIE XYZ color space
        :type im_xyz: torch.Tensor | ColorImage
        :return: Estimated white point coordinates (CIE xy)
        """
        mean_color = torch.tensor([[[im_xyz[0:1].mean(), im_xyz[1:2].mean(), im_xyz[2:3].mean()]]]).permute(2, 0, 1)
        csum = mean_color[0] + mean_color[1] + mean_color[2]
        mean_color[0] /= csum
        mean_color[1] /= csum
        return Float2(mean_color[0].item(), mean_color[1].item())

    @staticmethod
    def apply(
        im_lms:Union[torch.Tensor, ColorImage],
        source_white:Union[Float2, Chromaticity, torch.Tensor, numpy.ndarray],
        target_white:Union[Float2, Chromaticity, torch.Tensor, numpy.ndarray]) -> torch.Tensor:
        """
        Apply white balance.

        :param im_lms: Image tensor in LMS color space
        :type im_lms: torch.Tensor | ColorImage
        :param source_white: Source white point coordinates (CIE xy)
        :type source_white: Float2 | Chromaticity | torch.Tensor | numpy.ndarray
        :param target_white: Target white point coordinates (CIE xy)
        :type target_white: Float2 | Chromaticity | torch.Tensor | numpy.ndarray
        :return: White balanced image tensor
        """
        source = torch.tensor([[[source_white[0], source_white[0], 1.]]], dtype=torch.float32).permute(2, 0, 1)
        target = torch.tensor([[[target_white[0], target_white[0], 1.]]], dtype=torch.float32).permute(2, 0, 1)
        src_lms = ColorSpace.convert(source, ColorSpace.Variant.CIE_XYY, ColorSpace.Variant.LMS)
        dst_lms = ColorSpace.convert(target, ColorSpace.Variant.CIE_XYY, ColorSpace.Variant.LMS)
        mat = [
            [dst_lms[0].item()/src_lms[0].item(), 0., 0.],
            [0., dst_lms[1].item()/src_lms[1].item(), 0.],
            [0., 0., dst_lms[2].item()/src_lms[2].item()]]
```
all_code:

```python
from __future__ import annotations

class WhiteBalance:
    """
    Adjust white balance of an image. Example:

    .. highlight:: python
    .. code-block:: python

        source_white = WhiteBalance.wp_from_image(input_image)
        target_white = WhiteBalance.wp_from_illuminant(WhiteBalance.Illuminant.NORTH_SKY)
        white_balanced_image = WhiteBalance.apply(input_image, source_white, target_white)
    """

    class Illuminant(IntEnum):
        """
        CIE 1931 2° standard illuminant. Available options are:

        .. highlight:: text
        .. code-block:: text

            - NONE
            - A (INCANDESCENT, TUNGSTEN)
            - D50 (HORIZON)
            - D55 (MIDMORNING)
            - D65 (DAYLIGHT_NOON)
            - D75 (NORTH SKY)
            - D93 (BLUE_PHOSPHOR)
            - E (EQUAL_ENERGY)
            - F1 (FLUORESCENT_DAYLIGHT1)
            - F2 (FLUORESCENT_COOL_WHITE)
            - F3 (FLUORESCENT_WHITE)
            - F4 (FLUORESCENT_WARM_WHITE)
            - F5 (FLUORESCENT_DAYLIGHT2)
            - F6 (FLUORESCENT_LIGHT_WHITE)
            - F7 (D65_SIMULATOR, DAYLIGHT_SIMULATOR)
            - F8 (D50_SIMULATOR, SYLVANIA_F40)
            - F9 (FLOURESCENT_COOL_WHITE_DELUXE)
            - F10 (PHILIPS_TL85, ULTRALUME_50)
            - F11 (PHILIPS_TL84, ULTRALUME_40)
            - F12 (PHILIPS_TL83, ULTRALUME_30)
            - LED_B1 (PHOSPHOR_CONVERTED_BLUE1)
            - LED_B2 (PHOSPHOR_CONVERTED_BLUE2)
            - LED_B3 (PHOSPHOR_CONVERTED_BLUE3)
            - LED_B4 (PHOSPHOR_CONVERTED_BLUE4)
            - LED_B5 (PHOSPHOR_CONVERTED_BLUE5)
            - LED_BH1
            - LED_RGB1
            - LED_V1 (LED_VIOLET1)
            - LED_V2 (LED_VIOLET2)
        """
        NONE = 0
        A = 1
        D50 = 2
        D55 = 3
        D65 = 4
        D75 = 5
        D93 = 6
        E = 7
        F1 = 8
        F2 = 9
        F3 = 10
        F4 = 11
        F5 = 12
        F6 = 13
        F7 = 14
        F8 = 15
        F9 = 16
        F10 = 17
        F11 = 18
        F12 = 19
        LED_B1 = 20
        LED_B2 = 21
        LED_B3 = 22
        LED_B4 = 23
        LED_B5 = 24
        LED_BH1 = 25
        LED_RGB1 = 26
        LED_V1 = 27
        LED_V2 = 28

        INCANDESCENT = A
        TUNGSTEN = A
        HORIZON = D50
        MIDMORNING = D55
        DAYLIGHT_NOON = D65
        NORTH_SKY = D75
        BLUE_PHOSPHOR = D93
        EQUAL_ENERGY = E
        FLUORESCENT_DAYLIGHT1 = F1
        FLUORESCENT_COOL_WHITE = F2
        FLUORESCENT_WHITE = F3
        FLUORESCENT_WARM_WHITE = F4
        FLUORESCENT_DAYLIGHT2 = F5
        FLUORESCENT_LIGHT_WHITE = F6
        D65_SIMULATOR = F7
        DAYLIGHT_SIMULATOR = F7
        D50_SIMULATOR = F8
        SYLVANIA_F40 = F8
        FLOURESCENT_COOL_WHITE_DELUXE = F9
        PHILIPS_TL85 = F10
        ULTRALUME_50 = F10
        PHILIPS_TL84 = F11
        ULTRALUME_40 = F11
        PHILIPS_TL83 = F12
        ULTRALUME_30 = F12
        PHOSPHOR_CONVERTED_BLUE1 = LED_B1
        PHOSPHOR_CONVERTED_BLUE2 = LED_B2
        PHOSPHOR_CONVERTED_BLUE3 = LED_B3
        PHOSPHOR_CONVERTED_BLUE4 = LED_B4
        PHOSPHOR_CONVERTED_BLUE5 = LED_B5
        LED_VIOLET1 = LED_V1
        LED_VIOLET2 = LED_V2

    @classmethod
    def wp_from_illuminant(cls, illuminant:Illuminant) -> Float2:
        """
        Look up chromaticity coordinates (white point) of a CIE 1931 2° standard illuminant.

        :param illuminant: Standard illuminant
        :return: White point coordinates (CIE xy)
        """
        # https://en.wikipedia.org/wiki/Standard_illuminant
        ilm = illuminant
        if   ilm == cls.Illuminant.A:        return Float2(0.44757, 0.40745)
        elif ilm == cls.Illuminant.D50:      return Float2(0.34567, 0.35850)
        elif ilm == cls.Illuminant.D55:      return Float2(0.33242, 0.34743)
        elif ilm == cls.Illuminant.D65:      return Float2(0.31271, 0.32902)
        elif ilm == cls.Illuminant.D75:      return Float2(0.29902, 0.31485)
        elif ilm == cls.Illuminant.D93:      return Float2(0.28315, 0.29711)
        elif ilm == cls.Illuminant.E:        return Float2(0.33333, 0.33333)
        elif ilm == cls.Illuminant.F1:       return Float2(0.31310, 0.33727)
        elif ilm == cls.Illuminant.F2:       return Float2(0.37208, 0.37529)
        elif ilm == cls.Illuminant.F3:       return Float2(0.40910, 0.39430)
        elif ilm == cls.Illuminant.F4:       return Float2(0.44018, 0.40329)
        elif ilm == cls.Illuminant.F5:       return Float2(0.31379, 0.34531)
        elif ilm == cls.Illuminant.F6:       return Float2(0.37790, 0.38835)
        elif ilm == cls.Illuminant.F7:       return Float2(0.31292, 0.32933)
        elif ilm == cls.Illuminant.F8:       return Float2(0.34588, 0.35875)
        elif ilm == cls.Illuminant.F9:       return Float2(0.37417, 0.37281)
        elif ilm == cls.Illuminant.F10:      return Float2(0.34609, 0.35986)
        elif ilm == cls.Illuminant.F11:      return Float2(0.38052, 0.37713)
        elif ilm == cls.Illuminant.F12:      return Float2(0.43695, 0.40441)
        elif ilm == cls.Illuminant.LED_B1:   return Float2(0.4560, 0.4078)
        elif ilm == cls.Illuminant.LED_B2:   return Float2(0.4357, 0.4012)
        elif ilm == cls.Illuminant.LED_B3:   return Float2(0.3756, 0.3723)
        elif ilm == cls.Illuminant.LED_B4:   return Float2(0.3422, 0.3502)
        elif ilm == cls.Illuminant.LED_B5:   return Float2(0.3118, 0.3236)
        elif ilm == cls.Illuminant.LED_BH1:  return Float2(0.4474, 0.4066)
        elif ilm == cls.Illuminant.LED_RGB1: return Float2(0.4557, 0.4211)
        elif ilm == cls.Illuminant.LED_V1:   return Float2(0.4560, 0.4548)
        elif ilm == cls.Illuminant.LED_V2:   return Float2(0.3781, 0.3775)
        else: raise Exception(f"Invalid illuminant: {illuminant.name}")

    @staticmethod
    def wp_from_cct(cct:int) -> Float2:
        """
        Compute CIE xy chromaticity coordinates (white point) from correlated colour temperature.

        :param cct: Correlated colour temperature in range [4000, 25000]
        :return: White point coordinates (CIE xy)
        """
        # https://github.com/colour-science/colour/blob/develop/colour/temperature/cie_d.py
        # BSD-3-Clause license:
        # Copyright 2013 Colour Developers
        # Redistribution and use in source and binary forms, with or without modification,
        # are permitted provided that the following conditions are met:
        # 1. Redistributions of source code must retain the above copyright notice, this
        # list of conditions and the following disclaimer.
        # 2. Redistributions in binary form must reproduce the above copyright notice,
        # this list of conditions and the following disclaimer in the documentation and/or
        # other materials provided with the distribution.
        # 3. Neither the name of the copyright holder nor the names of its contributors
        # may be used to endorse or promote products derived from this software without
        # specific prior written permission.
        # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
        # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
        # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
        # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
        # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
        # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
        # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
        # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
        # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
        # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
        assert 4000 <= cct <= 25000, "Correlated color temperature must be in range [4000, 25000]"
        cct = float(cct)
        cct_3 = cct**3
        cct_2 = cct**2

        x = 0.
        if cct <= 7000.:
            x = -4.607 * 10**9 / cct_3 \
                + 2.9678 * 10**6 / cct_2 \
                + 0.09911 * 10**3 / cct \
                + 0.244063
        else:
            x = -2.0064 * 10**9 / cct_3 \
                + 1.9018 * 10**6 / cct_2 \
                + 0.24748 * 10**3 / cct \
                + 0.23704

        y = -3.000 * x**2 + 2.870 * x - 0.275
        return Float2(x, y)

    @staticmethod
    def wp_from_image(im_xyz:Union[torch.Tensor, ColorImage]) -> Float2:
        """
        Estimate the dominant illuminant of an environment map or a target image directly
        and return its approximate CIE xy chromaticity coordinates (white point).

        .. warning::

            This is a lazy method that just averages the pixels in the image tensor.
            There is no spherical mapping, nor PCA, nor any serious attempt to analyze the image.

        :param im_xyz: Image tensor in CIE XYZ color space
        :type im_xyz: torch.Tensor | ColorImage
        :return: Estimated white point coordinates (CIE xy)
        """
        mean_color = torch.tensor([[[im_xyz[0:1].mean(), im_xyz[1:2].mean(), im_xyz[2:3].mean()]]]).permute(2, 0, 1)
        csum = mean_color[0] + mean_color[1] + mean_color[2]
        mean_color[0] /= csum
        mean_color[1] /= csum
        return Float2(mean_color[0].item(), mean_color[1].item())

    @staticmethod
    def apply(
        im_lms:Union[torch.Tensor, ColorImage],
        source_white:Union[Float2, Chromaticity, torch.Tensor, numpy.ndarray],
        target_white:Union[Float2, Chromaticity, torch.Tensor, numpy.ndarray]) -> torch.Tensor:
        """
        Apply white balance.

        :param im_lms: Image tensor in LMS color space
        :type im_lms: torch.Tensor | ColorImage
        :param source_white: Source white point coordinates (CIE xy)
        :type source_white: Float2 | Chromaticity | torch.Tensor | numpy.ndarray
        :param target_white: Target white point coordinates (CIE xy)
        :type target_white: Float2 | Chromaticity | torch.Tensor | numpy.ndarray
        :return: White balanced image tensor
        """
        source = torch.tensor([[[source_white[0], source_white[0], 1.]]], dtype=torch.float32).permute(2, 0, 1)
        target = torch.tensor([[[target_white[0], target_white[0], 1.]]], dtype=torch.float32).permute(2, 0, 1)
        src_lms = ColorSpace.convert(source, ColorSpace.Variant.CIE_XYY, ColorSpace.Variant.LMS)
        dst_lms = ColorSpace.convert(target, ColorSpace.Variant.CIE_XYY, ColorSpace.Variant.LMS)
        mat = [
            [dst_lms[0].item()/src_lms[0].item(), 0., 0.],
            [0., dst_lms[1].item()/src_lms[1].item(), 0.],
            [0., 0., dst_lms[2].item()/src_lms[2].item()]]
```
next_line: corrected = mm(im_lms, mat)
gold_snippet_index: 0
created_at: 2023-12-15 15:39:08+00:00
level: 24k
repo_name: quocanh34/magic-animate-modified
file_path: magicanimate/pipelines/pipeline_animation.py
context: [ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import_statement:

```python
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from magicanimate.models.unet_controlnet import UNet3DConditionModel
from magicanimate.models.multicontrolnet import ControlNetProcessor  # fix
from magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from magicanimate.pipelines.context import (
    get_context_scheduler,
    get_total_steps
)
from magicanimate.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
```
token_num: 15,706
cropped_code:

```python
        output_type: Optional[str] = "tensor",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        controlnet_condition1: list = None,
        controlnet_condition2: list = None,
        controlnet_conditioning_scale: float = 1.0,
        context_frames: int = 16,
        context_stride: int = 1,
        context_overlap: int = 4,
        context_batch_size: int = 1,
        context_schedule: str = "uniform",
        init_latents: Optional[torch.FloatTensor] = None,
        num_actual_inference_steps: Optional[int] = None,
        appearance_encoder = None,
        reference_control_writer = None,
        reference_control_reader = None,
        source_image: str = None,
        decoder_consistency = None,
        **kwargs,
    ):
        """
        New args:
        - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet
        - controlnet_conditioning_scale : conditioning scale for controlnet
        - init_latents : initial latents to begin with (used along with invert())
        - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps)
        """
        # controlnet = self.controlnet
        # processors = self.processors

        # Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, callback_steps)

        # Define call parameters
        # batch_size = 1 if isinstance(prompt, str) else len(prompt)
        batch_size = 1
        if latents is not None:
            batch_size = latents.shape[0]
        if isinstance(prompt, list):
            batch_size = len(prompt)

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # Encode input prompt
        prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size
        if negative_prompt is not None:
            negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size
        text_embeddings = self._encode_prompt(
            prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt
        )
        text_embeddings = torch.cat([text_embeddings] * context_batch_size)

        reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size)
        reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size)

        is_dist_initialized = kwargs.get("dist", False)
        rank = kwargs.get("rank", 0)
        world_size = kwargs.get("world_size", 1)

        # Prepare video
        assert num_videos_per_prompt == 1  # FIXME: verify if num_videos_per_prompt > 1 works
        assert batch_size == 1             # FIXME: verify if batch_size > 1 works
        control = self.prepare_condition(
            condition1=controlnet_condition1,
            condition2=controlnet_condition2,
            device=device,
            dtype=torch.float16,  # fix cung
            num_videos_per_prompt=num_videos_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
        )
        controlnet_uncond_images, controlnet_cond_images = control.chunk(2)

        # Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # Prepare latent variables
        if init_latents is not None:
            latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length)
        else:
            num_channels_latents = self.unet.in_channels
            latents = self.prepare_latents(
                batch_size * num_videos_per_prompt,
                num_channels_latents,
                video_length,
                height,
                width,
                text_embeddings.dtype,
                device,
                generator,
                latents,
            )
        latents_dtype = latents.dtype

        # Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # Prepare text embeddings for controlnet
        controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0)
        _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2)

        controlnet_res_samples_cache_dict = {i: None for i in range(video_length)}

        # For img2img setting
        if num_actual_inference_steps is None:
            num_actual_inference_steps = num_inference_steps

        if isinstance(source_image, str):
            ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda()
        elif isinstance(source_image, np.ndarray):
            ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
```
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, # controlnet: ControlNetModel, # processors: List[ControlNetProcessor], scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, # controlnet1=processors[0], scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition1, condition2, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # Prepare first condition condition1 = torch.from_numpy(condition1.copy()).to(device=device, dtype=dtype) / 255.0 condition1 = torch.stack([condition1 for _ in range(num_videos_per_prompt)], dim=0) condition1 = rearrange(condition1, 'b f h w c -> (b f) c h w').clone() # Prepare second condition condition2 = torch.from_numpy(condition2.copy()).to(device=device, dtype=dtype) / 255.0 condition2 = torch.stack([condition2 for _ in range(num_videos_per_prompt)], dim=0) condition2 = rearrange(condition2, 'b f h w c -> (b f) c h w').clone() # Here, we're averaging the two conditions combined_condition = (condition1*8+condition2*2)/10 if do_classifier_free_guidance: combined_condition = torch.cat([combined_condition] * 2) #combined_condition = torch.from_numpy(combined_condition.copy()).to(device=device, dtype=dtype) return combined_condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep 
timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
        alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]
        beta_prod_t = 1 - alpha_prod_t
        pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output
        x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir
        return x_next, pred_x0

    @torch.no_grad()
    def images2latents(self, images, dtype):
        """
        Convert RGB image to VAE latents
        """
        device = self._execution_device
        images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1
        images = rearrange(images, "f h w c -> f c h w").to(device)
        latents = []
        for frame_idx in range(images.shape[0]):
            latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215)
        latents = torch.cat(latents)
        return latents

    @torch.no_grad()
    def invert(
        self,
        image: torch.Tensor,
        prompt,
        num_inference_steps=20,
        num_actual_inference_steps=10,
        eta=0.0,
        return_intermediates=False,
        **kwargs):
        """
        Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440
        invert a real image into noise map with deterministic DDIM inversion
        """
        device = self._execution_device
        batch_size = image.shape[0]
        if isinstance(prompt, list):
            if batch_size == 1:
                image = image.expand(len(prompt), -1, -1, -1)
        elif isinstance(prompt, str):
            if batch_size > 1:
                prompt = [prompt] * batch_size

        # text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=77,
            return_tensors="pt"
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0]
        print("input text embeddings :", text_embeddings.shape)
        # define initial latents (images2latents requires a dtype; the text-embedding dtype is used here)
        latents = self.images2latents(image, text_embeddings.dtype)
        print("latents shape: ", latents.shape)

        # iterative sampling
        self.scheduler.set_timesteps(num_inference_steps)
        print("Valid timesteps: ", reversed(self.scheduler.timesteps))
        latents_list = [latents]
        pred_x0_list = [latents]
        for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
            if num_actual_inference_steps is not None and i >= num_actual_inference_steps:
                continue
            model_inputs = latents

            # predict the noise
            # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w)
            model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w")
            noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample
            noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w")

            # compute the previous noise sample x_t-1 -> x_t
            latents, pred_x0 = self.next_step(noise_pred, t, latents)
            latents_list.append(latents)
            pred_x0_list.append(pred_x0)

        if return_intermediates:
            # return the intermediate latents during inversion
            return latents, latents_list
        return latents

    def interpolate_latents(self, latents: torch.Tensor, interpolation_factor: int, device):
        if interpolation_factor < 2:
            return latents

        new_latents = torch.zeros(
            (latents.shape[0], latents.shape[1], ((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3], latents.shape[4]),
            device=latents.device,
            dtype=latents.dtype,
        )

        org_video_length = latents.shape[2]
        rate = [i/interpolation_factor for i in range(interpolation_factor)][1:]

        new_index = 0

        v0 = None
        v1 = None

        for i0, i1 in zip(range(org_video_length), range(org_video_length)[1:]):
            v0 = latents[:, :, i0, :, :]
            v1 = latents[:, :, i1, :, :]

            new_latents[:, :, new_index, :, :] = v0
            new_index += 1

            for f in rate:
                v =
get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], processors: List[ControlNetProcessor], #fix video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition1: list = None, controlnet_condition2: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ # controlnet = self.controlnet # processors = self.processors # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size) reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition1=controlnet_condition1, condition2=controlnet_condition2, device=device, dtype=torch.float16, #fix cung num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_uncond_images, controlnet_cond_images = control.chunk(2) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) controlnet_res_samples_cache_dict = {i:None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda() elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
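The denoising loop above slides fixed-size context windows over the video, controlled by context_frames, context_stride, context_overlap, and the "uniform" context_schedule resolved by the get_context_scheduler call in the next_line field below. As an illustration of the overlap arithmetic only, here is a hypothetical uniform window scheduler; the repo's actual get_context_scheduler lives in its utility code and may order or stride windows differently.

# Illustrative only: a hypothetical "uniform" context scheduler, not the repo's implementation.
def uniform_context_windows(video_length: int, context_frames: int = 16, context_overlap: int = 4):
    step = context_frames - context_overlap
    windows, start = [], 0
    while start + context_frames < video_length:
        windows.append(list(range(start, start + context_frames)))
        start += step
    # the final window is right-aligned so every frame is covered at least once
    windows.append(list(range(max(0, video_length - context_frames), video_length)))
    return windows

print(uniform_context_windows(32))
# [[0..15], [12..27], [16..31]]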
context_scheduler = get_context_scheduler(context_schedule)
3
2023-12-15 01:22:37+00:00
24k
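The next_step method in the record above performs one step of deterministic DDIM inversion: with eps the predicted noise and alpha_t the cumulative alpha product, it computes pred_x0 = (x_t - sqrt(1 - alpha_t) * eps) / sqrt(alpha_t), then x_next = sqrt(alpha_next) * pred_x0 + sqrt(1 - alpha_next) * eps. A minimal standalone sketch of that step, with scheduler bookkeeping omitted:

import torch

def ddim_inversion_step(x, eps, alpha_t, alpha_next):
    """One deterministic DDIM inversion step (eta = 0), mirroring next_step above."""
    pred_x0 = (x - (1 - alpha_t) ** 0.5 * eps) / alpha_t ** 0.5
    x_next = alpha_next ** 0.5 * pred_x0 + (1 - alpha_next) ** 0.5 * eps
    return x_next, pred_x0

x = torch.randn(1, 4, 64, 64)
eps = torch.randn_like(x)
x_next, pred_x0 = ddim_inversion_step(x, eps, alpha_t=0.9, alpha_next=0.85)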
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/contrib/socks.py
[ { "identifier": "HTTPConnection", "path": ".venv/Lib/site-packages/urllib3/connection.py", "snippet": "class HTTPConnection(_HTTPConnection):\n \"\"\"\n Based on :class:`http.client.HTTPConnection` but provides an extra constructor\n backwards-compatibility layer between older and newer Pythons...
import socks # type: ignore[import] import warnings import typing import ssl from ..exceptions import DependencyWarning from socket import timeout as SocketTimeout from ..connection import HTTPConnection, HTTPSConnection from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool from ..exceptions import ConnectTimeoutError, NewConnectionError from ..poolmanager import PoolManager from ..util.url import parse_url from typing import TypedDict
20,041
try: except ImportError: warnings.warn( ( "SOCKS support in urllib3 requires the installation of optional " "dependencies: specifically, PySocks. For more information, see " "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies" ), DependencyWarning, ) raise try: except ImportError: ssl = None # type: ignore[assignment] class _TYPE_SOCKS_OPTIONS(TypedDict): socks_version: int proxy_host: str | None proxy_port: str | None username: str | None password: str | None rdns: bool class SOCKSConnection(HTTPConnection): """ A plain-text HTTP connection that connects via a SOCKS proxy. """ def __init__( self, _socks_options: _TYPE_SOCKS_OPTIONS, *args: typing.Any, **kwargs: typing.Any, ) -> None: self._socks_options = _socks_options super().__init__(*args, **kwargs) def _new_conn(self) -> socks.socksocket: """ Establish a new connection via the SOCKS proxy. """ extra_kw: dict[str, typing.Any] = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: conn = socks.create_connection( (self.host, self.port), proxy_type=self._socks_options["socks_version"], proxy_addr=self._socks_options["proxy_host"], proxy_port=self._socks_options["proxy_port"], proxy_username=self._socks_options["username"], proxy_password=self._socks_options["password"], proxy_rdns=self._socks_options["rdns"], timeout=self.timeout, **extra_kw, ) except SocketTimeout as e: raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e except socks.ProxyError as e: # This is fragile as hell, but it seems to be the only way to raise # useful errors here. if e.socket_err: error = e.socket_err if isinstance(error, SocketTimeout): raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e else: # Adding `from e` messes with coverage somehow, so it's omitted. # See #2386. raise NewConnectionError( self, f"Failed to establish a new connection: {error}" ) else: raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e except OSError as e: # Defensive: PySocks should catch all these. raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e return conn # We don't need to duplicate the Verified/Unverified distinction from # urllib3/connection.py here because the HTTPSConnection will already have been # correctly set to either the Verified or Unverified form by that module. This # means the SOCKSHTTPSConnection will automatically be the correct type. class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): pass class SOCKSHTTPConnectionPool(HTTPConnectionPool): ConnectionCls = SOCKSConnection class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): ConnectionCls = SOCKSHTTPSConnection
""" This module contains provisional support for SOCKS proxies from within urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and SOCKS5. To enable its functionality, either install PySocks or install this module with the ``socks`` extra. The SOCKS implementation supports the full range of urllib3 features. It also supports the following SOCKS features: - SOCKS4A (``proxy_url='socks4a://...``) - SOCKS4 (``proxy_url='socks4://...``) - SOCKS5 with remote DNS (``proxy_url='socks5h://...``) - SOCKS5 with local DNS (``proxy_url='socks5://...``) - Usernames and passwords for the SOCKS proxy .. note:: It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in your ``proxy_url`` to ensure that DNS resolution is done from the remote server instead of client-side when connecting to a domain name. SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5 supports IPv4, IPv6, and domain names. When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url`` will be sent as the ``userid`` section of the SOCKS request: .. code-block:: python proxy_url="socks4a://<userid>@proxy-host" When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion of the ``proxy_url`` will be sent as the username/password to authenticate with the proxy: .. code-block:: python proxy_url="socks5h://<username>:<password>@proxy-host" """ from __future__ import annotations try: except ImportError: warnings.warn( ( "SOCKS support in urllib3 requires the installation of optional " "dependencies: specifically, PySocks. For more information, see " "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies" ), DependencyWarning, ) raise try: except ImportError: ssl = None # type: ignore[assignment] class _TYPE_SOCKS_OPTIONS(TypedDict): socks_version: int proxy_host: str | None proxy_port: str | None username: str | None password: str | None rdns: bool class SOCKSConnection(HTTPConnection): """ A plain-text HTTP connection that connects via a SOCKS proxy. """ def __init__( self, _socks_options: _TYPE_SOCKS_OPTIONS, *args: typing.Any, **kwargs: typing.Any, ) -> None: self._socks_options = _socks_options super().__init__(*args, **kwargs) def _new_conn(self) -> socks.socksocket: """ Establish a new connection via the SOCKS proxy. """ extra_kw: dict[str, typing.Any] = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: conn = socks.create_connection( (self.host, self.port), proxy_type=self._socks_options["socks_version"], proxy_addr=self._socks_options["proxy_host"], proxy_port=self._socks_options["proxy_port"], proxy_username=self._socks_options["username"], proxy_password=self._socks_options["password"], proxy_rdns=self._socks_options["rdns"], timeout=self.timeout, **extra_kw, ) except SocketTimeout as e: raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e except socks.ProxyError as e: # This is fragile as hell, but it seems to be the only way to raise # useful errors here. if e.socket_err: error = e.socket_err if isinstance(error, SocketTimeout): raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e else: # Adding `from e` messes with coverage somehow, so it's omitted. # See #2386. 
raise NewConnectionError( self, f"Failed to establish a new connection: {error}" ) else: raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e except OSError as e: # Defensive: PySocks should catch all these. raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e return conn # We don't need to duplicate the Verified/Unverified distinction from # urllib3/connection.py here because the HTTPSConnection will already have been # correctly set to either the Verified or Unverified form by that module. This # means the SOCKSHTTPSConnection will automatically be the correct type. class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): pass class SOCKSHTTPConnectionPool(HTTPConnectionPool): ConnectionCls = SOCKSConnection class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): ConnectionCls = SOCKSHTTPSConnection
class SOCKSProxyManager(PoolManager):
6
2023-12-16 04:12:01+00:00
24k
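As the module docstring in this record notes, the socks5h:// and socks4a:// schemes resolve DNS on the proxy rather than on the client. A short usage sketch with urllib3's SOCKSProxyManager (the class whose definition begins in the next_line field above); the proxy address and credentials are placeholders:

from urllib3.contrib.socks import SOCKSProxyManager

# socks5h:// -> hostnames are resolved by the proxy, not locally
proxy = SOCKSProxyManager("socks5h://user:password@localhost:1080/")
resp = proxy.request("GET", "https://example.com/")
print(resp.status)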
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\...
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14,729
css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. 
Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png.
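The encoder_param ranges described in the docstring above map onto OpenCV's imwrite flags, which get_cv2_imwrite_params() resolves per extension. A minimal sketch of the underlying cv2 calls, with a placeholder frame:

import cv2
import numpy as np

img = np.zeros((120, 160, 3), dtype=np.uint8)  # placeholder frame
# jpg/webp: quality 0-100 (higher = better); png: compression 1-9 (higher = smaller but slower)
cv2.imwrite("scene.jpg", img, [cv2.IMWRITE_JPEG_QUALITY, 95])
cv2.imwrite("scene.webp", img, [cv2.IMWRITE_WEBP_QUALITY, 100])
cv2.imwrite("scene.png", img, [cv2.IMWRITE_PNG_COMPRESSION, 9])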
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256
"""The default minimum width a frame will be downscaled to when calculating a downscale factor."""

MAX_FRAME_QUEUE_LENGTH: int = 4
"""Maximum number of decoded frames which can be buffered while waiting to be processed."""

PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress'
"""Template to use for progress bar."""


class Interpolation(Enum):
    """Interpolation method used for image resizing. Based on constants defined in OpenCV."""
    NEAREST = cv2.INTER_NEAREST
    """Nearest neighbor interpolation."""
    LINEAR = cv2.INTER_LINEAR
    """Bilinear interpolation."""
    CUBIC = cv2.INTER_CUBIC
    """Bicubic interpolation."""
    AREA = cv2.INTER_AREA
    """Pixel area relation resampling. Provides moire'-free downscaling."""
    LANCZOS4 = cv2.INTER_LANCZOS4
    """Lanczos interpolation over 8x8 neighborhood."""


def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int:
    """Get the optimal default downscale factor based on a video's resolution (currently only
    the width in pixels is considered).

    The resulting effective width of the video will be between frame_width and 1.5 * frame_width
    pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300).

    Arguments:
        frame_width: Actual width of the video frame in pixels.
        effective_width: Desired minimum width in pixels.

    Returns:
        int: The default downscale factor to use to achieve at least the target effective_width.
    """
    assert not (frame_width < 1 or effective_width < 1)
    if frame_width < effective_width:
        return 1
    return frame_width // effective_width


def get_scenes_from_cuts(
    cut_list: Iterable[FrameTimecode],
    start_pos: Union[int, FrameTimecode],
    end_pos: Union[int, FrameTimecode],
    base_timecode: Optional[FrameTimecode] = None,
) -> List[Tuple[FrameTimecode, FrameTimecode]]:
    """Returns a list of tuples of start/end FrameTimecodes for each scene based on a
    list of detected scene cuts/breaks.

    This function is called when using the :meth:`SceneManager.get_scene_list` method.
    The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`),
    noting that each scene is contiguous, starting from the first to last frame of the input.
    If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`.

    Arguments:
        cut_list: List of FrameTimecode objects where scene cuts/breaks occur.
        start_pos: The start frame or FrameTimecode of the cut list. Used to generate the first
            scene's start time.
        end_pos: The end frame, or FrameTimecode representing duration, of the video that was
            processed (used to generate the last scene's end time).
        base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only.

    Returns:
        List of tuples in the form (start_time, end_time), where both start_time and
        end_time are FrameTimecode objects representing the exact time/frame where each
        scene occupies based on the input cut_list.
    """
    # TODO(v0.7): Use the warnings module to turn this into a warning.
    if base_timecode is not None:
        logger.error('`base_timecode` argument is deprecated and has no effect.')
    # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode).
    scene_list = []
    if not cut_list:
        scene_list.append((start_pos, end_pos))
        return scene_list
    # Initialize last_cut to the first frame we processed, as it will be
    # the start timecode for the first scene in the list.
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). 
encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png.
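The cut-to-scene conversion in get_scenes_from_cuts above is simple enough to check by hand. Here is its core logic restated with plain ints standing in for FrameTimecode objects, for illustration:

def scenes_from_cuts(cut_list, start_pos, end_pos):
    # mirrors get_scenes_from_cuts above, minus the deprecation handling
    if not cut_list:
        return [(start_pos, end_pos)]
    scene_list, last_cut = [], start_pos
    for cut in cut_list:
        scene_list.append((last_cut, cut))
        last_cut = cut
    scene_list.append((last_cut, end_pos))
    return scene_list

print(scenes_from_cuts([100, 200], 0, 300))
# [(0, 100), (100, 200), (200, 300)]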
imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param
5
2023-10-25 02:50:01+00:00
24k
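compute_downscale_factor (defined in the module above) floors the ratio of the frame width to the 256-pixel default minimum, so a 1920-pixel-wide video is processed at roughly 274 pixels wide. Restated verbatim for a quick check:

DEFAULT_MIN_WIDTH = 256

def compute_downscale_factor(frame_width, effective_width=DEFAULT_MIN_WIDTH):
    # copied from the module above: factor of 1 below the minimum, floor division otherwise
    assert not (frame_width < 1 or effective_width < 1)
    if frame_width < effective_width:
        return 1
    return frame_width // effective_width

assert compute_downscale_factor(1920) == 7   # 1920 // 256; effective width 1920/7 ≈ 274 px
assert compute_downscale_factor(200) == 1    # narrower than the minimum: no downscaling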
EulerSearch/embedding_studio
plugins/default_fine_tuning_method.py
[ { "identifier": "settings", "path": "embedding_studio/core/config.py", "snippet": "class Settings(BaseSettings):\n API_V1_STR: str = \"/api/v1\"\n SECRET_KEY: str = secrets.token_urlsafe(32)\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n F...
from typing import List from sentence_transformers import SentenceTransformer from embedding_studio.core.config import settings from embedding_studio.core.plugin import FineTuningMethod from embedding_studio.embeddings.data.clickstream.parsers.s3_parser import ( AWSS3ClickstreamParser, ) from embedding_studio.embeddings.data.clickstream.search_event import ( DummyEventType, SearchResult, ) from embedding_studio.embeddings.data.clickstream.splitter import ( ClickstreamSessionsSplitter, ) from embedding_studio.embeddings.data.clickstream.text_query_item import ( TextQueryItem, ) from embedding_studio.embeddings.data.clickstream.text_query_retriever import ( TextQueryRetriever, ) from embedding_studio.embeddings.data.loaders.s3.s3_loader import ( AWSS3DataLoader, ) from embedding_studio.embeddings.data.storages.producers.clip import ( CLIPItemStorageProducer, ) from embedding_studio.embeddings.data.utils.fields_normalizer import ( DatasetFieldsNormalizer, ) from embedding_studio.embeddings.losses.prob_cosine_margin_ranking_loss import ( CosineProbMarginRankingLoss, ) from embedding_studio.embeddings.models.text_to_image.clip import ( TextToImageCLIPModel, ) from embedding_studio.models.clickstream.sessions import SessionWithEvents from embedding_studio.models.plugin import FineTuningBuilder, PluginMeta from embedding_studio.workers.fine_tuning.data.prepare_data import prepare_data from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import ( ExperimentsManager, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import ( FineTuningSettings, ) from embedding_studio.workers.fine_tuning.experiments.initial_params.clip import ( INITIAL_PARAMS, ) from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import ( MetricsAccumulator, )
16,901
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data" # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = { } self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser( TextQueryItem, SearchResult, DummyEventType ) self.splitter = ClickstreamSessionsSplitter() self.normalizer = DatasetFieldsNormalizer("item", "item_id") self.storage_producer = CLIPItemStorageProducer(self.normalizer) self.accumulators = [ MetricsAccumulator("train_loss", True, True, True, True), MetricsAccumulator( "train_not_irrelevant_dist_shift", True, True, True, True ), MetricsAccumulator( "train_irrelevant_dist_shift", True, True, True, True ), MetricsAccumulator("test_loss"), MetricsAccumulator("test_not_irrelevant_dist_shift"), MetricsAccumulator("test_irrelevant_dist_shift"), ] self.manager = ExperimentsManager( tracking_uri=settings.MLFLOW_TRACKING_URI, main_metric="test_not_irrelevant_dist_shift", accumulators=self.accumulators, ) self.initial_params = INITIAL_PARAMS self.initial_params.update( { "not_irrelevant_only": [True], "negative_downsampling": [ 0.5, ], "examples_order": [ [ 11, ] ], } ) self.settings = FineTuningSettings( loss_func=CosineProbMarginRankingLoss(), step_size=35, test_each_n_sessions=0.5, num_epochs=3, ) def upload_initial_model(self) -> None: model = TextToImageCLIPModel(SentenceTransformer("clip-ViT-B-32")) self.manager.upload_initial_model(model) def get_fine_tuning_builder( self, clickstream: List[SessionWithEvents]
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data" # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = { } self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser( TextQueryItem, SearchResult, DummyEventType ) self.splitter = ClickstreamSessionsSplitter() self.normalizer = DatasetFieldsNormalizer("item", "item_id") self.storage_producer = CLIPItemStorageProducer(self.normalizer) self.accumulators = [ MetricsAccumulator("train_loss", True, True, True, True), MetricsAccumulator( "train_not_irrelevant_dist_shift", True, True, True, True ), MetricsAccumulator( "train_irrelevant_dist_shift", True, True, True, True ), MetricsAccumulator("test_loss"), MetricsAccumulator("test_not_irrelevant_dist_shift"), MetricsAccumulator("test_irrelevant_dist_shift"), ] self.manager = ExperimentsManager( tracking_uri=settings.MLFLOW_TRACKING_URI, main_metric="test_not_irrelevant_dist_shift", accumulators=self.accumulators, ) self.initial_params = INITIAL_PARAMS self.initial_params.update( { "not_irrelevant_only": [True], "negative_downsampling": [ 0.5, ], "examples_order": [ [ 11, ] ], } ) self.settings = FineTuningSettings( loss_func=CosineProbMarginRankingLoss(), step_size=35, test_each_n_sessions=0.5, num_epochs=3, ) def upload_initial_model(self) -> None: model = TextToImageCLIPModel(SentenceTransformer("clip-ViT-B-32")) self.manager.upload_initial_model(model) def get_fine_tuning_builder( self, clickstream: List[SessionWithEvents]
) -> FineTuningBuilder:
14
2023-10-31 00:33:13+00:00
24k
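The plugin's upload_initial_model seeds fine-tuning with an off-the-shelf CLIP checkpoint wrapped by sentence-transformers. Outside the plugin, that same model can be exercised directly; a minimal sketch (the checkpoint is downloaded on first use, and the 512-dim output below is what this checkpoint is expected to produce):

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("clip-ViT-B-32")
text_emb = model.encode(["a red dress", "running shoes"])
print(text_emb.shape)  # (2, 512) for this checkpoint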
facebookresearch/minimax
src/minimax/runners/xp_runner.py
[ { "identifier": "EvalRunner", "path": "src/minimax/runners/eval_runner.py", "snippet": "class EvalRunner:\n def __init__(\n self,\n pop,\n env_names,\n env_kwargs=None,\n n_episodes=10,\n agent_idxs='*',\n render_mode=None):\n\n self.pop = pop\n...
import copy import time import numpy as np import jax import minimax.envs as envs import minimax.models as models import minimax.agents as agents from functools import partial from collections import defaultdict from jax.sharding import Mesh, PartitionSpec as P from jax.experimental import mesh_utils from jax.experimental.shard_map import shard_map from .eval_runner import EvalRunner from .dr_runner import DRRunner from .paired_runner import PAIREDRunner from .plr_runner import PLRRunner from minimax.util.rl import UEDScore, PopPLRManager
17,235
runner_cls=DRRunner, ), 'plr': RunnerInfo( runner_cls=PLRRunner, ), 'paired': RunnerInfo( runner_cls=PAIREDRunner, is_ued=True ) } class ExperimentRunner: def __init__( self, train_runner, env_name, agent_rl_algo, student_model_name, teacher_model_name=None, train_runner_kwargs={}, env_kwargs={}, ued_env_kwargs={}, student_rl_kwargs={}, teacher_rl_kwargs={}, student_model_kwargs={}, teacher_model_kwargs={}, eval_kwargs={}, eval_env_kwargs={}, n_devices=1, ): self.env_name = env_name self.agent_rl_algo = agent_rl_algo self.is_ued = RUNNER_INFO[train_runner].is_ued dummy_env = envs.make( env_name, env_kwargs, ued_env_kwargs)[0] # ---- Make agent ---- student_model_kwargs['output_dim'] = dummy_env.action_space().n student_model = models.make( env_name=env_name, model_name=student_model_name, **student_model_kwargs ) student_agent = agents.PPOAgent( model=student_model, n_devices=n_devices, **student_rl_kwargs ) # ---- Handle UED-related settings ---- if self.is_ued: max_teacher_steps = dummy_env.ued_max_episode_steps() teacher_model_kwargs['n_scalar_embeddings'] = max_teacher_steps teacher_model_kwargs['max_scalar'] = max_teacher_steps teacher_model_kwargs['output_dim'] = dummy_env.ued_action_space().n teacher_model = models.make( env_name=env_name, model_name=teacher_model_name, **teacher_model_kwargs ) teacher_agent = agents.PPOAgent( model=teacher_model, n_devices=n_devices, **teacher_rl_kwargs ) train_runner_kwargs.update(dict( teacher_agents=[teacher_agent] )) train_runner_kwargs.update(dict( ued_env_kwargs=ued_env_kwargs )) # Debug, tabulate student and teacher model # import jax.numpy as jnp # dummy_rng = jax.random.PRNGKey(0) # obs, _ = dummy_env.reset(dummy_rng) # hx = student_model.initialize_carry(dummy_rng, (1,)) # ued_obs, _ = dummy_env.reset_teacher(dummy_rng) # ued_hx = teacher_model.initialize_carry(dummy_rng, (1,)) # obs['image'] = jnp.expand_dims(obs['image'], 0) # ued_obs['image'] = jnp.expand_dims(ued_obs['image'], 0) # print(student_model.tabulate(dummy_rng, obs, hx)) # print(teacher_model.tabulate(dummy_rng, ued_obs, hx)) # import pdb; pdb.set_trace() # ---- Set up train runner ---- runner_cls = RUNNER_INFO[train_runner].runner_cls # Set up learning rate annealing parameters lr_init = train_runner_kwargs.lr lr_final = train_runner_kwargs.lr_final lr_anneal_steps = train_runner_kwargs.lr_anneal_steps if lr_final is None: train_runner_kwargs.lr_final = lr_init if train_runner_kwargs.lr_final == train_runner_kwargs.lr: train_runner_kwargs.lr_anneal_steps = 0 self.runner = runner_cls( env_name=env_name, env_kwargs=env_kwargs, student_agents=[student_agent], n_devices=n_devices, **train_runner_kwargs) # ---- Make eval runner ---- if eval_kwargs.get('env_names') is None: self.eval_runner = None else:
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo( runner_cls=PLRRunner, ), 'paired': RunnerInfo( runner_cls=PAIREDRunner, is_ued=True ) } class ExperimentRunner: def __init__( self, train_runner, env_name, agent_rl_algo, student_model_name, teacher_model_name=None, train_runner_kwargs={}, env_kwargs={}, ued_env_kwargs={}, student_rl_kwargs={}, teacher_rl_kwargs={}, student_model_kwargs={}, teacher_model_kwargs={}, eval_kwargs={}, eval_env_kwargs={}, n_devices=1, ): self.env_name = env_name self.agent_rl_algo = agent_rl_algo self.is_ued = RUNNER_INFO[train_runner].is_ued dummy_env = envs.make( env_name, env_kwargs, ued_env_kwargs)[0] # ---- Make agent ---- student_model_kwargs['output_dim'] = dummy_env.action_space().n student_model = models.make( env_name=env_name, model_name=student_model_name, **student_model_kwargs ) student_agent = agents.PPOAgent( model=student_model, n_devices=n_devices, **student_rl_kwargs ) # ---- Handle UED-related settings ---- if self.is_ued: max_teacher_steps = dummy_env.ued_max_episode_steps() teacher_model_kwargs['n_scalar_embeddings'] = max_teacher_steps teacher_model_kwargs['max_scalar'] = max_teacher_steps teacher_model_kwargs['output_dim'] = dummy_env.ued_action_space().n teacher_model = models.make( env_name=env_name, model_name=teacher_model_name, **teacher_model_kwargs ) teacher_agent = agents.PPOAgent( model=teacher_model, n_devices=n_devices, **teacher_rl_kwargs ) train_runner_kwargs.update(dict( teacher_agents=[teacher_agent] )) train_runner_kwargs.update(dict( ued_env_kwargs=ued_env_kwargs )) # Debug, tabulate student and teacher model # import jax.numpy as jnp # dummy_rng = jax.random.PRNGKey(0) # obs, _ = dummy_env.reset(dummy_rng) # hx = student_model.initialize_carry(dummy_rng, (1,)) # ued_obs, _ = dummy_env.reset_teacher(dummy_rng) # ued_hx = teacher_model.initialize_carry(dummy_rng, (1,)) # obs['image'] = jnp.expand_dims(obs['image'], 0) # ued_obs['image'] = jnp.expand_dims(ued_obs['image'], 0) # print(student_model.tabulate(dummy_rng, obs, hx)) # print(teacher_model.tabulate(dummy_rng, ued_obs, hx)) # import pdb; pdb.set_trace() # ---- Set up train runner ---- runner_cls = RUNNER_INFO[train_runner].runner_cls # Set up learning rate annealing parameters lr_init = train_runner_kwargs.lr lr_final = train_runner_kwargs.lr_final lr_anneal_steps = train_runner_kwargs.lr_anneal_steps if lr_final is None: train_runner_kwargs.lr_final = lr_init if train_runner_kwargs.lr_final == train_runner_kwargs.lr: train_runner_kwargs.lr_anneal_steps = 0 self.runner = runner_cls( env_name=env_name, env_kwargs=env_kwargs, student_agents=[student_agent], n_devices=n_devices, **train_runner_kwargs) # ---- Make eval runner ---- if eval_kwargs.get('env_names') is None: self.eval_runner = None else:
self.eval_runner = EvalRunner(
0
2023-10-28 12:12:01+00:00
24k
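The learning-rate block in ExperimentRunner.__init__ above normalizes lr_final and lr_anneal_steps so that "no annealing" degenerates cleanly: lr_final defaults to the initial lr, and equal endpoints zero out the anneal steps. Since this codebase is JAX-based, the resulting schedule could be expressed with optax; a sketch of that normalization, assuming a linear anneal (the runner's actual schedule shape is not shown in this record):

import optax

def make_lr_schedule(lr, lr_final=None, lr_anneal_steps=0):
    # mirror the normalization in ExperimentRunner.__init__ above
    lr_final = lr if lr_final is None else lr_final
    if lr_final == lr:
        lr_anneal_steps = 0
    if lr_anneal_steps == 0:
        return optax.constant_schedule(lr)
    return optax.linear_schedule(init_value=lr, end_value=lr_final, transition_steps=lr_anneal_steps)

sched = make_lr_schedule(3e-4, lr_final=3e-5, lr_anneal_steps=10_000)
print(sched(0), sched(10_000))  # 3e-4 at step 0, 3e-5 once annealing completes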
nv-tlabs/vid2player3d
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings...
from ...core import * from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion from ...visualization.common import ( plot_skeleton_state, plot_skeleton_motion_interactive, ) from ...visualization.plt_plotter import Matplotlib3DPlotter from ...visualization.skeleton_plotter_tasks import ( Draw3DSkeletonMotion, Draw3DSkeletonState, ) import numpy as np import torch
17,914
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices)
skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree)
1
2023-10-30 20:43:43+00:00
24k
masked-spacetime-hashing/msth
MSTH/video_pipeline.py
[ { "identifier": "base_config", "path": "nerfstudio/configs/base_config.py", "snippet": "class PrintableConfig: # pylint: disable=too-few-public-methods\nclass InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods\nclass MachineConfig(PrintableConfig):\nclass LocalWriterConfig(In...
import random
import typing
import cv2
import imageio
import numpy as np
import torch
import torch.distributed as dist
import wandb
from abc import abstractmethod
from dataclasses import dataclass, field
from time import time
from typing import Any, Dict, List, Mapping, Optional, Type, Union, cast
from rich.progress import (
    BarColumn,
    MofNCompleteColumn,
    Progress,
    TextColumn,
    TimeElapsedColumn,
)
from torch import nn
from torch.nn import Parameter
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision.io import write_video
from tqdm import tqdm, trange
from typing_extensions import Literal
from nerfstudio.configs import base_config as cfg
from nerfstudio.data.datamanagers.base_datamanager import (
    DataManager,
    DataManagerConfig,
    VanillaDataManager,
    VanillaDataManagerConfig,
)
from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes
from nerfstudio.models.base_model import Model, ModelConfig
from nerfstudio.pipelines.base_pipeline import Pipeline, VanillaPipelineConfig
from nerfstudio.utils import colormaps, profiler
from MSTH.datamanager import (
    SpaceTimeDataManager,
    SpaceTimeDataManagerConfig,
    VideoDataManager,
    VideoDataManagerConfig,
    VideoFeatureDataManager,
    VideoFeatureDataManagerConfig,
)
from MSTH.SpaceTimeHashing.render import get_render_cameras
from MSTH.utils import Timer, get_chart
16,369
from __future__ import annotations


def module_wrapper(ddp_or_model: Union[DDP, Model]) -> Model:
    """
    If DDP, then return the .module. Otherwise, return the model.
    """
    if isinstance(ddp_or_model, DDP):
        return cast(Model, ddp_or_model.module)
    return ddp_or_model


@dataclass
class VideoPipelineConfig(cfg.InstantiateConfig):
    _target: Type = field(default_factory=lambda: VideoPipeline)
    datamanager: Union[
VideoDataManagerConfig, VideoFeatureDataManagerConfig, DataManagerConfig
2
2023-10-26 04:39:15+00:00
24k
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kw...
from typing import Dict, List, Optional, Tuple
from pathlib import Path
from torch import nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR
from pytorch_lightning import LightningModule
from torchmetrics.classification import (
    BinaryAccuracy,
    BinaryAUROC,
    BinaryF1Score,
    BinaryPrecision,
    BinaryCohenKappa,
)
from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError
from oa_reactdiff.dataset import (
    ProcessedQM9,
    ProcessedDoubleQM9,
    ProcessedTripleQM9,
    ProcessedTS1x,
)
from oa_reactdiff.dynamics import EGNNDynamics, Confidence
from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule
from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING
from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion
from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print
from oa_reactdiff.analyze.rmsd import batch_rmsd
import torch
import copy
import torch.nn.functional as F
import numpy as np
import pandas as pd
import oa_reactdiff.utils.training_tools as utils
19,881
PROCESS_FUNC = {
    "QM9": ProcessedQM9,
    "DoubleQM9": ProcessedDoubleQM9,
    "TripleQM9": ProcessedTripleQM9,
    "TS1x": ProcessedTS1x,
}
FILE_TYPE = {
    "QM9": ".npz",
    "DoubleQM9": ".npz",
    "TripleQM9": ".npz",
    "TS1x": ".pkl",
}
LR_SCHEDULER = {
    "cos": CosineAnnealingWarmRestarts,
    "step": StepLR,
}


class DDPMModule(LightningModule):
    def __init__(
        self,
        model_config: Dict,
        optimizer_config: Dict,
        training_config: Dict,
        node_nfs: List[int] = [9] * 3,
        edge_nf: int = 4,
        condition_nf: int = 3,
        fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"],
        pos_dim: int = 3,
        update_pocket_coords: bool = True,
        condition_time: bool = True,
        edge_cutoff: Optional[float] = None,
        norm_values: Tuple = (1.0, 1.0, 1.0),
        norm_biases: Tuple = (0.0, 0.0, 0.0),
        noise_schedule: str = "polynomial_2",
        timesteps: int = 1000,
        precision: float = 1e-5,
        loss_type: str = "l2",
        pos_only: bool = False,
        process_type: Optional[str] = None,
        model: nn.Module = None,
        enforce_same_encoding: Optional[List] = None,
        scales: List[float] = [1.0, 1.0, 1.0],
        eval_epochs: int = 20,
        source: Optional[Dict] = None,
        fixed_idx: Optional[List] = None,
    ) -> None:
        super().__init__()
egnn_dynamics = EGNNDynamics(
4
2023-10-30 02:53:38+00:00
24k
nv-tlabs/pacer
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings...
from ...core import *
from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
from ...visualization.common import (
    plot_skeleton_state,
    plot_skeleton_motion_interactive,
)
from ...visualization.plt_plotter import Matplotlib3DPlotter
from ...visualization.skeleton_plotter_tasks import (
    Draw3DSkeletonMotion,
    Draw3DSkeletonState,
)
import numpy as np
import torch
18,830
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

def test_skel_tree():
    skel_tree = SkeletonTree.from_mjcf(
        "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml",
        backend="pytorch",
    )
    skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch")
    # assert skel_tree.to_str() == skel_tree_rec.to_str()
    print(skel_tree.node_names)
    print(skel_tree.local_translation)
    print(skel_tree.parent_indices)

    skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree)
    plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)

    skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"])
    plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)


def test_skel_motion():
    skel_motion = SkeletonMotion.from_file(
        "/tmp/tmp.npy", backend="pytorch", load_context=True
    )
    plot_skeleton_motion_interactive(skel_motion)


def test_grad():
    source_motion = SkeletonMotion.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy",
        backend="pytorch",
        device="cuda:0",
    )
    source_tpose = SkeletonState.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy",
        backend="pytorch",
        device="cuda:0",
    )
    target_tpose = SkeletonState.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy",
        backend="pytorch",
        device="cuda:0",
    )
    target_skeleton_tree = target_tpose.skeleton_tree

    joint_mapping = {
        "upArm_r": "right_shoulder",
        "upArm_l": "left_shoulder",
        "loArm_r": "right_elbow",
        "loArm_l": "left_elbow",
        "upLeg_r": "right_hip",
        "upLeg_l": "left_hip",
        "loLeg_r": "right_knee",
        "loLeg_l": "left_knee",
        "foot_r": "right_ankle",
        "foot_l": "left_ankle",
        "hips": "pelvis",
        "neckA": "neck",
        "spineA": "abdomen",
    }

    rotation_to_target_skeleton = quat_from_angle_axis(
        angle=torch.tensor(90.0).float(),
        axis=torch.tensor([1, 0, 0]).float(),
        degree=True,
    )

    target_motion = source_motion.retarget_to(
        joint_mapping=joint_mapping,
        source_tpose_local_rotation=source_tpose.local_rotation,
        source_tpose_root_translation=source_tpose.root_translation,
        target_skeleton_tree=target_skeleton_tree,
        target_tpose_local_rotation=target_tpose.local_rotation,
        target_tpose_root_translation=target_tpose.root_translation,
        rotation_to_target_skeleton=rotation_to_target_skeleton,
        scale_to_target_skeleton=0.01,
    )

    target_state = SkeletonState(
        target_motion.tensor[800, :],
        target_motion.skeleton_tree,
        target_motion.is_local,
    )

    skeleton_tree = target_state.skeleton_tree
    root_translation = target_state.root_translation
    global_translation = target_state.global_translation

    q = np.zeros((len(skeleton_tree), 4), dtype=np.float32)
    q[..., 3] = 1.0
    q = torch.from_numpy(q)
    max_its = 10000
task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
7
2023-10-31 20:47:12+00:00
24k
Improbable-AI/dexenv
dexenv/envs/dclaw_multiobjs.py
[ { "identifier": "DClawBase", "path": "dexenv/envs/dclaw_base.py", "snippet": "class DClawBase(VecTask):\n\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n\n self.cfg = cfg\n headless = self.cfg.headless\n self.randomize = self.cfg[\"task\"][\"randomize\"]\n...
import numpy as np
import torch
import dexenv
from gym.utils import seeding
from isaacgym import gymapi
from loguru import logger
from tqdm import tqdm
from dexenv.envs.dclaw_base import DClawBase
from dexenv.utils.common import chunker_list
from dexenv.utils.common import get_all_files_with_name
from dexenv.utils.common import load_from_pickle
from dexenv.utils.isaac_utils import load_a_goal_object_asset
from dexenv.utils.isaac_utils import load_an_object_asset
from dexenv.utils.isaac_utils import load_obj_texture
15,583
                                           0, 0, 0, 0, 0, 0])
            object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
            self.object_indices.append(object_idx)
            self.object_cat_indices.append(object_cat_ids[obj_asset_id])

            # add goal object
            goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object",
                                                i + self.num_envs, 0, 2)
            goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
            self.goal_object_indices.append(goal_object_idx)
            if self.cfg.obj.load_texture:
                self.gym.set_rigid_body_texture(env_ptr, object_handle, 0,
                                                gymapi.MESH_VISUAL_AND_COLLISION,
                                                object_textures[obj_asset_id]
                                                )
                self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0,
                                                gymapi.MESH_VISUAL_AND_COLLISION,
                                                object_textures[obj_asset_id]
                                                )
            else:
                color = np.array([179, 193, 134]) / 255.0
                self.gym.set_rigid_body_color(
                    env_ptr, object_handle, 0,
                    gymapi.MESH_VISUAL, gymapi.Vec3(*color))
                self.gym.set_rigid_body_color(
                    env_ptr, goal_handle, 0,
                    gymapi.MESH_VISUAL, gymapi.Vec3(*color))
            table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0)
            self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL,
                                          gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.))
            if self.cfg.rgb_render:
                render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)
                self.render_camera_handles.append(render_camera_handle[0])

            if self.aggregate_mode > 0:
                self.gym.end_aggregate(env_ptr)
            self.envs.append(env_ptr)

        object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
        self.object_rb_masses = [prop.mass for prop in object_rb_props]
        self.setup_torch_states()
        self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1)
        self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1)

    def parse_obj_dataset(self, dataset):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        split_dataset_name = dataset.split(':')
        if len(split_dataset_name) == 1:
            dataset_path = asset_root.joinpath(dataset, 'train')
        else:
            target_object = split_dataset_name[1]
            dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object)
        logger.warning(f'Dataset path:{dataset_path}')
        urdf_files = get_all_files_with_name(dataset_path, name='model.urdf')
        permute_ids = self.np_random.permutation(np.arange(len(urdf_files)))
        permuted_urdfs = [urdf_files[i] for i in permute_ids]
        object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs])))
        obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)}
        return permuted_urdfs, dataset_path, obj_name_to_id

    def get_object_category(self, urdf_path):
        cat = urdf_path.parents[0].name
        if 'var_' in cat:
            cat = urdf_path.parents[1].name
        return cat

    def load_object_asset(self):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        object_urdfs = self.object_urdfs

        object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], []
        object_cat_ids = []
        if self.cfg.obj.object_id is not None:
            urdf_to_load = self.object_urdfs[self.cfg.obj.object_id]
            logger.info(f'Loading a single object: {urdf_to_load}')
            obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
            object_assets.append(obj_asset)
            goal_assets.append(goal_asset)
            object_ids.append(self.object_urdfs.index(urdf_to_load))
            object_tex_handles.append(texture_handle)
            object_ptds.append(ptd)
            object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        else:
            if self.cfg.obj.start_id is None:
                start = 0
                end = min(len(object_urdfs), self.cfg.obj.num_objs)
            else:
                start = self.cfg.obj.start_id
                end = min(start + self.cfg.obj.num_objs, len(object_urdfs))
            iters = range(start, end)
            logger.info(f'Loading object IDs from {start} to {end}.')
            for idx in tqdm(iters, desc='Loading Asset'):
                urdf_to_load = object_urdfs[idx]
                obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
                object_assets.append(obj_asset)
                goal_assets.append(goal_asset)
                object_ids.append(self.object_urdfs.index(urdf_to_load))
                object_tex_handles.append(texture_handle)
                object_ptds.append(ptd)
                object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids

    def load_an_object(self, asset_root, object_urdf):
        out = []
        obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd)
        obj_asset = self.change_obj_asset_dyn(obj_asset)
        goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False)
        ptd = None
        if self.cfg.env.loadCADPTD:
            ptd_file = object_urdf.parent.joinpath(f'point_cloud_{self.cfg.env.objCadNumPts}_pts.pkl')
            if ptd_file.exists():
class DclawMultiObjs(DClawBase):
    def __init__(self, cfg, sim_device, rl_device, graphics_device_id):
        self.set_random_gen()
        self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset)
        self.num_objects = len(self.object_urdfs)
        logger.info(f'Object urdf root path:{self.dataset_path}.')
        logger.info(f'Number of available objects:{self.num_objects}.')
        super().__init__(cfg=cfg,
                         sim_device=sim_device,
                         rl_device=rl_device,
                         graphics_device_id=graphics_device_id)

    def set_random_gen(self, seed=12345):
        self.np_random, seed = seeding.np_random(seed)

    def _create_envs(self, num_envs, spacing, num_per_row):
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)

        asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix()
        dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root)

        # load manipulated object and goal assets
        table_asset = self.get_table_asset()
        table_pose = self.get_table_pose()
        object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset()

        # create fingertip force sensors, if needed
        if self.obs_type == "full_state":
            sensor_pose = gymapi.Transform()
            for ft_handle in self.fingertip_handles:
                self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose)

        dclaw_start_pose = self.get_dclaw_start_pose()
        object_start_pose = self.get_object_start_pose(dclaw_start_pose)
        goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose)

        self.dclaws = []
        self.envs = []
        self.object_init_state = []
        self.hand_start_states = []
        self.hand_indices = []
        self.fingertip_indices = []
        self.object_indices = []
        self.object_cat_indices = []
        self.goal_object_indices = []
        self.render_camera_handles = []
        if self.cfg.rgb_render:
            render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()

        self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips]

        dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)
        object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0])
        self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))
        self.object_handles = []
        num_object_assets = len(object_assets)
        env_obj_ids = []
        for i in range(self.num_envs):
            # create env instance
            obj_asset_id = i % num_object_assets
            env_obj_ids.append(object_ids[obj_asset_id])
            env_ptr = self.gym.create_env(
                self.sim, lower, upper, num_per_row
            )
            if self.aggregate_mode >= 1:
                # compute aggregate size
                obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id])
                obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id])
                max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1
                max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1
                self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

            self.create_hand_actor(env_ptr=env_ptr,
                                   dclaw_asset=dclaw_asset,
                                   dclaw_start_pose=dclaw_start_pose,
                                   dclaw_dof_props=dclaw_dof_props,
                                   env_id=i)

            # add object
            object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1)
            self.object_handles.append(object_handle)
            self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,
                                           object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z,
                                           object_start_pose.r.w,
                                           0, 0, 0, 0, 0, 0])
            object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
            self.object_indices.append(object_idx)
            self.object_cat_indices.append(object_cat_ids[obj_asset_id])

            # add goal object
            goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object",
                                                i + self.num_envs, 0, 2)
            goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
            self.goal_object_indices.append(goal_object_idx)
            if self.cfg.obj.load_texture:
                self.gym.set_rigid_body_texture(env_ptr, object_handle, 0,
                                                gymapi.MESH_VISUAL_AND_COLLISION,
                                                object_textures[obj_asset_id]
                                                )
                self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0,
                                                gymapi.MESH_VISUAL_AND_COLLISION,
                                                object_textures[obj_asset_id]
                                                )
            else:
                color = np.array([179, 193, 134]) / 255.0
                self.gym.set_rigid_body_color(
                    env_ptr, object_handle, 0,
                    gymapi.MESH_VISUAL, gymapi.Vec3(*color))
                self.gym.set_rigid_body_color(
                    env_ptr, goal_handle, 0,
                    gymapi.MESH_VISUAL, gymapi.Vec3(*color))
            table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0)
            self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL,
                                          gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.))
            if self.cfg.rgb_render:
                render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)
                self.render_camera_handles.append(render_camera_handle[0])

            if self.aggregate_mode > 0:
                self.gym.end_aggregate(env_ptr)
            self.envs.append(env_ptr)

        object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
        self.object_rb_masses = [prop.mass for prop in object_rb_props]
        self.setup_torch_states()
        self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1)
        self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1)

    def parse_obj_dataset(self, dataset):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        split_dataset_name = dataset.split(':')
        if len(split_dataset_name) == 1:
            dataset_path = asset_root.joinpath(dataset, 'train')
        else:
            target_object = split_dataset_name[1]
            dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object)
        logger.warning(f'Dataset path:{dataset_path}')
        urdf_files = get_all_files_with_name(dataset_path, name='model.urdf')
        permute_ids = self.np_random.permutation(np.arange(len(urdf_files)))
        permuted_urdfs = [urdf_files[i] for i in permute_ids]
        object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs])))
        obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)}
        return permuted_urdfs, dataset_path, obj_name_to_id

    def get_object_category(self, urdf_path):
        cat = urdf_path.parents[0].name
        if 'var_' in cat:
            cat = urdf_path.parents[1].name
        return cat

    def load_object_asset(self):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        object_urdfs = self.object_urdfs

        object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], []
        object_cat_ids = []
        if self.cfg.obj.object_id is not None:
            urdf_to_load = self.object_urdfs[self.cfg.obj.object_id]
            logger.info(f'Loading a single object: {urdf_to_load}')
            obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
            object_assets.append(obj_asset)
            goal_assets.append(goal_asset)
            object_ids.append(self.object_urdfs.index(urdf_to_load))
            object_tex_handles.append(texture_handle)
            object_ptds.append(ptd)
            object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        else:
            if self.cfg.obj.start_id is None:
                start = 0
                end = min(len(object_urdfs), self.cfg.obj.num_objs)
            else:
                start = self.cfg.obj.start_id
                end = min(start + self.cfg.obj.num_objs, len(object_urdfs))
            iters = range(start, end)
            logger.info(f'Loading object IDs from {start} to {end}.')
            for idx in tqdm(iters, desc='Loading Asset'):
                urdf_to_load = object_urdfs[idx]
                obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
                object_assets.append(obj_asset)
                goal_assets.append(goal_asset)
                object_ids.append(self.object_urdfs.index(urdf_to_load))
                object_tex_handles.append(texture_handle)
                object_ptds.append(ptd)
                object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids

    def load_an_object(self, asset_root, object_urdf):
        out = []
        obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd)
        obj_asset = self.change_obj_asset_dyn(obj_asset)
        goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False)
        ptd = None
        if self.cfg.env.loadCADPTD:
            ptd_file = object_urdf.parent.joinpath(f'point_cloud_{self.cfg.env.objCadNumPts}_pts.pkl')
            if ptd_file.exists():
ptd = load_from_pickle(ptd_file)
3
2023-10-25 17:22:41+00:00
24k
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/activation_resampler/tests/test_activation_resampler.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then p...
from jaxtyping import Float, Int64
from torch import Tensor
from torch.nn import Parameter
from sparse_autoencoder.activation_resampler.activation_resampler import ActivationResampler
from sparse_autoencoder.activation_store.base_store import ActivationStore
from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore
from sparse_autoencoder.autoencoder.model import SparseAutoencoder, SparseAutoencoderConfig
from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss
from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss
from sparse_autoencoder.loss.reducer import LossReducer
from sparse_autoencoder.tensor_types import Axis
import pytest
import torch
16,456
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture()
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture()
def autoencoder_model() -> SparseAutoencoder:
3
2023-10-27 07:37:15+00:00
24k
OATML-Markslab/ProteinNPT
scripts/train.py
[ { "identifier": "ProteinNPTModel", "path": "proteinnpt/model.py", "snippet": "class ProteinNPTModel(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.paddi...
import os,gc
import json
import argparse
import random
import numpy as np
import pandas as pd
import wandb
import torch
import proteinnpt,baselines,utils
from collections import defaultdict
from proteinnpt.model import ProteinNPTModel
from baselines.model import AugmentedPropertyPredictor
from utils.esm.data import Alphabet
from utils.tranception.model_pytorch import get_tranception_tokenizer
from utils.data_utils import get_train_val_test_data, standardize, pnpt_count_non_nan, pnpt_spearmanr
from utils.msa_utils import process_MSA
from utils.model_utils import Trainer
20,587
    for target_index,target in enumerate(args.target_config):
        if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present
            if args.assay_location is not None: # We passed at least one path for the assay location
                num_targets = len([x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]])
                if len(args.assay_location) > 1:
                    assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location))
                    args.target_config[target]["location"] = args.assay_location[target_index]
                    print("Location used for target {} is {}".format(target,args.assay_location[target_index]))
                else:
                    args.target_config[target]["location"] = args.assay_location[0]
                    print("Location used for target {} is {}".format(target,args.assay_location[0]))
            else:
                print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles'))
                args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles'
    return args

def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None):
    test_logs = {'total_training_steps': trainer_final_status['total_training_steps'],
                 'total_training_epochs': trainer_final_status['total_training_epochs'],
                 'total_train_time': trainer_final_status['total_train_time']}
    if logs_folder is None:
        dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        logs_folder = dir_path+os.sep+'output'
        if not os.path.exists(logs_folder): os.mkdir(logs_folder)
    if args.model_type=="ProteinNPT":
        normalization = 0
        for target_name in target_names:
            normalization += test_eval_results['eval_num_masked_targets'][target_name]
    else:
        normalization = test_eval_results['eval_num_predicted_targets']
    test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization
    spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name],
                                             test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
    num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
    for target_name in target_names:
        print("Spearman {} target: {}".format(target_name,spearmans[target_name]))
        test_logs['Test Spearman '+target_name] = spearmans[target_name]
        if args.model_type=="ProteinNPT":
            normalization = test_eval_results['eval_num_masked_targets'][target_name]
        test_logs['Test loss '+str(target_name)+' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization
    with open(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv", "a") as perf_tracker:
        if os.path.getsize(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv") == 0:
            header="fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss"
            for target_name in target_names:
                header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name)
            perf_tracker.write(header+"\n")
        perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'],5))
        for target_name in target_names:
            perf += ("," + str(round(test_logs['Test loss '+str(target_name)+' per seq.'],5)) +","+str(spearmans[target_name])+","+str(num_obs_spearmans[target_name]))
        perf_tracker.write(perf+"\n")
    return test_logs, spearmans

def log_performance_all_folds(args,target_names,all_test_predictions_across_folds,spearmans_across_folds,perf_list,logs_folder=None):
    if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'):
        os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions')
    all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds)
    all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False)
    if logs_folder is None:
        dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        logs_folder = dir_path+os.sep+'output'
        if not os.path.exists(logs_folder): os.mkdir(logs_folder)
    with open(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv", "a") as overall_perf:
        if os.path.getsize(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv") == 0:
            header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss"
            for target_name in target_names:
                header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name)
            overall_perf.write(header+"\n")
        perf = ",".join([str(x) for x in perf_list[1:]]) #Remove fold_index from perf_list
        for target_name in target_names:
            missing_mask = np.isnan(all_test_predictions_across_folds['labels_'+target_name]) | np.equal(all_test_predictions_across_folds['labels_'+target_name],-100)
            MSE = ((all_test_predictions_across_folds['predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean()
            spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_'+target_name],
                                      all_test_predictions_across_folds['labels_'+target_name])
            num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_'+target_name])
            MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean()
            spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_'+target_name],
                                                   all_test_predictions_across_folds['labels_'+target_name])
            spearman_std_dev = np.array(spearmans_across_folds[target_name]).std()
            perf += ("," + str(MSE) +","+str(spearman) + ","+ str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) +","+str(spearman_standardized))
        overall_perf.write(perf+"\n")

def main(args):
    # Set random seeds
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # target_names are the true targets we want to predict. target_names_input also includes auxiliary labels (as used in ProteinNPT)
    target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]]
    target_names_input = args.target_config.keys()
    num_targets = len(target_names)
    num_targets_input = len(target_names_input)
    print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names)))
    if num_targets_input > num_targets:
        print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input)))

    assay_reference_file = pd.read_csv(args.assay_reference_file_location)
    assay_id=assay_reference_file["DMS_id"][args.assay_index]
    args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"]==assay_id].values[0])
    args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"]==assay_id].values[0])
    print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index))
    args.save_model_checkpoint = not args.do_not_save_model_checkpoint
    args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters
    if args.model_type=="MSA_Transformer_pred":
        assert args.num_MSA_sequences_per_training_instance==args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval"
    effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu
    print("Effective batch size is {}".format(effective_batch_size))

    model_hypers = [args.aa_embeddings,args.target_prediction_model,args.target_prediction_head,args.augmentation,args.frozen_embedding_parameters,args.dropout,args.weight_decay, \
        args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu, \
        args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers]
    model_hypers_str = ','.join([str(x) for x in model_hypers])
    model_name_prefix = '_'.join([str(x) for x in [args.model_type,assay_id,"_".join(target_names_input),args.fold_variable_name,'embed_'+args.aa_embeddings,'head_'+str(args.target_prediction_model),'aug_'+str(args.augmentation_short), \
        'froz_'+str(args.frozen_embedding_parameters),'drop_'+str(args.dropout),'val_'+str(args.use_validation_set),args.model_name_suffix]])
    model_name = model_name_prefix + "_fold-" + str(args.test_fold_index)
    if not os.path.exists(args.model_location+os.sep+model_name):
        os.mkdir(args.model_location+os.sep+model_name)
    with open(args.model_location+os.sep+model_name+os.sep+'training_arguments', 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    print("Model name: "+model_name)

    assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay.
    args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None
    print("Sequence embeddings: {}".format(args.sequence_embeddings_location))

    if args.use_wandb: wandb.login()

    # Create & initiate model
    alphabet = get_tranception_tokenizer() if args.aa_embeddings=="Tranception" else Alphabet.from_architecture("msa_transformer")
    if args.model_type=="ProteinNPT":
        model = ProteinNPTModel(args, alphabet)
    elif args.model_type in ["MSA_Transformer_pred", "ESM1v_pred", "Tranception_pred", "TranceptEVE_pred", "Linear_Embedding_pred", "DeepSequence_pred"]:
def setup_config_and_paths(args):
    # All parameters that are not defined by end user are fetched from the config file
    if args.model_config_location is not None:
        args.main_config=json.load(open(args.model_config_location))
        for key in args.main_config:
            if args.__dict__[key] is None:
                args.__dict__[key] = args.main_config[key]

    # File paths config
    for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']:
        if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path))
    if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions')
    if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint')
    args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix
    if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location)
    args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix
    if not os.path.exists(args.model_location): os.mkdir(args.model_location)

    # Target config
    args.target_config=json.load(open(args.target_config_location))
    zero_shot_predictions_mapping={
        "MSA_Transformer_pred": "MSA_Transformer_ensemble",
        "ESM1v_pred": "ESM1v_ensemble",
        "TranceptEVE_pred": "TranceptEVE_L",
        "Tranception_pred": "Tranception_L",
        "DeepSequence_pred": "DeepSequence_ensemble"
    }
    if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"]
    if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels":
        # Add auxiliary label to target_config
        assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced"
        print("Using zero-shot fitness predictions as auxiliary labels")
        args.target_config["zero_shot_fitness_predictions"] = {
            "type": "continuous",
            "dim": 1,
            "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions
            "location": args.zero_shot_fitness_predictions_location,
            "in_NPT_loss": False,
            "main_target": False
        }
        args.augmentation_short="auxiliary"
    elif args.augmentation=="zero_shot_fitness_predictions_covariate":
        # Will use zero-shot fitness predictions as an additional model covariate
        assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced"
        print("Using zero-shot fitness predictions as covariate")
        args.augmentation_short="covariate"
        args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type]
    else:
        args.augmentation_short="none"
    for target_index,target in enumerate(args.target_config):
        if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present
            if args.assay_location is not None: # We passed at least one path for the assay location
                num_targets = len([x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]])
                if len(args.assay_location) > 1:
                    assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location))
                    args.target_config[target]["location"] = args.assay_location[target_index]
                    print("Location used for target {} is {}".format(target,args.assay_location[target_index]))
                else:
                    args.target_config[target]["location"] = args.assay_location[0]
                    print("Location used for target {} is {}".format(target,args.assay_location[0]))
            else:
                print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles'))
                args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles'
    return args

def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None):
    test_logs = {'total_training_steps': trainer_final_status['total_training_steps'],
                 'total_training_epochs': trainer_final_status['total_training_epochs'],
                 'total_train_time': trainer_final_status['total_train_time']}
    if logs_folder is None:
        dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        logs_folder = dir_path+os.sep+'output'
        if not os.path.exists(logs_folder): os.mkdir(logs_folder)
    if args.model_type=="ProteinNPT":
        normalization = 0
        for target_name in target_names:
            normalization += test_eval_results['eval_num_masked_targets'][target_name]
    else:
        normalization = test_eval_results['eval_num_predicted_targets']
    test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization
    spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name],
                                             test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
    num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
    for target_name in target_names:
        print("Spearman {} target: {}".format(target_name,spearmans[target_name]))
        test_logs['Test Spearman '+target_name] = spearmans[target_name]
        if args.model_type=="ProteinNPT":
            normalization = test_eval_results['eval_num_masked_targets'][target_name]
        test_logs['Test loss '+str(target_name)+' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization
    with open(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv", "a") as perf_tracker:
        if os.path.getsize(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv") == 0:
            header="fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss"
            for target_name in target_names:
                header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name)
            perf_tracker.write(header+"\n")
        perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'],5))
        for target_name in target_names:
            perf += ("," + str(round(test_logs['Test loss '+str(target_name)+' per seq.'],5)) +","+str(spearmans[target_name])+","+str(num_obs_spearmans[target_name]))
        perf_tracker.write(perf+"\n")
    return test_logs, spearmans

def log_performance_all_folds(args,target_names,all_test_predictions_across_folds,spearmans_across_folds,perf_list,logs_folder=None):
    if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'):
        os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions')
    all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds)
    all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False)
    if logs_folder is None:
        dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        logs_folder = dir_path+os.sep+'output'
        if not os.path.exists(logs_folder): os.mkdir(logs_folder)
    with open(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv", "a") as overall_perf:
        if os.path.getsize(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv") == 0:
            header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss"
            for target_name in target_names:
                header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name)
            overall_perf.write(header+"\n")
        perf = ",".join([str(x) for x in perf_list[1:]]) #Remove fold_index from perf_list
        for target_name in target_names:
            missing_mask = np.isnan(all_test_predictions_across_folds['labels_'+target_name]) | np.equal(all_test_predictions_across_folds['labels_'+target_name],-100)
            MSE = ((all_test_predictions_across_folds['predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean()
            spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_'+target_name],
                                      all_test_predictions_across_folds['labels_'+target_name])
            num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_'+target_name])
            MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean()
            spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_'+target_name],
                                                   all_test_predictions_across_folds['labels_'+target_name])
            spearman_std_dev = np.array(spearmans_across_folds[target_name]).std()
            perf += ("," + str(MSE) +","+str(spearman) + ","+ str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) +","+str(spearman_standardized))
        overall_perf.write(perf+"\n")

def main(args):
    # Set random seeds
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # target_names are the true targets we want to predict. target_names_input also includes auxiliary labels (as used in ProteinNPT)
    target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]]
    target_names_input = args.target_config.keys()
    num_targets = len(target_names)
    num_targets_input = len(target_names_input)
    print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names)))
    if num_targets_input > num_targets:
        print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input)))

    assay_reference_file = pd.read_csv(args.assay_reference_file_location)
    assay_id=assay_reference_file["DMS_id"][args.assay_index]
    args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"]==assay_id].values[0])
    args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"]==assay_id].values[0])
    print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index))
    args.save_model_checkpoint = not args.do_not_save_model_checkpoint
    args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters
    if args.model_type=="MSA_Transformer_pred":
        assert args.num_MSA_sequences_per_training_instance==args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval"
    effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu
    print("Effective batch size is {}".format(effective_batch_size))

    model_hypers = [args.aa_embeddings,args.target_prediction_model,args.target_prediction_head,args.augmentation,args.frozen_embedding_parameters,args.dropout,args.weight_decay, \
        args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu, \
        args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers]
    model_hypers_str = ','.join([str(x) for x in model_hypers])
    model_name_prefix = '_'.join([str(x) for x in [args.model_type,assay_id,"_".join(target_names_input),args.fold_variable_name,'embed_'+args.aa_embeddings,'head_'+str(args.target_prediction_model),'aug_'+str(args.augmentation_short), \
        'froz_'+str(args.frozen_embedding_parameters),'drop_'+str(args.dropout),'val_'+str(args.use_validation_set),args.model_name_suffix]])
    model_name = model_name_prefix + "_fold-" + str(args.test_fold_index)
    if not os.path.exists(args.model_location+os.sep+model_name):
        os.mkdir(args.model_location+os.sep+model_name)
    with open(args.model_location+os.sep+model_name+os.sep+'training_arguments', 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    print("Model name: "+model_name)

    assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay.
    args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None
    print("Sequence embeddings: {}".format(args.sequence_embeddings_location))

    if args.use_wandb: wandb.login()

    # Create & initiate model
    alphabet = get_tranception_tokenizer() if args.aa_embeddings=="Tranception" else Alphabet.from_architecture("msa_transformer")
    if args.model_type=="ProteinNPT":
        model = ProteinNPTModel(args, alphabet)
    elif args.model_type in ["MSA_Transformer_pred", "ESM1v_pred", "Tranception_pred", "TranceptEVE_pred", "Linear_Embedding_pred", "DeepSequence_pred"]:
model = AugmentedPropertyPredictor(args, alphabet)
1
2023-10-28 11:41:05+00:00
24k
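The training script above derives its effective batch size as gradient_accumulation × training_num_assay_sequences_per_batch_per_gpu. A minimal sketch of that gradient-accumulation pattern, with all names illustrative rather than taken from the script:

import torch

def accumulate_and_step(model, optimizer, loss_fn, batches, accumulation_steps=4):
    # Several small backward passes act as one large batch of
    # accumulation_steps * per_step_batch sequences.
    optimizer.zero_grad()
    for i, (x, y) in enumerate(batches):
        loss = loss_fn(model(x), y) / accumulation_steps  # average over the virtual batch
        loss.backward()                                   # gradients accumulate in param.grad
        if (i + 1) % accumulation_steps == 0:
            optimizer.step()                              # one update per effective batch
            optimizer.zero_grad()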
CVHub520/yolov5_obb
detect.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: ...
import argparse import os import sys import cv2 import torch import torch.backends.cudnn as cudnn from pathlib import Path from models.common import DetectMultiBackend from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, increment_path, non_max_suppression, non_max_suppression_obb, print_args, scale_coords, scale_polys, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, time_sync from utils.rboxs_utils import poly2rbox, rbox2poly
15,636
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/media/test/4d846cae-2315-4928-8d1b-ca6d3a61a3c6/DroneVehicle/val/raw/images/', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[840], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.2, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='3', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(FILE.stem, opt) return opt def main(opt):
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video path/ # directory path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = 
increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/media/test/4d846cae-2315-4928-8d1b-ca6d3a61a3c6/DroneVehicle/val/raw/images/', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[840], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.2, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='3', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(FILE.stem, opt) return opt def main(opt):
check_requirements(exclude=('tensorboard', 'thop'))
9
2023-10-31 06:06:41+00:00
24k
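The detection loop above relies on rbox2poly to turn rotated boxes (cx, cy, w, h, θ) into four corner points before they are rescaled with scale_polys and drawn. A minimal sketch of that geometry, assuming a stand-alone helper rather than the repo's exact utils.rboxs_utils implementation:

import numpy as np

def rbox_to_poly(cx, cy, w, h, theta):
    # Corner offsets in the box frame, counter-clockwise from the top-left corner
    dx, dy = w / 2.0, h / 2.0
    corners = np.array([[-dx, -dy], [dx, -dy], [dx, dy], [-dx, dy]])
    # Rotate by theta (radians) and translate to the box center
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    return corners @ rot.T + np.array([cx, cy])  # shape (4, 2): x1 y1 ... x4 y4

print(rbox_to_poly(50.0, 40.0, 20.0, 10.0, np.pi / 6))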
serengil/LightPHE
lightphe/models/Ciphertext.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @a...
from typing import Union from lightphe.models.Homomorphic import Homomorphic from lightphe.models.Algorithm import Algorithm from lightphe.cryptosystems.RSA import RSA from lightphe.cryptosystems.ElGamal import ElGamal from lightphe.cryptosystems.Paillier import Paillier from lightphe.cryptosystems.DamgardJurik import DamgardJurik from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama from lightphe.cryptosystems.Benaloh import Benaloh from lightphe.cryptosystems.NaccacheStern import NaccacheStern from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali from lightphe.cryptosystems.EllipticCurveElGamal import EllipticCurveElGamal from lightphe.commons import phe_utils from lightphe.commons.logger import Logger
17,410
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value
if algorithm_name == Algorithm.RSA:
1
2023-10-28 14:57:59+00:00
24k
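The Ciphertext wrapper above exists so that Python operators can dispatch to the underlying cryptosystem's homomorphic operations. A self-contained toy in the Paillier family shows why such a dispatch works: multiplying ciphertexts modulo n² adds the plaintexts. The parameters are deliberately tiny and insecure, nothing here is LightPHE's actual API, and pow(x, -1, n) requires Python 3.8+:

import random

p, q = 293, 433                      # tiny demo primes, insecure on purpose
n, n_sq = p * q, (p * q) ** 2
g = n + 1                            # standard simple generator choice for Paillier
lam = (p - 1) * (q - 1)              # multiple of lcm(p - 1, q - 1), so it works as lambda
mu = pow(lam, -1, n)                 # with g = n + 1, L(g^lam mod n^2) = lam mod n

def encrypt(m):
    r = random.randrange(1, n)
    return pow(g, m, n_sq) * pow(r, n, n_sq) % n_sq

def decrypt(c):
    return (pow(c, lam, n_sq) - 1) // n * mu % n   # L(c^lam mod n^2) * mu mod n

c = encrypt(17) * encrypt(25) % n_sq  # homomorphic addition of plaintexts
assert decrypt(c) == 42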
chenran-li/RQL-release
sb3_contrib/ars/ars.py
[ { "identifier": "BaseAlgorithm", "path": "stable_baselines3/common/base_class.py", "snippet": "class BaseAlgorithm(ABC):\n \"\"\"\n The base of RL algorithms\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from\n (i...
import copy import sys import time import warnings import numpy as np import torch as th import torch.nn.utils from functools import partial from typing import Any, Dict, Optional, Type, TypeVar, Union from gym import spaces from stable_baselines3.common.base_class import BaseAlgorithm from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.save_util import load_from_zip_file from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import get_schedule_fn, safe_mean from sb3_contrib.ars.policies import ARSPolicy, LinearPolicy, MlpPolicy from sb3_contrib.common.vec_env.async_eval import AsyncEval
15,394
SelfARS = TypeVar("SelfARS", bound="ARS") class ARS(BaseAlgorithm): """ Augmented Random Search: https://arxiv.org/abs/1803.07055 Original implementation: https://github.com/modestyachts/ARS C++/Cuda Implementation: https://github.com/google-research/tiny-differentiable-simulator/ 150 LOC Numpy Implementation: https://github.com/alexis-jacq/numpy_ARS/blob/master/asr.py :param policy: The policy to train, can be an instance of ``ARSPolicy``, or a string from ["LinearPolicy", "MlpPolicy"] :param env: The environment to train on, may be a string if registered with gym :param n_delta: How many random perturbations of the policy to try at each update step. :param n_top: How many of the top deltas to use in each update step. Default is n_delta :param learning_rate: Float or schedule for the step size :param delta_std: Float or schedule for the exploration noise :param zero_policy: Boolean determining if the passed policy should have its weights zeroed before training. :param alive_bonus_offset: Constant added to the reward at each step, used to cancel out alive bonuses. :param n_eval_episodes: Number of episodes to evaluate each candidate. :param policy_kwargs: Keyword arguments to pass to the policy on creation :param tensorboard_log: String with the directory to put tensorboard logs :param seed: Random seed for the training :param verbose: Verbosity level: 0 no output, 1 info, 2 debug :param device: Torch device to use for training, defaults to "cpu" :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "LinearPolicy": LinearPolicy, } def __init__( self, policy: Union[str, Type[ARSPolicy]], env: Union[GymEnv, str], n_delta: int = 8, n_top: Optional[int] = None,
SelfARS = TypeVar("SelfARS", bound="ARS") class ARS(BaseAlgorithm): """ Augmented Random Search: https://arxiv.org/abs/1803.07055 Original implementation: https://github.com/modestyachts/ARS C++/Cuda Implementation: https://github.com/google-research/tiny-differentiable-simulator/ 150 LOC Numpy Implementation: https://github.com/alexis-jacq/numpy_ARS/blob/master/asr.py :param policy: The policy to train, can be an instance of ``ARSPolicy``, or a string from ["LinearPolicy", "MlpPolicy"] :param env: The environment to train on, may be a string if registered with gym :param n_delta: How many random perturbations of the policy to try at each update step. :param n_top: How many of the top deltas to use in each update step. Default is n_delta :param learning_rate: Float or schedule for the step size :param delta_std: Float or schedule for the exploration noise :param zero_policy: Boolean determining if the passed policy should have its weights zeroed before training. :param alive_bonus_offset: Constant added to the reward at each step, used to cancel out alive bonuses. :param n_eval_episodes: Number of episodes to evaluate each candidate. :param policy_kwargs: Keyword arguments to pass to the policy on creation :param tensorboard_log: String with the directory to put tensorboard logs :param seed: Random seed for the training :param verbose: Verbosity level: 0 no output, 1 info, 2 debug :param device: Torch device to use for training, defaults to "cpu" :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "LinearPolicy": LinearPolicy, } def __init__( self, policy: Union[str, Type[ARSPolicy]], env: Union[GymEnv, str], n_delta: int = 8, n_top: Optional[int] = None,
learning_rate: Union[float, Schedule] = 0.02,
5
2023-10-28 01:09:21+00:00
24k
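The docstring above summarizes Augmented Random Search: perturb the policy weights with random deltas, roll out both signs of each delta, and update along the best-performing directions scaled by the reward standard deviation. A minimal numpy sketch of one ARS update step under those assumptions (evaluate is a user-supplied rollout function returning an episode return, not part of sb3_contrib):

import numpy as np

def ars_update(theta, evaluate, n_delta=8, n_top=4, step_size=0.02, delta_std=0.05):
    deltas = np.random.randn(n_delta, *theta.shape) * delta_std
    r_plus = np.array([evaluate(theta + d) for d in deltas])
    r_minus = np.array([evaluate(theta - d) for d in deltas])
    # Keep the n_top perturbations with the largest max(r+, r-)
    order = np.argsort(-np.maximum(r_plus, r_minus))[:n_top]
    sigma_r = np.concatenate([r_plus[order], r_minus[order]]).std() + 1e-8
    grad = sum((r_plus[k] - r_minus[k]) * deltas[k] for k in order) / n_top
    return theta + step_size / sigma_r * grad

theta = np.zeros(5)
for _ in range(200):
    theta = ars_update(theta, lambda w: -np.sum((w - 3.0) ** 2))
# theta drifts toward the optimum at 3.0 in expectation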
pytabular-ai/auto-scikit-dl
utils/model.py
[ { "identifier": "MLP", "path": "models/mlp.py", "snippet": "class MLP(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ...
import os import time import json import yaml import shutil import random import datetime import numpy as np import torch import optuna from pathlib import Path from typing import Dict, List, Tuple, Union, Optional, Literal from models import MLP, FTTransformer, AutoInt, DCNv2, NODE from models.abstract import TabModel, check_dir from data.utils import Dataset from data.processor import DataProcessor
15,498
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE, 'ft-transformer': FTTransformer, 'saint': None, 't2g-former': None, 'excel-former': None, } HPOLib = Literal['optuna', 'hyperopt'] # TODO: add 'hyperopt' support def get_model_cards(): return { 'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])), 'coming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value])) } def seed_everything(seed=42): ''' Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY. ''' random.seed(seed) # Set a fixed value for the hash seed os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def load_config_from_file(file): file = str(file) if file.endswith('.yaml'): with open(file, 'r') as f: cfg = yaml.safe_load(f) elif file.endswith('.json'): with open(file, 'r') as f: cfg = json.load(f) else: raise AssertionError('Config files only support yaml or json format now.') return cfg def extract_config(model_config: dict, is_large_data: bool = False): """selection of different search spaces""" used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})} for field in ['model', 'training']: for k in model_config[field]: cfgs = model_config[field][k] if 'type2' not in cfgs: used_cfg = cfgs else: if not is_large_data: used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')} else: used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')} used_cfgs[field][k] = used_cfg return used_cfgs def make_baseline( model_name, model_config: Union[dict, str], n_num: int, cat_card: Optional[List[int]], n_labels: int, sparsity_scheme: Optional[str] = None, device: Union[str, torch.device] = 'cuda',
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE, 'ft-transformer': FTTransformer, 'saint': None, 't2g-former': None, 'excel-former': None, } HPOLib = Literal['optuna', 'hyperopt'] # TODO: add 'hyperopt' support def get_model_cards(): return { 'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])), 'coming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value])) } def seed_everything(seed=42): ''' Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY. ''' random.seed(seed) # Set a fixed value for the hash seed os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def load_config_from_file(file): file = str(file) if file.endswith('.yaml'): with open(file, 'r') as f: cfg = yaml.safe_load(f) elif file.endswith('.json'): with open(file, 'r') as f: cfg = json.load(f) else: raise AssertionError('Config files only support yaml or json format now.') return cfg def extract_config(model_config: dict, is_large_data: bool = False): """selection of different search spaces""" used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})} for field in ['model', 'training']: for k in model_config[field]: cfgs = model_config[field][k] if 'type2' not in cfgs: used_cfg = cfgs else: if not is_large_data: used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')} else: used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')} used_cfgs[field][k] = used_cfg return used_cfgs def make_baseline( model_name, model_config: Union[dict, str], n_num: int, cat_card: Optional[List[int]], n_labels: int, sparsity_scheme: Optional[str] = None, device: Union[str, torch.device] = 'cuda',
) -> TabModel:
5
2023-10-30 14:55:44+00:00
24k
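seed_everything above pins every RNG the stack touches (Python, hash seed, numpy, torch, CUDA, cuDNN). A short self-contained check of the core idea, independent of that helper: reseeding replays exactly the same draws.

import random
import numpy as np
import torch

def set_all_seeds(seed: int = 42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)  # seeds CPU and, when present, CUDA generators

set_all_seeds(0)
first = (np.random.rand(3), torch.rand(3))
set_all_seeds(0)
again = (np.random.rand(3), torch.rand(3))
assert np.allclose(first[0], again[0]) and torch.equal(first[1], again[1])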
hyperspy/exspy
exspy/signals/eds_tem.py
[ { "identifier": "EDSSpectrum", "path": "exspy/signals/eds.py", "snippet": "class EDSSpectrum(Signal1D):\n \"\"\"General signal class for EDS spectra.\"\"\"\n\n _signal_type = \"EDS\"\n\n def __init__(self, *args, **kwards):\n super().__init__(*args, **kwards)\n if self.metadata.Si...
import warnings import logging import traits.api as t import numpy as np import pint import hyperspy.api as hs from scipy import constants from hyperspy.signal import BaseSetMetadataItems, BaseSignal from hyperspy import utils from hyperspy.docstrings.signal import LAZYSIGNAL_DOC from hyperspy.ui_registry import add_gui_method, DISPLAY_DT, TOOLKIT_DT from hyperspy.misc.utils import isiterable from hyperspy.external.progressbar import progressbar from hyperspy.axes import DataAxis from .eds import EDSSpectrum, LazyEDSSpectrum from exspy._defaults_parser import preferences from exspy.misc import material from exspy.misc.eds import utils as utils_eds from exspy.misc.elements import elements as elements_db from scipy.ndimage import binary_dilation, binary_erosion from exspy.models.edstemmodel import EDSTEMModel
14,559
if take_off_angle == "auto": toa = self.get_take_off_angle() else: toa = take_off_angle # determining illumination area for cross-section quantification. if method == "cross_section": if probe_area == "auto": parameters = self.metadata.Acquisition_instrument.TEM if probe_area in parameters: probe_area = parameters.TEM.probe_area else: probe_area = self.get_probe_area( navigation_axes=self.axes_manager.navigation_axes ) int_stack = utils.stack(intensities, lazy=False, show_progressbar=False) comp_old = np.zeros_like(int_stack.data) abs_corr_factor = None # initial if method == "CL": quantification_method = utils_eds.quantification_cliff_lorimer kwargs = { "intensities": int_stack.data, "kfactors": factors, "absorption_correction": abs_corr_factor, "mask": navigation_mask, } elif method == "zeta": quantification_method = utils_eds.quantification_zeta_factor kwargs = { "intensities": int_stack.data, "zfactors": factors, "dose": self._get_dose(method), "absorption_correction": abs_corr_factor, } elif method == "cross_section": quantification_method = utils_eds.quantification_cross_section kwargs = { "intensities": int_stack.data, "cross_sections": factors, "dose": self._get_dose(method, **kwargs), "absorption_correction": abs_corr_factor, } else: raise ValueError( "Please specify method for quantification, " 'as "CL", "zeta" or "cross_section".' ) while True: results = quantification_method(**kwargs) if method == "CL": composition.data = results * 100.0 if absorption_correction: if thickness is not None: mass_thickness = intensities[0].deepcopy() mass_thickness.data = self.CL_get_mass_thickness( composition.split(), thickness ) mass_thickness.metadata.General.title = "Mass thickness" else: raise ValueError( "Thickness is required for absorption correction " "with k-factor method. Results will contain no " "correction for absorption." ) elif method == "zeta": composition.data = results[0] * 100 mass_thickness = intensities[0].deepcopy() mass_thickness.data = results[1] else: composition.data = results[0] * 100.0 number_of_atoms = composition._deepcopy_with_new_data(results[1]) if method == "cross_section": if absorption_correction: abs_corr_factor = utils_eds.get_abs_corr_cross_section( composition.split(), number_of_atoms.split(), toa, probe_area ) kwargs["absorption_correction"] = abs_corr_factor else: if absorption_correction: abs_corr_factor = utils_eds.get_abs_corr_zeta( composition.split(), mass_thickness, toa ) kwargs["absorption_correction"] = abs_corr_factor res_max = np.max(composition.data - comp_old) comp_old = composition.data if absorption_correction and show_progressbar: pbar.update(1) it += 1 if not absorption_correction or abs(res_max) < convergence_criterion: break elif it >= max_iterations: raise Exception( "Absorption correction failed as solution " f"did not converge after {max_iterations} " "iterations" ) if method == "cross_section": number_of_atoms = composition._deepcopy_with_new_data(results[1]) number_of_atoms = number_of_atoms.split() composition = composition.split() else: composition = composition.split() # convert output units to the selected units as required. if composition_units == "atomic": if method != "cross_section":
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. _logger = logging.getLogger(__name__) @add_gui_method(toolkey="exspy.microscope_parameters_EDS_TEM") class EDSTEMParametersUI(BaseSetMetadataItems): beam_energy = t.Float(t.Undefined, label="Beam energy (keV)") real_time = t.Float(t.Undefined, label="Real time (s)") tilt_stage = t.Float(t.Undefined, label="Stage tilt (degree)") live_time = t.Float(t.Undefined, label="Live time (s)") probe_area = t.Float(t.Undefined, label="Beam/probe area (nm²)") azimuth_angle = t.Float(t.Undefined, label="Azimuth angle (degree)") elevation_angle = t.Float(t.Undefined, label="Elevation angle (degree)") energy_resolution_MnKa = t.Float(t.Undefined, label="Energy resolution MnKa (eV)") beam_current = t.Float(t.Undefined, label="Beam current (nA)") mapping = { "Acquisition_instrument.TEM.beam_energy": "beam_energy", "Acquisition_instrument.TEM.Stage.tilt_alpha": "tilt_stage", "Acquisition_instrument.TEM.Detector.EDS.live_time": "live_time", "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle": "azimuth_angle", "Acquisition_instrument.TEM.Detector.EDS.elevation_angle": "elevation_angle", "Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa": "energy_resolution_MnKa", "Acquisition_instrument.TEM.beam_current": "beam_current", "Acquisition_instrument.TEM.probe_area": "probe_area", "Acquisition_instrument.TEM.Detector.EDS.real_time": "real_time", } class EDSTEMSpectrum(EDSSpectrum): """Signal class for EDS spectra measured in a TEM.""" _signal_type = "EDS_TEM" def __init__(self, *args, **kwards): super().__init__(*args, **kwards) # Attributes defaults if "Acquisition_instrument.TEM.Detector.EDS" not in self.metadata: if "Acquisition_instrument.SEM.Detector.EDS" in self.metadata: self.metadata.set_item( "Acquisition_instrument.TEM", self.metadata.Acquisition_instrument.SEM, ) del self.metadata.Acquisition_instrument.SEM self._set_default_param() def _set_default_param(self): """Set values to default (defined in preferences)""" mp = self.metadata mp.Signal.signal_type = "EDS_TEM" mp = self.metadata if "Acquisition_instrument.TEM.Stage.tilt_alpha" not in mp: mp.set_item( "Acquisition_instrument.TEM.Stage.tilt_alpha", preferences.EDS.eds_tilt_stage, ) if "Acquisition_instrument.TEM.Detector.EDS.elevation_angle" not in mp: mp.set_item( "Acquisition_instrument.TEM.Detector.EDS.elevation_angle", preferences.EDS.eds_detector_elevation, ) if "Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa" not in mp: mp.set_item( "Acquisition_instrument.TEM.Detector.EDS."
+ "energy_resolution_MnKa", preferences.EDS.eds_mn_ka, ) if "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle" not in mp: mp.set_item( "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle", preferences.EDS.eds_detector_azimuth, ) def set_microscope_parameters( self, beam_energy=None, live_time=None, tilt_stage=None, azimuth_angle=None, elevation_angle=None, energy_resolution_MnKa=None, beam_current=None, probe_area=None, real_time=None, display=True, toolkit=None, ): if set( [ beam_energy, live_time, tilt_stage, azimuth_angle, elevation_angle, energy_resolution_MnKa, beam_current, probe_area, real_time, ] ) == {None}: tem_par = EDSTEMParametersUI(self) return tem_par.gui(display=display, toolkit=toolkit) md = self.metadata if beam_energy is not None: md.set_item("Acquisition_instrument.TEM.beam_energy ", beam_energy) if live_time is not None: md.set_item("Acquisition_instrument.TEM.Detector.EDS.live_time", live_time) if tilt_stage is not None: md.set_item("Acquisition_instrument.TEM.Stage.tilt_alpha", tilt_stage) if azimuth_angle is not None: md.set_item( "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle", azimuth_angle ) if elevation_angle is not None: md.set_item( "Acquisition_instrument.TEM.Detector.EDS.elevation_angle", elevation_angle, ) if energy_resolution_MnKa is not None: md.set_item( "Acquisition_instrument.TEM.Detector.EDS." + "energy_resolution_MnKa", energy_resolution_MnKa, ) if beam_current is not None: md.set_item("Acquisition_instrument.TEM.beam_current", beam_current) if probe_area is not None: md.set_item("Acquisition_instrument.TEM.probe_area", probe_area) if real_time is not None: md.set_item("Acquisition_instrument.TEM.Detector.EDS.real_time", real_time) set_microscope_parameters.__doc__ = """ Set the microscope parameters. If no arguments are given, raises an interactive mode to fill the values. Parameters ---------- beam_energy: float The energy of the electron beam in keV live_time : float In seconds tilt_stage : float In degree azimuth_angle : float In degree elevation_angle : float In degree energy_resolution_MnKa : float In eV beam_current: float In nA probe_area: float In nm² real_time: float In seconds {} {} Examples -------- >>> s = exspy.data.EDS_TEM_FePt_nanoparticles() >>> print(s.metadata.Acquisition_instrument. >>> TEM.Detector.EDS.energy_resolution_MnKa) >>> s.set_microscope_parameters(energy_resolution_MnKa=135.) >>> print(s.metadata.Acquisition_instrument. >>> TEM.Detector.EDS.energy_resolution_MnKa) 133.312296 135.0 """.format( DISPLAY_DT, TOOLKIT_DT ) def _are_microscope_parameters_missing(self): """Check if the EDS parameters necessary for quantification are defined in metadata.""" must_exist = ( "Acquisition_instrument.TEM.beam_energy", "Acquisition_instrument.TEM.Detector.EDS.live_time", ) missing_parameters = [] for item in must_exist: exists = self.metadata.has_item(item) if exists is False: missing_parameters.append(item) if missing_parameters: _logger.info("Missing parameters {}".format(missing_parameters)) return True else: return False def get_calibration_from(self, ref, nb_pix=1): """Copy the calibration and all metadata of a reference. Primary use: To add a calibration to ripple file from INCA software Parameters ---------- ref : signal The reference contains the calibration in its metadata nb_pix : int The live time (real time corrected from the "dead time") is divided by the number of pixel (spectrums), giving an average live time. Raises ------ NotImplementedError If the signal axis is a non-uniform axis. 
Examples -------- >>> ref = exspy.data.EDS_TEM_FePt_nanoparticles() >>> s = exspy.data.EDS_TEM_FePt_nanoparticles(ref.data) >>> print(s.axes_manager[0].scale) >>> s.get_calibration_from(ref) >>> print(s.axes_manager[0].scale) 1.0 0.020028 """ self._original_metadata = ref.original_metadata.deepcopy() # Setup the axes_manager ax_m = self.axes_manager.signal_axes[0] ax_ref = ref.axes_manager.signal_axes[0] for _axis in [ax_m, ax_ref]: if not _axis.is_uniform: raise NotImplementedError( "The function is not implemented for non-uniform axes." ) ax_m.scale = ax_ref.scale ax_m.units = ax_ref.units ax_m.offset = ax_ref.offset # Setup metadata if "Acquisition_instrument.TEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.TEM elif "Acquisition_instrument.SEM" in ref.metadata: mp_ref = ref.metadata.Acquisition_instrument.SEM else: raise ValueError( "The reference has no metadata " "'Acquisition_instrument.TEM' " "or 'metadata.Acquisition_instrument.SEM'." ) mp = self.metadata mp.Acquisition_instrument.TEM = mp_ref.deepcopy() if mp_ref.has_item("Detector.EDS.live_time"): mp.Acquisition_instrument.TEM.Detector.EDS.live_time = ( mp_ref.Detector.EDS.live_time / nb_pix ) def quantification( self, intensities, method, factors, composition_units="atomic", absorption_correction=False, take_off_angle="auto", thickness="auto", convergence_criterion=0.5, navigation_mask=1.0, closing=True, plot_result=False, probe_area="auto", max_iterations=30, show_progressbar=None, **kwargs, ): """ Absorption corrected quantification using Cliff-Lorimer, the zeta-factor method, or ionization cross sections. The function iterates the quantification function until two successive iterations don't change the final composition by more than a defined percentage criterion (0.5% by default). Parameters ---------- intensities: list of signal the intensity for each X-ray line. method: {'CL', 'zeta', 'cross_section'} Set the quantification method: Cliff-Lorimer, zeta-factor, or ionization cross sections. factors: list of float The list of kfactors, zeta-factors or cross sections in the same order as intensities. Note that intensities provided by HyperSpy are sorted by the alphabetical order of the X-ray lines. e.g. factors = [0.982, 1.32, 1.60] for ['Al_Ka', 'Cr_Ka', 'Ni_Ka']. composition_units: {'atomic', 'weight'} The quantification returns the composition in 'atomic' percent by default, but can also return weight percent if specified. absorption_correction: bool Specify whether or not an absorption correction should be applied. 'False' by default so absorption will not be applied unless specified. take_off_angle : {'auto'} The angle between the sample surface and the vector along which X-rays travel to reach the centre of the detector. thickness: {'auto'} thickness in nm (can be a single value or have the same navigation dimension as the signal). NB: Must be specified for 'CL' method. For 'zeta' or 'cross_section' methods, the first quantification step provides a mass_thickness internally during quantification. convergence_criterion: The convergence criterion defined as the percentage difference between 2 successive iterations. 0.5% by default. navigation_mask : None or float or signal The navigation locations marked as True are not used in the quantification. If a float is given, the vacuum_mask method is used to generate a mask with the float value as threshold. Else provides a signal with the navigation shape. Only for the 'Cliff-Lorimer' method.
closing: bool If True, applies a morphological closing to the mask obtained by vacuum_mask. plot_result : bool If True, plot the calculated composition. If the current object is a single spectrum it prints the result instead. probe_area : {'auto'} This allows the user to specify the probe area of interaction with the sample, needed specifically for the cross_section method of quantification. When left as 'auto' the pixel area is used, calculated from the navigation axes information. max_iterations : int An upper limit to the number of calculations for absorption correction. kwargs The extra keyword arguments are passed to plot. Returns ------- A list of quantified elemental maps (signal) giving the composition of the sample in weight or atomic percent with absorption correction taken into account based on the sample thickness estimate provided. If the method is 'zeta' this function also returns the mass thickness profile for the data. If the method is 'cross_section' this function also returns the atom counts for each element. Examples -------- >>> s = exspy.data.EDS_TEM_FePt_nanoparticles() >>> s.add_lines() >>> kfactors = [1.450226, 5.075602] #For Fe Ka and Pt La >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0]) >>> s.plot(background_windows=bw) >>> intensities = s.get_lines_intensity(background_windows=bw) >>> res = s.quantification(intensities, kfactors, plot_result=True, >>> composition_units='atomic') Fe (Fe_Ka): Composition = 15.41 atomic percent Pt (Pt_La): Composition = 84.59 atomic percent See also -------- vacuum_mask """ if not isinstance(intensities, (list, tuple)) or not isinstance( intensities[0], BaseSignal ): raise ValueError("The parameter `intensities` must be a list of signals.") elif len(intensities) <= 1: raise ValueError("Several X-ray line intensities are required.") if isinstance(navigation_mask, float): if self.axes_manager.navigation_dimension > 0: navigation_mask = self.vacuum_mask(navigation_mask, closing) else: navigation_mask = None xray_lines = [ intensity.metadata.Sample.xray_lines[0] for intensity in intensities ] it = 0 if absorption_correction: if show_progressbar is None: # pragma: no cover show_progressbar = hs.preferences.General.show_progressbar if show_progressbar: pbar = progressbar(total=None, desc="Absorption correction calculation") composition = utils.stack(intensities, lazy=False, show_progressbar=False) if take_off_angle == "auto": toa = self.get_take_off_angle() else: toa = take_off_angle # determining illumination area for cross-section quantification.
if method == "cross_section": if probe_area == "auto": parameters = self.metadata.Acquisition_instrument.TEM if probe_area in parameters: probe_area = parameters.TEM.probe_area else: probe_area = self.get_probe_area( navigation_axes=self.axes_manager.navigation_axes ) int_stack = utils.stack(intensities, lazy=False, show_progressbar=False) comp_old = np.zeros_like(int_stack.data) abs_corr_factor = None # initial if method == "CL": quantification_method = utils_eds.quantification_cliff_lorimer kwargs = { "intensities": int_stack.data, "kfactors": factors, "absorption_correction": abs_corr_factor, "mask": navigation_mask, } elif method == "zeta": quantification_method = utils_eds.quantification_zeta_factor kwargs = { "intensities": int_stack.data, "zfactors": factors, "dose": self._get_dose(method), "absorption_correction": abs_corr_factor, } elif method == "cross_section": quantification_method = utils_eds.quantification_cross_section kwargs = { "intensities": int_stack.data, "cross_sections": factors, "dose": self._get_dose(method, **kwargs), "absorption_correction": abs_corr_factor, } else: raise ValueError( "Please specify method for quantification, " 'as "CL", "zeta" or "cross_section".' ) while True: results = quantification_method(**kwargs) if method == "CL": composition.data = results * 100.0 if absorption_correction: if thickness is not None: mass_thickness = intensities[0].deepcopy() mass_thickness.data = self.CL_get_mass_thickness( composition.split(), thickness ) mass_thickness.metadata.General.title = "Mass thickness" else: raise ValueError( "Thickness is required for absorption correction " "with k-factor method. Results will contain no " "correction for absorption." ) elif method == "zeta": composition.data = results[0] * 100 mass_thickness = intensities[0].deepcopy() mass_thickness.data = results[1] else: composition.data = results[0] * 100.0 number_of_atoms = composition._deepcopy_with_new_data(results[1]) if method == "cross_section": if absorption_correction: abs_corr_factor = utils_eds.get_abs_corr_cross_section( composition.split(), number_of_atoms.split(), toa, probe_area ) kwargs["absorption_correction"] = abs_corr_factor else: if absorption_correction: abs_corr_factor = utils_eds.get_abs_corr_zeta( composition.split(), mass_thickness, toa ) kwargs["absorption_correction"] = abs_corr_factor res_max = np.max(composition.data - comp_old) comp_old = composition.data if absorption_correction and show_progressbar: pbar.update(1) it += 1 if not absorption_correction or abs(res_max) < convergence_criterion: break elif it >= max_iterations: raise Exception( "Absorption correction failed as solution " f"did not converge after {max_iterations} " "iterations" ) if method == "cross_section": number_of_atoms = composition._deepcopy_with_new_data(results[1]) number_of_atoms = number_of_atoms.split() composition = composition.split() else: composition = composition.split() # convert output units to the selected units as required. if composition_units == "atomic": if method != "cross_section":
composition = material.weight_to_atomic(composition)
3
2023-10-28 20:04:10+00:00
24k
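The 'CL' branch above rests on the Cliff-Lorimer relation C_A / C_B = k_AB · I_A / I_B, which fixes composition ratios from measured line intensities; normalizing the weight fractions to sum to one makes each C_i proportional to k_i · I_i. The record's quantification_cliff_lorimer also handles masking and absorption correction; the sketch below keeps only the core ratio, with made-up intensities and the Fe-Ka / Pt-La k-factors reused from the docstring example:

import numpy as np

def cliff_lorimer_weight_fractions(intensities, kfactors):
    # C_i proportional to k_i * I_i, normalized so weight fractions sum to 1
    weighted = np.asarray(kfactors) * np.asarray(intensities)
    return weighted / weighted.sum()

comp = cliff_lorimer_weight_fractions([1200.0, 3400.0], [1.450226, 5.075602])
print(comp * 100)  # weight percent for [Fe, Pt]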
Elfenreigen/UniChest
train.py
[ { "identifier": "utils", "path": "factory/utils.py", "snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass AttrDict(dict):\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n ...
import argparse import os import logging import yaml import numpy as np import random import time import datetime import json import math import torch import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn import torch.distributed as dist import socket from pathlib import Path from functools import partial from sklearn.metrics import roc_auc_score from collections import OrderedDict from torch.utils.data import DataLoader from tensorboardX import SummaryWriter from transformers import AutoModel,BertConfig,AutoTokenizer from factory import utils from scheduler import create_scheduler from optim import create_optimizer from engine.train import train,valid_on_cheXpert,valid_on_chestxray14 from models.clip_tqn import CLP_clinical,ModelRes,TQN_Model,TQN_Model_Add,ModelDense,CLP_clinical2 from models.tokenization_bert import BertTokenizer from dataset.dataset_entity import MIMIC_Dataset,Mergetrain_Dataset, Chestxray14_Dataset,CheXpert_Dataset from io import BytesIO
17,048
# import ruamel.yaml as yaml def main(args, config): torch.cuda.current_device() torch.cuda._initialized = True print("Total CUDA devices: ", torch.cuda.device_count()) torch.set_default_tensor_type('torch.FloatTensor') utils.init_distributed_mode(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True start_epoch = 0 max_epoch = config['schedular']['epochs'] warmup_steps = config['schedular']['warmup_epochs'] num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_rank = global_rank print('sampler_rank',sampler_rank,'num_tasks',num_tasks) #### Dataset #### print("Creating dataset") if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet': image_encoder = ModelRes(res_base_model='resnet50').cuda() elif args.image_encoder_name == 'dense': image_encoder = ModelDense(dense_base_model = 'densenet121').cuda() if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT': tokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
# import ruamel.yaml as yaml def main(args, config): torch.cuda.current_device() torch.cuda._initialized = True print("Total CUDA devices: ", torch.cuda.device_count()) torch.set_default_tensor_type('torch.FloatTensor') utils.init_distributed_mode(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True start_epoch = 0 max_epoch = config['schedular']['epochs'] warmup_steps = config['schedular']['warmup_epochs'] num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_rank = global_rank print('sampler_rank',sampler_rank,'num_tasks',num_tasks) #### Dataset #### print("Creating dataset") if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet': image_encoder = ModelRes(res_base_model='resnet50').cuda() elif args.image_encoder_name == 'dense': image_encoder = ModelDense(dense_base_model = 'densenet121').cuda() if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT': tokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda()
11
2023-10-30 00:24:16+00:00
24k
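The training script in the record above builds every split the same way: a DistributedSampler wrapped in a DataLoader with seeded workers and drop_last=True. A minimal sketch of that pattern follows, assuming a generic torch Dataset; ToyDataset and build_loader are illustrative names, and seed_worker mirrors the utils.seed_worker hook the script passes in.

import random
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler

class ToyDataset(Dataset):
    def __len__(self):
        return 128
    def __getitem__(self, idx):
        return torch.randn(3, 224, 224), idx % 14

def seed_worker(worker_id):
    # Derive a per-worker seed from torch's initial seed so numpy/random
    # augmentations stay reproducible across workers.
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)

def build_loader(dataset, batch_size, num_replicas, rank):
    # One sampler per process; passing num_replicas/rank explicitly avoids
    # needing an initialized process group for this sketch.
    sampler = DistributedSampler(dataset, num_replicas=num_replicas, rank=rank, shuffle=True)
    return DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=2,
        pin_memory=True,
        sampler=sampler,
        worker_init_fn=seed_worker,
        drop_last=True,
    )

loader = build_loader(ToyDataset(), batch_size=16, num_replicas=1, rank=0)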
ifrit98/storage-subnet
neurons/miner.py
[ { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strin...
import os import sys import copy import json import time import torch import typing import base64 import asyncio import aioredis import argparse import threading import traceback import bittensor as bt import storage from collections import defaultdict from Crypto.Random import get_random_bytes from typing import Dict from pprint import pprint, pformat from storage.shared.ecc import ( hash_data, setup_CRS, ECCommitment, ecc_point_to_hex, hex_to_ecc_point, ) from storage.shared.merkle import ( MerkleTree, ) from storage.shared.utils import b64_encode, b64_decode, chunk_data, safe_key_search from storage.miner import ( run, set_weights, ) from storage.miner.utils import ( compute_subsequent_commitment, save_data_to_filesystem, load_from_filesystem, commit_data_with_seed, init_wandb, get_directory_size, get_free_disk_space, update_storage_stats, ) from storage.miner.config import ( config, check_config, add_args, ) from storage.miner.database import ( store_chunk_metadata, update_seed_info, get_chunk_metadata, )
15,119
synapse.seed, ) # Commit to the entire data block bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment( hex_to_ecc_point(synapse.g, synapse.curve), hex_to_ecc_point(synapse.h, synapse.curve), ) bt.logging.trace(f"entering commit()") c, m_val, r = committer.commit(encrypted_byte_data + str(synapse.seed).encode()) if self.config.miner.verbose: bt.logging.debug(f"committer: {committer}") bt.logging.debug(f"encrypted_byte_data: {encrypted_byte_data}") bt.logging.debug(f"c: {c}") bt.logging.debug(f"m_val: {m_val}") bt.logging.debug(f"r: {r}") # Send back some proof that we stored the data synapse.randomness = r synapse.commitment = ecc_point_to_hex(c) bt.logging.trace(f"signed commitment: {synapse.commitment}") # Initialize the commitment hash with the initial commitment for chained proofs synapse.commitment_hash = str(m_val) bt.logging.trace(f"initial commitment_hash: {synapse.commitment_hash}") if self.config.miner.verbose: bt.logging.debug(f"signed m_val: {synapse.signature.hex()}") bt.logging.debug(f"type(seed): {type(synapse.seed)}") bt.logging.debug(f"initial commitment_hash: {synapse.commitment_hash}") bt.logging.info( f"stored data hash {data_hash} with commitment: {synapse.commitment}" ) # Don't send data back, no need. synapse.encrypted_data = base64.b64encode(b"").decode() # Empty b64 response return synapse async def challenge( self, synapse: storage.protocol.Challenge ) -> storage.protocol.Challenge: """ Handles a data challenge by providing cryptographic proof of data possession. This method retrieves the specified data from storage, calculates its commitment using elliptic curve cryptography, and constructs a Merkle proof. The response includes the requested data chunk, Merkle proof, root, and the commitment, which collectively serve as verifiable evidence of data possession. Args: synapse (storage.protocol.Challenge): An object representing the challenge request, which includes parameters such as the hash of the data to retrieve, chunk size, challenge index, and elliptic curve parameters for commitment calculation. Returns: storage.protocol.Challenge: The synapse object is updated with the response to the challenge, including the encrypted data chunk, commitment point, Merkle proof, and root hash. The method performs the following steps: 1. Fetches the encrypted data from storage using the hash provided in the challenge. 2. Splits the data into chunks based on the specified chunk size. 3. Computes a new commitment hash to provide a time-bound proof of possession. 4. Generates a Merkle tree from the committed data chunks and extracts a proof for the requested chunk. 5. Encodes the requested chunk and Merkle proof in base64 for transmission. 6. Updates the challenge synapse with the commitment, data chunk, randomness, and Merkle proof. 7. Records the updated commitment hash in storage for future challenges. This method ensures data integrity and allows the verification of data possession without disclosing the entire data set. It is designed to fulfill data verification requests in a secure and verifiable manner. 
Example usage: Assuming an initialized 'synapse' object with the challenge parameters: >>> updated_synapse = self.challenge(synapse) """ # Retrieve the data itself from miner storage bt.logging.info(f"received challenge hash: {synapse.challenge_hash}") self.request_count += 1 bt.logging.trace(f"entering get_chunk_metadata()") data = await get_chunk_metadata(self.database, synapse.challenge_hash) if data is None: bt.logging.error(f"No data found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"retrieved data: {pformat(data)}") # Chunk the data according to the specified (random) chunk size filepath = data.get(b"filepath", None) if filepath is None: bt.logging.warning( f"No file found for {synapse.challenge_hash} in index, trying path construction..." ) # fallback to load the data from the filesystem via database path construction filepath = os.path.expanduser( f"{self.config.database.directory}/{synapse.challenge_hash}" ) if not os.path.isfile(filepath): bt.logging.error( f"No file found for {synapse.challenge_hash} in {self.config.database.directory}." ) return synapse bt.logging.trace(f"entering load_from_filesystem()") try: encrypted_data_bytes = load_from_filesystem(filepath) except Exception as e: bt.logging.error(f"Error loading file {filepath}: {e}") synapse.axon.status_code = 404 synapse.axon.status_message = "File not found" return synapse # Construct the next commitment hash using previous commitment and hash # of the data to prove storage over time prev_seed = data.get(b"seed", "").encode() if not prev_seed: bt.logging.error(f"No seed found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"entering compute_subsequent_commitment()...") new_seed = synapse.seed.encode()
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import this repo class miner: @classmethod def check_config(cls, config: "bt.Config"): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ check_config(cls, config) @classmethod def add_args(cls, parser): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ add_args(cls, parser) @classmethod def config(cls): """ Retrieves the configuration for the neuron. Returns: bt.Config: The configuration object for the neuron. This class method returns the neuron's configuration, which is used throughout the neuron's lifecycle for various functionalities and operations. """ return config(cls) subtensor: "bt.subtensor" wallet: "bt.wallet" metagraph: "bt.metagraph" def __init__(self): self.config = miner.config() self.check_config(self.config) bt.logging(config=self.config, logging_dir=self.config.miner.full_path) bt.logging.info(f"{self.config}") bt.logging.info("miner.__init__()") # Init device. bt.logging.debug("loading device") self.device = torch.device(self.config.miner.device) bt.logging.debug(str(self.device)) # Init subtensor bt.logging.debug("loading subtensor") self.subtensor = bt.subtensor(config=self.config) bt.logging.debug(str(self.subtensor)) self.current_block = self.subtensor.get_current_block() # Init wallet. bt.logging.debug("loading wallet") self.wallet = bt.wallet(config=self.config) self.wallet.create_if_non_existent() if not self.config.wallet._mock: if not self.subtensor.is_hotkey_registered_on_subnet( hotkey_ss58=self.wallet.hotkey.ss58_address, netuid=self.config.netuid ): raise Exception( f"Wallet not currently registered on netuid {self.config.netuid}, please first register wallet before running" ) bt.logging.debug(f"wallet: {str(self.wallet)}") # Init metagraph. bt.logging.debug("loading metagraph") self.metagraph = bt.metagraph( netuid=self.config.netuid, network=self.subtensor.network, sync=False ) # Make sure not to sync without passing subtensor self.metagraph.sync(subtensor=self.subtensor) # Sync metagraph with subtensor. 
bt.logging.debug(str(self.metagraph)) # Setup database self.database = aioredis.StrictRedis( host=self.config.database.host, port=self.config.database.port, db=self.config.database.index, socket_keepalive=True, socket_connect_timeout=300, ) self.my_subnet_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address ) bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") # Init wandb. if not self.config.wandb.off: bt.logging.debug("loading wandb") init_wandb(self) # The axon handles request processing, allowing validators to send this process requests. self.axon = bt.axon(wallet=self.wallet, config=self.config) bt.logging.info(f"Axon {self.axon}") # Attach handlers that determine which functions are called when servicing a request. bt.logging.info(f"Attaching forward functions to axon.") self.axon.attach( forward_fn=self.store, blacklist_fn=self.store_blacklist_fn, priority_fn=self.store_priority_fn, ).attach( forward_fn=self.challenge, blacklist_fn=self.challenge_blacklist_fn, priority_fn=self.challenge_priority_fn, ).attach( forward_fn=self.retrieve, blacklist_fn=self.retrieve_blacklist_fn, priority_fn=self.retrieve_priority_fn, ) # Serve passes the axon information to the network + netuid we are hosting on. # This will auto-update if the axon port or external IP has changed. bt.logging.info( f"Serving axon {self.axon} on network: {self.subtensor.chain_endpoint} with netuid: {self.config.netuid}" ) self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) # Starting the axon makes the miner active on the network. bt.logging.info(f"Starting axon server on port: {self.config.axon.port}") self.axon.start() # Init the event loop. self.loop = asyncio.get_event_loop() # Instantiate runners self.should_exit: bool = False self.is_running: bool = False self.thread: threading.Thread = None self.lock = asyncio.Lock() self.request_timestamps: Dict = {} self.step = 0 # Init the miner's storage request tracker self.request_count = 0 self.start_request_count_timer() self.requests_per_hour = [] self.average_requests_per_hour = 0 # Init the miner's storage usage tracker update_storage_stats(self) def start_request_count_timer(self): """ Initializes and starts a timer for tracking the number of requests received by the miner in an hour. This method sets up a one-hour timer that, upon expiration, calls the `reset_request_count` method to log the number of requests received and reset the count for the next hour. The timer is set to run in a separate thread to avoid blocking the main execution. Usage: Should be called during the initialization of the miner to start tracking requests per hour. """ self.request_count_timer = threading.Timer(3600, self.reset_request_count) self.request_count_timer.start() def reset_request_count(self): """ Logs the number of requests received in the last hour and resets the count. This method is automatically called when the one-hour timer set by `start_request_count_timer` expires. It logs the count of requests received in the last hour and then resets the count. Additionally, it restarts the timer for the next hour. Usage: This method is intended to be called automatically by a timer and typically should not be called directly.
""" bt.logging.info( f"Number of requests received in the last hour: {self.request_count}" ) self.requests_per_hour.append(self.request_count) bt.logging.info(f"Requests per hour: {self.requests_per_hour}") self.average_requests_per_hour = sum(self.requests_per_hour) / len( self.requests_per_hour ) bt.logging.info(f"Average requests per hour: {self.average_requests_per_hour}") self.request_count = 0 self.start_request_count_timer() @property async def total_storage(self): """ Calculates the total size of data stored by the miner. This method fetches all data keys from the Redis database and sums up the size of each data object. It provides an estimate of the total amount of data currently held by the miner. Returns: int: Total size of data (in bytes) stored by the miner. Example: >>> miner.total_storage() 102400 # Example output indicating 102,400 bytes of data stored """ # Fetch all keys from Redis all_keys = await safe_key_search(self.database, "*") # Filter out keys that contain a period (temporary, remove later) filtered_keys = [key for key in all_keys if b"." not in key] # Get the size of each data object and sum them up total_size = sum( [ await get_chunk_metadata(self.database, key).get(b"size", 0) for key in filtered_keys ] ) return total_size def store_blacklist_fn( self, synapse: storage.protocol.Store ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def store_priority_fn(self, synapse: storage.protocol.Store) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. 
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def challenge_blacklist_fn( self, synapse: storage.protocol.Challenge ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def challenge_priority_fn(self, synapse: storage.protocol.Challenge) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. 
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. priority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", priority ) return priority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature.
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem filepath = save_data_to_filesystem( encrypted_byte_data, self.config.database.directory, str(data_hash) ) bt.logging.trace(f"stored data {data_hash} in filepath: {filepath}") # Add the initial chunk, size, and validator seed information await store_chunk_metadata( self.database, data_hash, filepath, synapse.dendrite.hotkey, sys.getsizeof(encrypted_byte_data), synapse.seed, ) # Commit to the entire data block bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment( hex_to_ecc_point(synapse.g, synapse.curve), hex_to_ecc_point(synapse.h, synapse.curve), ) bt.logging.trace(f"entering commit()") c, m_val, r = committer.commit(encrypted_byte_data + str(synapse.seed).encode()) if self.config.miner.verbose: bt.logging.debug(f"committer: {committer}") bt.logging.debug(f"encrypted_byte_data: {encrypted_byte_data}") bt.logging.debug(f"c: {c}") bt.logging.debug(f"m_val: {m_val}") bt.logging.debug(f"r: {r}") # Send back some proof that we stored the data synapse.randomness = r synapse.commitment = ecc_point_to_hex(c) bt.logging.trace(f"signed commitment: {synapse.commitment}") # Initialize the commitment hash with the initial commitment for chained proofs synapse.commitment_hash = str(m_val) bt.logging.trace(f"initial commitment_hash: {synapse.commitment_hash}") if self.config.miner.verbose: bt.logging.debug(f"signed m_val: {synapse.signature.hex()}") bt.logging.debug(f"type(seed): {type(synapse.seed)}") bt.logging.debug(f"initial commitment_hash: {synapse.commitment_hash}") bt.logging.info( f"stored data hash {data_hash} with commitment: {synapse.commitment}" ) # Don't send data back, no need. synapse.encrypted_data = base64.b64encode(b"").decode() # Empty b64 response return synapse async def challenge( self, synapse: storage.protocol.Challenge ) -> storage.protocol.Challenge: """ Handles a data challenge by providing cryptographic proof of data possession. This method retrieves the specified data from storage, calculates its commitment using elliptic curve cryptography, and constructs a Merkle proof. The response includes the requested data chunk, Merkle proof, root, and the commitment, which collectively serve as verifiable evidence of data possession. 
Args: synapse (storage.protocol.Challenge): An object representing the challenge request, which includes parameters such as the hash of the data to retrieve, chunk size, challenge index, and elliptic curve parameters for commitment calculation. Returns: storage.protocol.Challenge: The synapse object is updated with the response to the challenge, including the encrypted data chunk, commitment point, Merkle proof, and root hash. The method performs the following steps: 1. Fetches the encrypted data from storage using the hash provided in the challenge. 2. Splits the data into chunks based on the specified chunk size. 3. Computes a new commitment hash to provide a time-bound proof of possession. 4. Generates a Merkle tree from the committed data chunks and extracts a proof for the requested chunk. 5. Encodes the requested chunk and Merkle proof in base64 for transmission. 6. Updates the challenge synapse with the commitment, data chunk, randomness, and Merkle proof. 7. Records the updated commitment hash in storage for future challenges. This method ensures data integrity and allows the verification of data possession without disclosing the entire data set. It is designed to fulfill data verification requests in a secure and verifiable manner. Example usage: Assuming an initialized 'synapse' object with the challenge parameters: >>> updated_synapse = self.challenge(synapse) """ # Retrieve the data itself from miner storage bt.logging.info(f"received challenge hash: {synapse.challenge_hash}") self.request_count += 1 bt.logging.trace(f"entering get_chunk_metadata()") data = await get_chunk_metadata(self.database, synapse.challenge_hash) if data is None: bt.logging.error(f"No data found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"retrieved data: {pformat(data)}") # Chunk the data according to the specified (random) chunk size filepath = data.get(b"filepath", None) if filepath is None: bt.logging.warning( f"No file found for {synapse.challenge_hash} in index, trying path construction..." ) # fallback to load the data from the filesystem via database path construction filepath = os.path.expanduser( f"{self.config.database.directory}/{synapse.challenge_hash}" ) if not os.path.isfile(filepath): bt.logging.error( f"No file found for {synapse.challenge_hash} in {self.config.database.directory}." ) return synapse bt.logging.trace(f"entering load_from_filesystem()") try: encrypted_data_bytes = load_from_filesystem(filepath) except Exception as e: bt.logging.error(f"Error loading file {filepath}: {e}") synapse.axon.status_code = 404 synapse.axon.status_message = "File not found" return synapse # Construct the next commitment hash using previous commitment and hash # of the data to prove storage over time prev_seed = data.get(b"seed", "").encode() if not prev_seed: bt.logging.error(f"No seed found for {synapse.challenge_hash}") return synapse bt.logging.trace(f"entering compute_subsequent_commitment()...") new_seed = synapse.seed.encode()
next_commitment, proof = compute_subsequent_commitment(
12
2023-10-26 18:54:47+00:00
24k
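The miner record above proves continued possession by folding each validator seed into a fresh commitment over the stored blob (update_seed_info, compute_subsequent_commitment). A simplified, hash-chain sketch of that idea follows; the real code uses an elliptic-curve Pedersen commitment (ECCommitment), so these helper names are illustrative only.

import hashlib

def commit(data: bytes, seed: bytes) -> str:
    # Bind the stored blob to the current validator seed.
    return hashlib.sha3_256(data + seed).hexdigest()

def next_commitment(prev: str, data: bytes, new_seed: bytes) -> str:
    # Fold the previous commitment and a fresh seed into the next one, so each
    # challenge response proves storage since the last challenge, not just at store time.
    return hashlib.sha3_256(prev.encode() + data + new_seed).hexdigest()

c0 = commit(b"encrypted blob", b"seed-0")
c1 = next_commitment(c0, b"encrypted blob", b"seed-1")  # answer to the next challenge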
cpacker/MemGPT
memgpt/agent.py
[ { "identifier": "AgentState", "path": "memgpt/data_types.py", "snippet": "class AgentState:\n def __init__(\n self,\n name: str,\n user_id: uuid.UUID,\n persona: str, # the filename where the persona was originally sourced from\n human: str, # the filename where t...
import datetime import uuid import glob import inspect import os import json import traceback from pathlib import Path from typing import List, Tuple from box import Box from memgpt.data_types import AgentState, Message from memgpt.models import chat_completion_response from memgpt.interface import AgentInterface from memgpt.persistence_manager import PersistenceManager, LocalStateManager from memgpt.config import MemGPTConfig from memgpt.system import get_login_event, package_function_response, package_summarize_message, get_initial_boot_messages from memgpt.memory import CoreMemory as InContextMemory, summarize_messages from memgpt.llm_api_tools import create, is_context_overflow_error from memgpt.utils import ( get_tool_call_id, get_local_time, parse_json, united_diff, printd, count_tokens, get_schema_diff, validate_function_response, verify_first_message_correctness, ) from memgpt.constants import ( FIRST_MESSAGE_ATTEMPTS, MESSAGE_SUMMARY_WARNING_FRAC, MESSAGE_SUMMARY_TRUNC_TOKEN_FRAC, MESSAGE_SUMMARY_TRUNC_KEEP_N_LAST, CORE_MEMORY_HUMAN_CHAR_LIMIT, CORE_MEMORY_PERSONA_CHAR_LIMIT, LLM_MAX_TOKENS, CLI_WARNING_PREFIX, JSON_ENSURE_ASCII, ) from .errors import LLMError from .functions.functions import USER_FUNCTIONS_DIR, load_all_function_sets
16,095
self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # If no failures happened along the way: ... # Step 4: send the info on the function call and function response to GPT self.interface.function_message(f"Success: {function_response_string}") messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response else: # Standard non-function reply self.interface.internal_monologue(response_message.content) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply heartbeat_request = None function_failed = None return messages, heartbeat_request, function_failed def step(self, user_message, first_message=False, first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS, skip_verify=False): """Top-level event message handler for the MemGPT agent""" try: # Step 0: add user message if user_message is not None: self.interface.user_message(user_message) packed_user_message = {"role": "user", "content": user_message} # Special handling for AutoGen messages with 'name' field try: user_message_json = json.loads(user_message) # Treat 'name' as a special field # If it exists in the input message, elevate it to the 'message' level if "name" in user_message_json: packed_user_message["name"] = user_message_json["name"] user_message_json.pop("name", None) packed_user_message["content"] = json.dumps(user_message_json, ensure_ascii=JSON_ENSURE_ASCII) except Exception as e: print(f"{CLI_WARNING_PREFIX}handling of 'name' field failed with: {e}") input_message_sequence = self.messages + [packed_user_message] else: input_message_sequence = self.messages if len(input_message_sequence) > 1 and input_message_sequence[-1]["role"] != "user": printd(f"{CLI_WARNING_PREFIX}Attempting to run ChatCompletion without user as the last message in the queue") # Step 1: send the conversation and available functions to GPT if not skip_verify and (first_message or self.messages_total == self.messages_total_init): printd(f"This is the first message. Running extra verifier on AI response.") counter = 0 while True: response = self._get_ai_reply( message_sequence=input_message_sequence, first_message=True, # passed through to the prompt formatter ) if verify_first_message_correctness(response, require_monologue=self.first_message_verify_mono): break counter += 1 if counter > first_message_retry_limit: raise Exception(f"Hit first message retry limit ({first_message_retry_limit})") else: response = self._get_ai_reply( message_sequence=input_message_sequence, ) # Step 2: check if LLM wanted to call a function # (if yes) Step 3: call the function # (if yes) Step 4: send the info on the function call and function response to LLM response_message = response.choices[0].message response_message_copy = response_message.copy() all_response_messages, heartbeat_request, function_failed = self._handle_ai_response(response_message) # Add the extra metadata to the assistant response # (e.g. 
enough metadata to enable recreating the API call) # assert "api_response" not in all_response_messages[0] # all_response_messages[0]["api_response"] = response_message_copy # assert "api_args" not in all_response_messages[0] # all_response_messages[0]["api_args"] = { # "model": self.model, # "messages": input_message_sequence, # "functions": self.functions, # } # Step 4: extend the message history if user_message is not None: all_new_messages = [ Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=packed_user_message, ) ] + all_response_messages else: all_new_messages = all_response_messages # Check the memory pressure and potentially issue a memory pressure warning current_total_tokens = response.usage.total_tokens active_memory_warning = False # We can't do summarize logic properly if context_window is undefined if self.agent_state.llm_config.context_window is None: # Fallback if for some reason context_window is missing, just set to the default
def link_functions(function_schemas): """Link function definitions to list of function schemas""" # need to dynamically link the functions # the saved agent.functions will just have the schemas, but we need to # go through the functions library and pull the respective python functions # Available functions is a mapping from: # function_name -> { # json_schema: schema # python_function: function # } # agent.functions is a list of schemas (OpenAI kwarg functions style, see: https://platform.openai.com/docs/api-reference/chat/create) # [{'name': ..., 'description': ...}, {...}] available_functions = load_all_function_sets() linked_function_set = {} for f_schema in function_schemas: # Attempt to find the function in the existing function library f_name = f_schema.get("name") if f_name is None: raise ValueError(f"While loading agent.state.functions encountered a bad function schema object with no name:\n{f_schema}") linked_function = available_functions.get(f_name) if linked_function is None: raise ValueError( f"Function '{f_name}' was specified in agent.state.functions, but is not in function library:\n{available_functions.keys()}" ) # Once we find a matching function, make sure the schema is identical if json.dumps(f_schema, ensure_ascii=JSON_ENSURE_ASCII) != json.dumps( linked_function["json_schema"], ensure_ascii=JSON_ENSURE_ASCII ): # error_message = ( # f"Found matching function '{f_name}' from agent.state.functions inside function library, but schemas are different." # + f"\n>>>agent.state.functions\n{json.dumps(f_schema, indent=2, ensure_ascii=JSON_ENSURE_ASCII)}" # + f"\n>>>function library\n{json.dumps(linked_function['json_schema'], indent=2, ensure_ascii=JSON_ENSURE_ASCII)}" # ) schema_diff = get_schema_diff(f_schema, linked_function["json_schema"]) error_message = ( f"Found matching function '{f_name}' from agent.state.functions inside function library, but schemas are different.\n" + "".join(schema_diff) ) # NOTE to handle old configs, instead of erroring here let's just warn # raise ValueError(error_message) printd(error_message) linked_function_set[f_name] = linked_function return linked_function_set def initialize_memory(ai_notes, human_notes): if ai_notes is None: raise ValueError(ai_notes) if human_notes is None: raise ValueError(human_notes) memory = InContextMemory(human_char_limit=CORE_MEMORY_HUMAN_CHAR_LIMIT, persona_char_limit=CORE_MEMORY_PERSONA_CHAR_LIMIT) memory.edit_persona(ai_notes) memory.edit_human(human_notes) return memory def construct_system_with_memory(system, memory, memory_edit_timestamp, archival_memory=None, recall_memory=None, include_char_count=True): full_system_message = "\n".join( [ system, "\n", f"### Memory [last modified: {memory_edit_timestamp.strip()}]", f"{len(recall_memory) if recall_memory else 0} previous messages between you and the user are stored in recall memory (use functions to access them)", f"{len(archival_memory) if archival_memory else 0} total memories you created are stored in archival memory (use functions to access them)", "\nCore memory shown below (limited in size, additional information stored in archival / recall memory):", f'<persona characters="{len(memory.persona)}/{memory.persona_char_limit}">' if include_char_count else "<persona>", memory.persona, "</persona>", f'<human characters="{len(memory.human)}/{memory.human_char_limit}">' if include_char_count else "<human>", memory.human, "</human>", ] ) return full_system_message def initialize_message_sequence( model, system, memory, archival_memory=None, 
recall_memory=None, memory_edit_timestamp=None, include_initial_boot_message=True, ): if memory_edit_timestamp is None: memory_edit_timestamp = get_local_time() full_system_message = construct_system_with_memory( system, memory, memory_edit_timestamp, archival_memory=archival_memory, recall_memory=recall_memory ) first_user_message = get_login_event() # event letting MemGPT know the user just logged in if include_initial_boot_message: if model is not None and "gpt-3.5" in model: initial_boot_messages = get_initial_boot_messages("startup_with_send_message_gpt35") else: initial_boot_messages = get_initial_boot_messages("startup_with_send_message") messages = ( [ {"role": "system", "content": full_system_message}, ] + initial_boot_messages + [ {"role": "user", "content": first_user_message}, ] ) else: messages = [ {"role": "system", "content": full_system_message}, {"role": "user", "content": first_user_message}, ] return messages class Agent(object): def __init__( self, agent_state: AgentState, interface: AgentInterface, # extras messages_total=None, # TODO remove? first_message_verify_mono=True, # TODO move to config? memgpt_config: MemGPTConfig = None, ): # Hold a copy of the state that was used to init the agent self.agent_state = agent_state # gpt-4, gpt-3.5-turbo, ... self.model = agent_state.llm_config.model # Store the system instructions (used to rebuild memory) if "system" not in agent_state.state: raise ValueError(f"'system' not found in provided AgentState") self.system = agent_state.state["system"] if "functions" not in agent_state.state: raise ValueError(f"'functions' not found in provided AgentState") # Store the functions schemas (this is passed as an argument to ChatCompletion) self.functions = agent_state.state["functions"] # these are the schema # Link the actual python functions corresponding to the schemas self.functions_python = {k: v["python_function"] for k, v in link_functions(function_schemas=self.functions).items()} assert all([callable(f) for k, f in self.functions_python.items()]), self.functions_python # Initialize the memory object if "persona" not in agent_state.state: raise ValueError(f"'persona' not found in provided AgentState") if "human" not in agent_state.state: raise ValueError(f"'human' not found in provided AgentState") self.memory = initialize_memory(ai_notes=agent_state.state["persona"], human_notes=agent_state.state["human"]) # Interface must implement: # - internal_monologue # - assistant_message # - function_message # ... 
# Different interfaces can handle events differently # e.g., print in CLI vs send a discord message with a discord bot self.interface = interface # Create the persistence manager object based on the AgentState info # TODO self.persistence_manager = LocalStateManager(agent_state=agent_state) # State needed for heartbeat pausing self.pause_heartbeats_start = None self.pause_heartbeats_minutes = 0 self.first_message_verify_mono = first_message_verify_mono # Controls if the convo memory pressure warning is triggered # When an alert is sent in the message queue, set this to True (to avoid repeat alerts) # When the summarizer is run, set this back to False (to reset) self.agent_alerted_about_memory_pressure = False # Read local config if not provided if not memgpt_config: self.memgpt_config = MemGPTConfig() else: self.memgpt_config = memgpt_config # Initialize connection to metedata store # self.ms = MetadataStore(self.memgpt_config) # Once the memory object is initialized, use it to "bake" the system message if "messages" in agent_state.state and agent_state.state["messages"] is not None: # print(f"Agent.__init__ :: loading, state={agent_state.state['messages']}") if not isinstance(agent_state.state["messages"], list): raise ValueError(f"'messages' in AgentState was bad type: {type(agent_state.state['messages'])}") assert all([isinstance(msg, str) for msg in agent_state.state["messages"]]) # Convert to IDs, and pull from the database self._messages = [ self.persistence_manager.recall_memory.storage.get(uuid.UUID(msg_id)) for msg_id in agent_state.state["messages"] ] assert all([isinstance(msg, Message) for msg in self._messages]), (self._messages, agent_state.state["messages"]) else: # print(f"Agent.__init__ :: creating, state={agent_state.state['messages']}") init_messages = initialize_message_sequence( self.model, self.system, self.memory, ) init_messages_objs = [] for msg in init_messages: init_messages_objs.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=msg ) ) self._messages = [] self.messages_total = 0 self._append_to_messages(added_messages=init_messages_objs) assert all([isinstance(msg, Message) for msg in self._messages]), (self._messages, init_messages) # Keep track of the total number of messages throughout all time self.messages_total = messages_total if messages_total is not None else (len(self._messages) - 1) # (-system) # self.messages_total_init = self.messages_total self.messages_total_init = len(self._messages) - 1 printd(f"Agent initialized, self.messages_total={self.messages_total}") # Create the agent in the DB # self.save() self.update_state() @property def messages(self) -> List[dict]: """Getter method that converts the internal Message list into OpenAI-style dicts""" return [msg.to_openai_dict() for msg in self._messages] @messages.setter def messages(self, value): raise Exception("Modifying message list directly not allowed") def _trim_messages(self, num): """Trim messages from the front, not including the system message""" self.persistence_manager.trim_messages(num) new_messages = [self.messages[0]] + self.messages[num:] self._messages = new_messages def _prepend_to_messages(self, added_messages: List[Message]): """Wrapper around self.messages.prepend to allow additional calls to a state/persistence manager""" assert all([isinstance(msg, Message) for msg in added_messages]) self.persistence_manager.prepend_to_messages(added_messages) new_messages = [self.messages[0]] + 
added_messages + self.messages[1:] # prepend (no system) self._messages = new_messages self.messages_total += len(added_messages) # still should increment the message counter (summaries are additions too) def _append_to_messages(self, added_messages: List[Message]): """Wrapper around self.messages.append to allow additional calls to a state/persistence manager""" assert all([isinstance(msg, Message) for msg in added_messages]) self.persistence_manager.append_to_messages(added_messages) # strip extra metadata if it exists # for msg in added_messages: # msg.pop("api_response", None) # msg.pop("api_args", None) new_messages = self._messages + added_messages # append self._messages = new_messages self.messages_total += len(added_messages) def _swap_system_message(self, new_system_message: Message): assert isinstance(new_system_message, Message) assert new_system_message.role == "system", new_system_message assert self._messages[0].role == "system", self._messages self.persistence_manager.swap_system_message(new_system_message) new_messages = [new_system_message] + self._messages[1:] # swap index 0 (system) self._messages = new_messages def _get_ai_reply( self, message_sequence: List[dict], function_call: str = "auto", first_message: bool = False, # hint ) -> chat_completion_response.ChatCompletionResponse: """Get response from LLM API""" try: response = create( agent_state=self.agent_state, messages=message_sequence, functions=self.functions, functions_python=self.functions_python, function_call=function_call, # hint first_message=first_message, ) # special case for 'length' if response.choices[0].finish_reason == "length": raise Exception("Finish reason was length (maximum context length)") # catches for soft errors if response.choices[0].finish_reason not in ["stop", "function_call", "tool_calls"]: raise Exception(f"API call finished with bad finish reason: {response}") # unpack with response.choices[0].message.content return response except Exception as e: raise e def _handle_ai_response( self, response_message: chat_completion_response.Message, override_tool_call_id: bool = True ) -> Tuple[List[Message], bool, bool]: """Handles parsing and function execution""" messages = [] # append these to the history when done # Step 2: check if LLM wanted to call a function if response_message.function_call or (response_message.tool_calls is not None and len(response_message.tool_calls) > 0): if response_message.function_call: raise DeprecationWarning(response_message) if response_message.tool_calls is not None and len(response_message.tool_calls) > 1: raise NotImplementedError(f">1 tool call not supported") # The content is then the internal monologue, not chat self.interface.internal_monologue(response_message.content) # generate UUID for tool call if override_tool_call_id or response_message.function_call: tool_call_id = get_tool_call_id() # needs to be a string for JSON response_message.tool_calls[0].id = tool_call_id else: tool_call_id = response_message.tool_calls[0].id assert tool_call_id is not None # should be defined # only necessary to add the tool_call_id to a function call (antipattern) # response_message_dict = response_message.model_dump() # response_message_dict["tool_call_id"] = tool_call_id # role: assistant (requesting tool call, set tool call ID) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply printd(f"Function call
message: {messages[-1]}") # Step 3: call the function # Note: the JSON response may not always be valid; be sure to handle errors # Failure case 1: function name is wrong function_call = ( response_message.function_call if response_message.function_call is not None else response_message.tool_calls[0].function ) function_name = function_call.name printd(f"Request to call function {function_name} with tool_call_id: {tool_call_id}") try: function_to_call = self.functions_python[function_name] except KeyError as e: error_msg = f"No function named {function_name}" function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # Failure case 2: function name is OK, but function args are bad JSON try: raw_function_args = function_call.arguments function_args = parse_json(raw_function_args) except Exception as e: error_msg = f"Error parsing JSON for function '{function_name}' arguments: {raw_function_args}" function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # (Still parsing function args) # Handle requests for immediate heartbeat heartbeat_request = function_args.pop("request_heartbeat", None) if not (isinstance(heartbeat_request, bool) or heartbeat_request is None): printd( f"{CLI_WARNING_PREFIX}'request_heartbeat' arg parsed was not a bool or None, type={type(heartbeat_request)}, value={heartbeat_request}" ) heartbeat_request = None # Failure case 3: function failed during execution self.interface.function_message(f"Running {function_name}({function_args})") try: spec = inspect.getfullargspec(function_to_call).annotations for name, arg in function_args.items(): if isinstance(function_args[name], dict): function_args[name] = spec[name](**function_args[name]) function_args["self"] = self # need to attach self to arg since it's dynamically linked function_response = function_to_call(**function_args) if function_name in ["conversation_search", "conversation_search_date", "archival_memory_search"]: # with certain functions we rely on the paging mechanism to handle overflow truncate = False else: # but by default, we add a truncation safeguard to prevent bad functions from # overflow the agent context window truncate = True function_response_string = validate_function_response(function_response, truncate=truncate) function_args.pop("self", None) function_response = package_function_response(True, function_response_string) function_failed = False except Exception as e: function_args.pop("self", None) # error_msg = f"Error calling function {function_name} with args {function_args}: {str(e)}" # Less detailed - don't provide full args, idea is that it should be in recent context so no need (just adds noise) error_msg = f"Error calling function 
{function_name}: {str(e)}" error_msg_user = f"{error_msg}\n{traceback.format_exc()}" printd(error_msg_user) function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # If no failures happened along the way: ... # Step 4: send the info on the function call and function response to GPT self.interface.function_message(f"Success: {function_response_string}") messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response else: # Standard non-function reply self.interface.internal_monologue(response_message.content) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply heartbeat_request = None function_failed = None return messages, heartbeat_request, function_failed def step(self, user_message, first_message=False, first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS, skip_verify=False): """Top-level event message handler for the MemGPT agent""" try: # Step 0: add user message if user_message is not None: self.interface.user_message(user_message) packed_user_message = {"role": "user", "content": user_message} # Special handling for AutoGen messages with 'name' field try: user_message_json = json.loads(user_message) # Treat 'name' as a special field # If it exists in the input message, elevate it to the 'message' level if "name" in user_message_json: packed_user_message["name"] = user_message_json["name"] user_message_json.pop("name", None) packed_user_message["content"] = json.dumps(user_message_json, ensure_ascii=JSON_ENSURE_ASCII) except Exception as e: print(f"{CLI_WARNING_PREFIX}handling of 'name' field failed with: {e}") input_message_sequence = self.messages + [packed_user_message] else: input_message_sequence = self.messages if len(input_message_sequence) > 1 and input_message_sequence[-1]["role"] != "user": printd(f"{CLI_WARNING_PREFIX}Attempting to run ChatCompletion without user as the last message in the queue") # Step 1: send the conversation and available functions to GPT if not skip_verify and (first_message or self.messages_total == self.messages_total_init): printd(f"This is the first message. 
Running extra verifier on AI response.") counter = 0 while True: response = self._get_ai_reply( message_sequence=input_message_sequence, first_message=True, # passed through to the prompt formatter ) if verify_first_message_correctness(response, require_monologue=self.first_message_verify_mono): break counter += 1 if counter > first_message_retry_limit: raise Exception(f"Hit first message retry limit ({first_message_retry_limit})") else: response = self._get_ai_reply( message_sequence=input_message_sequence, ) # Step 2: check if LLM wanted to call a function # (if yes) Step 3: call the function # (if yes) Step 4: send the info on the function call and function response to LLM response_message = response.choices[0].message response_message_copy = response_message.copy() all_response_messages, heartbeat_request, function_failed = self._handle_ai_response(response_message) # Add the extra metadata to the assistant response # (e.g. enough metadata to enable recreating the API call) # assert "api_response" not in all_response_messages[0] # all_response_messages[0]["api_response"] = response_message_copy # assert "api_args" not in all_response_messages[0] # all_response_messages[0]["api_args"] = { # "model": self.model, # "messages": input_message_sequence, # "functions": self.functions, # } # Step 4: extend the message history if user_message is not None: all_new_messages = [ Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=packed_user_message, ) ] + all_response_messages else: all_new_messages = all_response_messages # Check the memory pressure and potentially issue a memory pressure warning current_total_tokens = response.usage.total_tokens active_memory_warning = False # We can't do summarize logic properly if context_window is undefined if self.agent_state.llm_config.context_window is None: # Fallback if for some reason context_window is missing, just set to the default
print(f"{CLI_WARNING_PREFIX}could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}")
30
2023-10-11 07:38:37+00:00
24k
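Editor's note: the record above revolves around packaging a failed tool call as a "tool"-role message and forcing a heartbeat so the agent can react. Below is a minimal, self-contained sketch of that pattern. It is not the MemGPT implementation: `package_function_response` here is a stand-in for the helper of the same name in the record, and the JSON envelope fields are illustrative assumptions.

import json
import traceback

def package_function_response(success, payload):
    # Stand-in envelope; the exact schema is an assumption, not MemGPT's.
    return json.dumps({"status": "OK" if success else "Failed", "message": payload})

def handle_tool_failure(messages, function_name, tool_call_id, exc):
    # Serialize the failure as a "tool"-role message so the model can read it.
    error_msg = f"Error calling function {function_name}: {exc}"
    print(f"{error_msg}\n{traceback.format_exc()}")
    messages.append({
        "role": "tool",
        "name": function_name,
        "content": package_function_response(False, error_msg),
        "tool_call_id": tool_call_id,
    })
    # heartbeat_request=True forces another agent step so the model can
    # react to the error instead of waiting for the next user message.
    return messages, None, True

try:
    raise ValueError("boom")
except ValueError as e:
    msgs, _, heartbeat = handle_tool_failure([], "send_message", "call_0", e)
print(msgs[0]["content"], heartbeat)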
PixArt-alpha/PixArt-alpha
train_scripts/train_pixart_lcm.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_s...
import os import sys import types import argparse import datetime import time import warnings import torch import torch.nn as nn import numpy as np import torch.nn.functional as F from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from diffusers.models import AutoencoderKL from torch.utils.data import RandomSampler from mmcv.runner import LogBuffer from copy import deepcopy from tqdm import tqdm from diffusion import IDDPM from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_ from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.model.builder import build_model from diffusion.utils.logger import get_root_logger from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from diffusion.lcm_scheduler import LCMScheduler from torchvision.utils import save_image from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
16,474
w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses(model, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after # noisy_latents with both the conditioning embedding c and unconditional embedding 0 # Get teacher model prediction on noisy_latents and conditional embedding with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info), noise=noise) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses(model_ema, x_prev.float(), timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), skip_noise=True) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) # Backpropagation on the online student model (`model`) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) if accelerator.sync_gradients: ema_update(model_ema, model, config.ema_decay) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch [{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + 
start_step) global_step += 1 data_time_start= time.time() synchronize() torch.cuda.empty_cache() if accelerator.is_main_process: # log_validation(model_ema, step, model.device) if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() synchronize() if accelerator.is_main_process: if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume-from', help='the dir to resume the training') parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training') parser.add_argument('--local-rank', type=int, default=-1) parser.add_argument('--local_rank', type=int, default=-1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args()
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def ema_update(model_dest: nn.Module, model_src: nn.Module, rate): param_dict_src = dict(model_src.named_parameters()) for p_name, p_dest in model_dest.named_parameters(): p_src = param_dict_src[p_name] assert p_src is not p_dest p_dest.data.mul_(rate).add_((1 - rate) * p_src.data) def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2) c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5 return c_skip, c_out def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev @torch.no_grad() def log_validation(model, step, device): if hasattr(model, 'module'): model = model.module scheduler = LCMScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear", prediction_type="epsilon") scheduler.set_timesteps(4, 50) infer_timesteps = scheduler.timesteps dog_embed = torch.load('data/tmp/dog.pth', map_location='cpu') caption_embs, emb_masks = dog_embed['dog_text'].to(device), dog_embed['dog_mask'].to(device) hw = torch.tensor([[1024, 1024]], dtype=torch.float, device=device).repeat(1, 1) ar = torch.tensor([[1.]], device=device).repeat(1, 1) # Create sampling noise: infer_latents = torch.randn(1, 4, 1024, 1024, device=device) model_kwargs = dict(data_info={'img_hw': hw, 'aspect_ratio': ar}, mask=emb_masks) logger.info("Running validation... ") # 7. 
LCM MultiStep Sampling Loop: for i, t in tqdm(list(enumerate(infer_timesteps))): ts = torch.full((1,), t, device=device, dtype=torch.long) # model prediction (v-prediction, eps, x) model_pred = model(infer_latents, ts, caption_embs, **model_kwargs)[:, :4] # compute the previous noisy sample x_t -> x_t-1 infer_latents, denoised = scheduler.step(model_pred, i, t, infer_latents, return_dict=False) samples = vae.decode(denoised / 0.18215).sample torch.cuda.empty_cache() save_image(samples[0], f'output_cv/vis/{step}.jpg', nrow=1, normalize=True, value_range=(-1, 1)) def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) # Create uncond embeds for classifier free guidance uncond_prompt_embeds = model.module.y_embedder.y_embedding.repeat(config.train_batch_size, 1, 1, 1) # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start= time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start if load_vae_feat: z = batch[0] else: with torch.no_grad(): with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'): posterior = vae.encode(batch[0]).latent_dist if config.sample_posterior: z = posterior.sample() else: z = posterior.mode() latents = z * config.scale_factor y = batch[1] y_mask = batch[2] data_info = batch[3] # Sample a random timestep for each image grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias. topk = config.train_sampling_steps // config.num_ddim_timesteps index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # Get boundary scalings for start_timesteps and (end) timesteps. 
c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions(timesteps) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # Sample a random guidance scale w from U[w_min, w_max] and embed it # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min w = config.cfg_scale * torch.ones((bsz,)) w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses(model, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after # noisy_latents with both the conditioning embedding c and unconditional embedding 0 # Get teacher model prediction on noisy_latents and conditional embedding with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info), noise=noise) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses(model_ema, x_prev.float(), timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), skip_noise=True) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) # Backpropagation on the online student model (`model`) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) if accelerator.sync_gradients: ema_update(model_ema, model, config.ema_decay) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch 
[{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) global_step += 1 data_time_start= time.time() synchronize() torch.cuda.empty_cache() if accelerator.is_main_process: # log_validation(model_ema, step, model.device) if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() synchronize() if accelerator.is_main_process: if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume-from', help='the dir to resume the training') parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training') parser.add_argument('--local-rank', type=int, default=-1) parser.add_argument('--local_rank', type=int, default=-1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args()
config = read_config(args.config)
12
2023-10-12 14:16:33+00:00
24k
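Editor's note: the core of the PixArt record above is the LCM consistency target `c_skip * x_prev + c_out * pred_x_0`. The snippet below is a minimal numeric sketch of just that piece, on random stand-in tensors rather than real latents; note that the record's hard-coded `timestep / 0.1` equals `timestep * timestep_scaling` at the default scaling of 10.

import torch

def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0):
    # c_skip -> 1 and c_out -> 0 as t -> 0, so the consistency function
    # reduces to the identity at the data boundary.
    scaled_t = timestep * timestep_scaling
    c_skip = sigma_data**2 / (scaled_t**2 + sigma_data**2)
    c_out = scaled_t / (scaled_t**2 + sigma_data**2) ** 0.5
    return c_skip, c_out

def append_dims(x, target_dims):
    # Right-pad with singleton dims so per-sample scalars broadcast over latents.
    return x[(...,) + (None,) * (target_dims - x.ndim)]

bsz = 2
timesteps = torch.randint(0, 1000, (bsz,)).float()
x_prev = torch.randn(bsz, 4, 8, 8)    # ODE-solver estimate of z_{t_n}
pred_x_0 = torch.randn(bsz, 4, 8, 8)  # EMA (target) model's x0 prediction
c_skip, c_out = (append_dims(c, x_prev.ndim)
                 for c in scalings_for_boundary_conditions(timesteps))
target = c_skip * x_prev + c_out * pred_x_0  # distillation target for the student
print(target.shape)  # torch.Size([2, 4, 8, 8])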
NVlabs/EmerNeRF
datasets/nuscenes.py
[ { "identifier": "SceneLidarSource", "path": "datasets/base/lidar_source.py", "snippet": "class SceneLidarSource(abc.ABC):\n \"\"\"\n The base class for the lidar source of a scene.\n \"\"\"\n\n data_cfg: OmegaConf = None\n # the normalized timestamps of all points (normalized to [0, 1]), ...
import json import logging import os import numpy as np import torch from typing import Dict from nuscenes.nuscenes import LidarPointCloud, NuScenes from omegaconf import OmegaConf from pyquaternion import Quaternion from torch import Tensor from tqdm import trange from datasets.base.lidar_source import SceneLidarSource from datasets.base.pixel_source import ScenePixelSource from datasets.base.scene_dataset import SceneDataset from datasets.base.split_wrapper import SplitWrapper from datasets.utils import voxel_coords_to_world_coords from radiance_fields.video_utils import save_videos, depth_visualizer from utils.misc import NumpyEncoder
17,396
split_indices=np.arange(self.pixel_source.num_imgs).tolist(), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) if self.lidar_source is not None: train_lidar_set = SplitWrapper( datasource=self.lidar_source, # the number of image timesteps is different from the number of lidar timesteps # TODO: find a better way to handle this # currently use all the lidar timesteps for training split_indices=np.arange(self.lidar_source.num_timesteps), split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_lidar_set = SplitWrapper( datasource=self.lidar_source, # cover all the lidar scans split_indices=np.arange(self.lidar_source.num_timesteps), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) pixel_set = (train_pixel_set, test_pixel_set, full_pixel_set) lidar_set = (train_lidar_set, test_lidar_set, full_lidar_set) return pixel_set, lidar_set def build_data_source(self): pixel_source, lidar_source = None, None all_timestamps = [] # ---- create pixel source ---- # load_pixel = ( self.data_cfg.pixel_source.load_rgb or self.data_cfg.pixel_source.load_sky_mask or self.data_cfg.pixel_source.load_dynamic_mask or self.data_cfg.pixel_source.load_feature ) if load_pixel: pixel_source = NuScenesPixelSource( pixel_data_config=self.data_cfg.pixel_source, data_path=self.data_path, scene_idx=self.scene_idx, meta_file_path=self.img_meta_file_path, start_timestep=self.data_cfg.start_timestep, end_timestep=self.data_cfg.end_timestep, ) pixel_source.to(self.device) all_timestamps.append(pixel_source.timestamps) self.start_timestep = pixel_source.start_timestep self.end_timestep = pixel_source.end_timestep self.scene_fraction = pixel_source.scene_fraction # ---- create lidar source ---- # if self.data_cfg.lidar_source.load_lidar: lidar_source = NuScenesLiDARSource( lidar_data_config=self.data_cfg.lidar_source, data_path=self.data_path, meta_file_path=self.lidar_meta_file_path, nusc=pixel_source.nusc if pixel_source is not None else None, scene_idx=self.scene_idx, start_timestep=self.start_timestep, fraction=self.scene_fraction, global_to_initial_ego=pixel_source.global_to_initial_ego, ) lidar_source.to(self.device) all_timestamps.append(lidar_source.timestamps) assert len(all_timestamps) > 0, "No data source is loaded" all_timestamps = torch.cat(all_timestamps, dim=0) # normalize the timestamps all_timestamps = (all_timestamps - all_timestamps.min()) / ( all_timestamps.max() - all_timestamps.min() ) all_timestamps = all_timestamps.float() if pixel_source is not None: pixel_source.register_normalized_timestamps( all_timestamps[: len(pixel_source.timestamps)] ) if lidar_source is not None: lidar_source.register_normalized_timestamps( all_timestamps[-len(lidar_source.timestamps) :] ) return pixel_source, lidar_source def split_train_test(self): assert ( self.data_cfg.pixel_source.test_image_stride == 0 ), "test_image_stride > 0 is not supported for nuscenes dataset. 
" if self.data_cfg.pixel_source.test_image_stride != 0: test_timesteps = np.arange( self.data_cfg.pixel_source.test_image_stride, self.num_img_timesteps, self.data_cfg.pixel_source.test_image_stride, ) else: test_timesteps = [] train_timesteps = np.array( [i for i in range(self.num_img_timesteps) if i not in test_timesteps] ) logger.info( f"Train timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[train_timesteps]}" ) logger.info( f"Test timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[test_timesteps]}" ) # propagate the train and test timesteps to the train and test indices train_indices, test_indices = [], [] for t in range(self.num_img_timesteps): if t in train_timesteps: for cam in range(self.pixel_source.num_cams): train_indices.append(t * self.pixel_source.num_cams + cam) elif t in test_timesteps: for cam in range(self.pixel_source.num_cams): test_indices.append(t * self.pixel_source.num_cams + cam) logger.info(f"Number of train indices: {len(train_indices)}") logger.info(f"Train indices: {train_indices}") logger.info(f"Number of test indices: {len(test_indices)}") logger.info(f"Test indices: {test_indices}") return train_timesteps, test_timesteps, train_indices, test_indices
logger = logging.getLogger() class NuScenesPixelSource(ScenePixelSource): ORIGINAL_SIZE = [[900, 1600] for _ in range(6)] OPENCV2DATASET = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) def __init__( self, pixel_data_config: OmegaConf, data_path: str, meta_file_path: str, nusc: NuScenes = None, scene_idx: int = 0, start_timestep: int = 0, end_timestep: int = -1, device: torch.device = torch.device("cpu"), ): pixel_data_config.load_dynamic_mask = False logger.info("[Pixel] Overriding load_dynamic_mask to False") super().__init__(pixel_data_config, device=device) self.data_path = data_path self.meta_file_path = meta_file_path self.start_timestep = start_timestep self.end_timestep = end_timestep self.nusc = nusc self.scene_idx = scene_idx self.meta_dict = self.create_or_load_metas() self.create_all_filelist() self.load_data() def create_or_load_metas(self): # ---- define camera list ---- # if self.num_cams == 1: self.camera_list = ["CAM_FRONT"] elif self.num_cams == 3: self.camera_list = ["CAM_FRONT_LEFT", "CAM_FRONT", "CAM_FRONT_RIGHT"] elif self.num_cams == 6: self.camera_list = [ "CAM_FRONT_LEFT", "CAM_FRONT", "CAM_FRONT_RIGHT", "CAM_BACK_LEFT", "CAM_BACK", "CAM_BACK_RIGHT", ] else: raise NotImplementedError( f"num_cams: {self.num_cams} not supported for nuscenes dataset" ) if os.path.exists(self.meta_file_path): with open(self.meta_file_path, "r") as f: meta_dict = json.load(f) logger.info(f"[Pixel] Loaded camera meta from {self.meta_file_path}") return meta_dict else: logger.info(f"[Pixel] Creating camera meta at {self.meta_file_path}") if self.nusc is None: self.nusc = NuScenes( version="v1.0-trainval", dataroot=self.data_path, verbose=True ) self.scene = self.nusc.scene[self.scene_idx] total_camera_list = [ "CAM_FRONT_LEFT", "CAM_FRONT", "CAM_FRONT_RIGHT", "CAM_BACK_LEFT", "CAM_BACK", "CAM_BACK_RIGHT", ] meta_dict = { camera: { "timestamp": [], "filepath": [], "ego_pose": [], "cam_id": [], "extrinsics": [], "intrinsics": [], } for i, camera in enumerate(total_camera_list) } # ---- get the first sample of each camera ---- # current_camera_data_tokens = {camera: None for camera in total_camera_list} first_sample = self.nusc.get("sample", self.scene["first_sample_token"]) for camera in total_camera_list: current_camera_data_tokens[camera] = first_sample["data"][camera] while not all(token == "" for token in current_camera_data_tokens.values()): for i, camera in enumerate(total_camera_list): # skip if the current camera data token is empty if current_camera_data_tokens[camera] == "": continue current_camera_data = self.nusc.get( "sample_data", current_camera_data_tokens[camera] ) # ---- timestamp and cam_id ---- # meta_dict[camera]["cam_id"].append(i) meta_dict[camera]["timestamp"].append(current_camera_data["timestamp"]) meta_dict[camera]["filepath"].append(current_camera_data["filename"]) # ---- intrinsics and extrinsics ---- # calibrated_sensor_record = self.nusc.get( "calibrated_sensor", current_camera_data["calibrated_sensor_token"] ) # intrinsics intrinsic = calibrated_sensor_record["camera_intrinsic"] meta_dict[camera]["intrinsics"].append(np.array(intrinsic)) # extrinsics extrinsic = np.eye(4) extrinsic[:3, :3] = Quaternion( calibrated_sensor_record["rotation"] ).rotation_matrix extrinsic[:3, 3] = np.array(calibrated_sensor_record["translation"]) meta_dict[camera]["extrinsics"].append(extrinsic) # ---- ego pose ---- # ego_pose_record = self.nusc.get( "ego_pose", current_camera_data["ego_pose_token"] ) ego_pose = np.eye(4) ego_pose[:3, :3] = Quaternion( 
ego_pose_record["rotation"] ).rotation_matrix ego_pose[:3, 3] = np.array(ego_pose_record["translation"]) meta_dict[camera]["ego_pose"].append(ego_pose) current_camera_data_tokens[camera] = current_camera_data["next"] with open(self.meta_file_path, "w") as f: json.dump(meta_dict, f, cls=NumpyEncoder) logger.info(f"[Pixel] Saved camera meta to {self.meta_file_path}") return meta_dict def create_all_filelist(self): # NuScenes dataset is not synchronized, so we need to find the minimum shared # scene length, and only use the frames within the shared scene length. # we also define the start and end timestep within the shared scene length # ---- find the minimum shared scene length ---- # num_timestamps = 100000000 for camera in self.camera_list: if len(self.meta_dict[camera]["timestamp"]) < num_timestamps: num_timestamps = len(self.meta_dict[camera]["timestamp"]) logger.info(f"[Pixel] Min shared scene length: {num_timestamps}") self.scene_total_num_timestamps = num_timestamps if self.end_timestep == -1: self.end_timestep = num_timestamps - 1 else: self.end_timestep = min(self.end_timestep, num_timestamps - 1) # to make sure the last timestep is included self.end_timestep += 1 self.start_timestep = min(self.start_timestep, self.end_timestep - 1) self.scene_fraction = (self.end_timestep - self.start_timestep) / num_timestamps logger.info(f"[Pixel] Start timestep: {self.start_timestep}") logger.info(f"[Pixel] End timestep: {self.end_timestep}") img_filepaths, feat_filepaths, sky_mask_filepaths = [], [], [] # TODO: support dynamic masks for t in range(self.start_timestep, self.end_timestep): for cam_idx in self.camera_list: img_filepath = os.path.join( self.data_path, self.meta_dict[cam_idx]["filepath"][t] ) img_filepaths.append(img_filepath) sky_mask_filepaths.append( img_filepath.replace("samples", "samples_sky_mask") .replace("sweeps", "sweeps_sky_mask") .replace(".jpg", ".png") ) feat_filepaths.append( img_filepath.replace( "samples", f"samples_{self.data_cfg.feature_model_type}" ) .replace("sweeps", f"sweeps_{self.data_cfg.feature_model_type}") .replace(".jpg", ".npy") ) self.img_filepaths = np.array(img_filepaths) self.sky_mask_filepaths = np.array(sky_mask_filepaths) self.feat_filepaths = np.array(feat_filepaths) def load_calibrations(self): # compute per-image poses and intrinsics cam_to_worlds, ego_to_worlds = [], [] intrinsics, timesteps, cam_ids = [], [], [] timestamps = [] # we tranform the camera poses w.r.t. the first timestep to make the origin of # the first ego pose as the origin of the world coordinate system. initial_ego_to_global = self.meta_dict["CAM_FRONT"]["ego_pose"][ self.start_timestep ] global_to_initial_ego = np.linalg.inv(initial_ego_to_global) for t in range(self.start_timestep, self.end_timestep): ego_to_global_current = self.meta_dict["CAM_FRONT"]["ego_pose"][t] # compute ego_to_world transformation ego_to_world = global_to_initial_ego @ ego_to_global_current ego_to_worlds.append(ego_to_world) for cam_name in self.camera_list: cam_to_ego = self.meta_dict[cam_name]["extrinsics"][t] # Because we use opencv coordinate system to generate camera rays, # we need to store the transformation from opencv coordinate system to dataset # coordinate system. However, the nuScenes dataset uses the same coordinate # system as opencv, so we just store the identity matrix. 
# opencv coordinate system: x right, y down, z front cam_to_ego = cam_to_ego @ self.OPENCV2DATASET cam2world = ego_to_world @ cam_to_ego cam_to_worlds.append(cam2world) intrinsics.append(self.meta_dict[cam_name]["intrinsics"][t]) timesteps.append(t) cam_ids.append(self.meta_dict[cam_name]["cam_id"][t]) timestamps.append( self.meta_dict[cam_name]["timestamp"][t] / 1e6 * np.ones_like(self.meta_dict[cam_name]["cam_id"][t]) ) self.intrinsics = torch.from_numpy(np.stack(intrinsics, axis=0)).float() # scale the intrinsics according to the load size self.intrinsics[..., 0, 0] *= ( self.data_cfg.load_size[1] / self.ORIGINAL_SIZE[0][1] ) self.intrinsics[..., 1, 1] *= ( self.data_cfg.load_size[0] / self.ORIGINAL_SIZE[0][0] ) self.intrinsics[..., 0, 2] *= ( self.data_cfg.load_size[1] / self.ORIGINAL_SIZE[0][1] ) self.intrinsics[..., 1, 2] *= ( self.data_cfg.load_size[0] / self.ORIGINAL_SIZE[0][0] ) self.cam_to_worlds = torch.from_numpy(np.stack(cam_to_worlds, axis=0)).float() self.ego_to_worlds = torch.from_numpy(np.stack(ego_to_worlds, axis=0)).float() self.global_to_initial_ego = torch.from_numpy(global_to_initial_ego).float() self.cam_ids = torch.from_numpy(np.stack(cam_ids, axis=0)).long() # the underscore here is important. self._timestamps = torch.tensor(timestamps, dtype=torch.float64) self._timesteps = torch.from_numpy(np.stack(timesteps, axis=0)).long() class NuScenesLiDARSource(SceneLidarSource): def __init__( self, lidar_data_config: OmegaConf, data_path: str, meta_file_path: str, nusc: NuScenes, scene_idx: int, start_timestep: int, fraction: float, # a value in [0, 1] to indicate the fraction of the scene to use global_to_initial_ego: Tensor, ): super().__init__(lidar_data_config) self.data_path = data_path self.meta_file_path = meta_file_path self.nusc = nusc self.scene_idx = scene_idx self.start_timestep = start_timestep # because the lidar data is not synchronized with the image data, we need to # define the end timestep based on the fraction of the scene to use self.fraction = fraction self.global_to_initial_ego = global_to_initial_ego.numpy() self.meta_dict = self.create_or_load_metas() self.create_all_filelist() self.load_data() def create_or_load_metas(self): if os.path.exists(self.meta_file_path): with open(self.meta_file_path, "r") as f: meta_dict = json.load(f) logger.info(f"[Lidar] Loaded lidar meta from {self.meta_file_path}") return meta_dict else: logger.info(f"[Lidar] Creating lidar meta at {self.meta_file_path}") if self.nusc is None: self.nusc = NuScenes( version="v1.0-trainval", dataroot=self.data_path, verbose=True ) self.scene = self.nusc.scene[self.scene_idx] meta_dict = { "timestamp": [], "filepath": [], "extrinsics": [], "ego_pose": [], } # ---- obtain initial pose ---- # first_sample = self.nusc.get("sample", self.scene["first_sample_token"]) current_data_token = first_sample["data"]["LIDAR_TOP"] while current_data_token != "": current_lidar_data = self.nusc.get("sample_data", current_data_token) # ---- timestamp and cam_id ---- # meta_dict["timestamp"].append(current_lidar_data["timestamp"]) meta_dict["filepath"].append(current_lidar_data["filename"]) # ---- extrinsics ---- # calibrated_sensor_record = self.nusc.get( "calibrated_sensor", current_lidar_data["calibrated_sensor_token"] ) extrinsic = np.eye(4) extrinsic[:3, :3] = Quaternion( calibrated_sensor_record["rotation"] ).rotation_matrix extrinsic[:3, 3] = np.array(calibrated_sensor_record["translation"]) meta_dict["extrinsics"].append(extrinsic) # ---- ego pose ---- # ego_pose_record = self.nusc.get( 
"ego_pose", current_lidar_data["ego_pose_token"] ) ego_pose = np.eye(4) ego_pose[:3, :3] = Quaternion(ego_pose_record["rotation"]).rotation_matrix ego_pose[:3, 3] = np.array(ego_pose_record["translation"]) meta_dict["ego_pose"].append(ego_pose) current_data_token = current_lidar_data["next"] with open(self.meta_file_path, "w") as f: json.dump(meta_dict, f, cls=NumpyEncoder) logger.info(f"[Lidar] Saved lidar meta to {self.meta_file_path}") return meta_dict def create_all_filelist(self): # ---- define filepaths ---- # num_timestamps = len(self.meta_dict["timestamp"]) self.end_timestep = int(num_timestamps * self.fraction) self.start_timestep = min(self.start_timestep, self.end_timestep - 1) logger.info(f"[Lidar] Start timestep: {self.start_timestep}") logger.info(f"[Lidar] End timestep: {self.end_timestep}") lidar_filepaths = [] for t in range(self.start_timestep, self.end_timestep): lidar_filepaths.append( os.path.join(self.data_path, self.meta_dict["filepath"][t]) ) self.lidar_filepaths = np.array(lidar_filepaths) def load_calibrations(self): lidar_to_worlds, ego_to_worlds = [], [] # we tranform the poses w.r.t. the first timestep to make the origin of the # first ego pose as the origin of the world coordinate system. for t in range(self.start_timestep, self.end_timestep): lidar_to_ego = np.array(self.meta_dict["extrinsics"][t]) ego_to_global_current = np.array(self.meta_dict["ego_pose"][t]) # compute ego_to_world transformation ego_to_world = self.global_to_initial_ego @ ego_to_global_current ego_to_worlds.append(ego_to_world) lidar_to_worlds.append(ego_to_world @ lidar_to_ego) self.lidar_to_worlds = torch.from_numpy( np.stack(lidar_to_worlds, axis=0) ).float() self.ego_to_worlds = torch.from_numpy(np.stack(ego_to_worlds, axis=0)).float() def load_lidar(self): origins, directions, ranges, timesteps = [], [], [], [] laser_ids = [] timestamps = [] accumulated_num_original_rays = 0 accumulated_num_rays = 0 for t in trange( 0, len(self.lidar_filepaths), desc="Loading lidar", dynamic_ncols=True ): lidar_pc = LidarPointCloud.from_file(self.lidar_filepaths[t]) lidar_pc.remove_close(1.0) pc = lidar_pc.points[:3, :].T pc = np.hstack((pc, np.ones((pc.shape[0], 1)))) pc = torch.from_numpy(pc).float() lidar_points = pc @ self.lidar_to_worlds[t].T lidar_points = lidar_points[:, :3] lidar_origins = ( self.lidar_to_worlds[t][:3, 3] .unsqueeze(0) .repeat(lidar_points.shape[0], 1) ) lidar_directions = lidar_points - lidar_origins lidar_ranges = torch.norm(lidar_directions, dim=-1, keepdim=True) lidar_directions = lidar_directions / lidar_ranges accumulated_num_original_rays += len(lidar_pc.points[0]) valid_mask = torch.ones_like(lidar_origins[:, 0]).bool() if self.data_cfg.truncated_max_range is not None: valid_mask = lidar_points[:, 0] < self.data_cfg.truncated_max_range if self.data_cfg.truncated_min_range is not None: valid_mask = valid_mask & ( lidar_points[:, 0] > self.data_cfg.truncated_min_range ) lidar_origins = lidar_origins[valid_mask] lidar_directions = lidar_directions[valid_mask] lidar_ranges = lidar_ranges[valid_mask] lidar_timestep = torch.ones_like(lidar_ranges).squeeze(-1) * t lidar_ids = torch.zeros_like(lidar_origins[:, 0]).long() accumulated_num_rays += len(lidar_ranges) origins.append(lidar_origins) directions.append(lidar_directions) ranges.append(lidar_ranges) timesteps.append(lidar_timestep) laser_ids.append(lidar_ids) timestamps.append( self.meta_dict["timestamp"][t] / 1e6 * torch.ones_like(lidar_ids, dtype=torch.float64) ) logger.info( f"[Lidar] Number of lidar rays: 
{accumulated_num_rays} " f"({accumulated_num_rays / accumulated_num_original_rays * 100:.2f}%) of " f"{accumulated_num_original_rays} original rays)" ) logger.info("[Lidar] Filter condition:") logger.info(f" only_use_top_lidar: {self.data_cfg.only_use_top_lidar}") logger.info(f" truncated_max_range: {self.data_cfg.truncated_max_range}") logger.info(f" truncated_min_range: {self.data_cfg.truncated_min_range}") self.origins = torch.cat(origins, dim=0) self.directions = torch.cat(directions, dim=0) self.ranges = torch.cat(ranges, dim=0) self._timesteps = torch.cat(timesteps, dim=0) self.laser_ids = torch.cat(laser_ids, dim=0) self._timestamps = torch.cat(timestamps, dim=0) def sample_uniform_rays( self, num_rays: int, candidate_indices: Tensor = None, ): # in nuscenes, we don't support novel view synthesis yet, so we don't need to # use candidate indices self.cached_origins = self.origins self.cached_directions = self.directions self.cached_ranges = self.ranges self.cached_normalized_timestamps = self.normalized_timestamps return torch.randint( 0, len(self.cached_origins), size=(num_rays,), device=self.device, ) class NuScenesDataset(SceneDataset): dataset: str = "nuscenes" def __init__( self, data_cfg: OmegaConf, ) -> None: super().__init__(data_cfg) assert self.data_cfg.dataset == "nuscenes" self.data_path = self.data_cfg.data_root self.processed_data_path = os.path.join( self.data_path, "emernerf_metas", f"{self.scene_idx:03d}" ) if not os.path.exists(self.processed_data_path): os.makedirs(self.processed_data_path) self.img_meta_file_path = os.path.join( self.processed_data_path, "img_meta.json" ) self.lidar_meta_file_path = os.path.join( self.processed_data_path, "lidar_meta.json" ) # ---- create pixel source ---- # self.pixel_source, self.lidar_source = self.build_data_source() self.aabb = self.get_aabb() # ---- define train and test indices ---- # ( self.train_timesteps, self.test_timesteps, self.train_indices, self.test_indices, ) = self.split_train_test() # ---- create split wrappers ---- # pixel_sets, lidar_sets = self.build_split_wrapper() self.train_pixel_set, self.test_pixel_set, self.full_pixel_set = pixel_sets self.train_lidar_set, self.test_lidar_set, self.full_lidar_set = lidar_sets def build_split_wrapper(self): """ Makes each data source as a Pytorch Dataset """ train_pixel_set, test_pixel_set, full_pixel_set = None, None, None train_lidar_set, test_lidar_set, full_lidar_set = None, None, None assert ( len(self.test_indices) == 0 ), "Test split is not supported yet for nuscenes" # ---- create split wrappers ---- # if self.pixel_source is not None: train_pixel_set = SplitWrapper( datasource=self.pixel_source, # train_indices are img indices, so the length is num_cams * num_train_timesteps split_indices=self.train_indices, split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_pixel_set = SplitWrapper( datasource=self.pixel_source, # cover all the images split_indices=np.arange(self.pixel_source.num_imgs).tolist(), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) if self.lidar_source is not None: train_lidar_set = SplitWrapper( datasource=self.lidar_source, # the number of image timesteps is different from the number of lidar timesteps # TODO: find a better way to handle this # currently use all the lidar timesteps for training split_indices=np.arange(self.lidar_source.num_timesteps), split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_lidar_set = SplitWrapper( datasource=self.lidar_source, # cover all the lidar scans 
split_indices=np.arange(self.lidar_source.num_timesteps), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) pixel_set = (train_pixel_set, test_pixel_set, full_pixel_set) lidar_set = (train_lidar_set, test_lidar_set, full_lidar_set) return pixel_set, lidar_set def build_data_source(self): pixel_source, lidar_source = None, None all_timestamps = [] # ---- create pixel source ---- # load_pixel = ( self.data_cfg.pixel_source.load_rgb or self.data_cfg.pixel_source.load_sky_mask or self.data_cfg.pixel_source.load_dynamic_mask or self.data_cfg.pixel_source.load_feature ) if load_pixel: pixel_source = NuScenesPixelSource( pixel_data_config=self.data_cfg.pixel_source, data_path=self.data_path, scene_idx=self.scene_idx, meta_file_path=self.img_meta_file_path, start_timestep=self.data_cfg.start_timestep, end_timestep=self.data_cfg.end_timestep, ) pixel_source.to(self.device) all_timestamps.append(pixel_source.timestamps) self.start_timestep = pixel_source.start_timestep self.end_timestep = pixel_source.end_timestep self.scene_fraction = pixel_source.scene_fraction # ---- create lidar source ---- # if self.data_cfg.lidar_source.load_lidar: lidar_source = NuScenesLiDARSource( lidar_data_config=self.data_cfg.lidar_source, data_path=self.data_path, meta_file_path=self.lidar_meta_file_path, nusc=pixel_source.nusc if pixel_source is not None else None, scene_idx=self.scene_idx, start_timestep=self.start_timestep, fraction=self.scene_fraction, global_to_initial_ego=pixel_source.global_to_initial_ego, ) lidar_source.to(self.device) all_timestamps.append(lidar_source.timestamps) assert len(all_timestamps) > 0, "No data source is loaded" all_timestamps = torch.cat(all_timestamps, dim=0) # normalize the timestamps all_timestamps = (all_timestamps - all_timestamps.min()) / ( all_timestamps.max() - all_timestamps.min() ) all_timestamps = all_timestamps.float() if pixel_source is not None: pixel_source.register_normalized_timestamps( all_timestamps[: len(pixel_source.timestamps)] ) if lidar_source is not None: lidar_source.register_normalized_timestamps( all_timestamps[-len(lidar_source.timestamps) :] ) return pixel_source, lidar_source def split_train_test(self): assert ( self.data_cfg.pixel_source.test_image_stride == 0 ), "test_image_stride > 0 is not supported for nuscenes dataset. " if self.data_cfg.pixel_source.test_image_stride != 0: test_timesteps = np.arange( self.data_cfg.pixel_source.test_image_stride, self.num_img_timesteps, self.data_cfg.pixel_source.test_image_stride, ) else: test_timesteps = [] train_timesteps = np.array( [i for i in range(self.num_img_timesteps) if i not in test_timesteps] ) logger.info( f"Train timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[train_timesteps]}" ) logger.info( f"Test timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[test_timesteps]}" ) # propagate the train and test timesteps to the train and test indices train_indices, test_indices = [], [] for t in range(self.num_img_timesteps): if t in train_timesteps: for cam in range(self.pixel_source.num_cams): train_indices.append(t * self.pixel_source.num_cams + cam) elif t in test_timesteps: for cam in range(self.pixel_source.num_cams): test_indices.append(t * self.pixel_source.num_cams + cam) logger.info(f"Number of train indices: {len(train_indices)}") logger.info(f"Train indices: {train_indices}") logger.info(f"Number of test indices: {len(test_indices)}") logger.info(f"Test indices: {test_indices}") return train_timesteps, test_timesteps, train_indices, test_indices
def save_videos(self, video_dict, **kwargs):
5
2023-10-11 20:56:27+00:00
24k
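Editor's note: a subtle point in the EmerNeRF record above is that camera and lidar streams are unsynchronized, so `build_data_source` min-max normalizes their timestamps against the joint range before splitting them back per source. A small runnable sketch of that step, with hypothetical microsecond-scale timestamps standing in for real sensor data:

import torch

def normalize_shared_timestamps(pixel_ts, lidar_ts):
    # Normalize both streams against their *joint* min/max, mirroring
    # build_data_source above, so both sensors share one [0, 1] time axis.
    all_ts = torch.cat([pixel_ts, lidar_ts], dim=0)
    all_ts = (all_ts - all_ts.min()) / (all_ts.max() - all_ts.min())
    return all_ts[: len(pixel_ts)].float(), all_ts[len(pixel_ts):].float()

pixel_ts = torch.tensor([0.0, 100.0, 200.0], dtype=torch.float64)
lidar_ts = torch.tensor([50.0, 150.0, 250.0], dtype=torch.float64)
print(normalize_shared_timestamps(pixel_ts, lidar_ts))
# -> (tensor([0.0000, 0.4000, 0.8000]), tensor([0.2000, 0.6000, 1.0000]))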
alibaba-damo-academy/FunCodec
funcodec/models/encoder/transformer_encoder.py
[ { "identifier": "AbsEncoder", "path": "funcodec/models/encoder/abs_encoder.py", "snippet": "class AbsEncoder(torch.nn.Module, ABC):\n @abstractmethod\n def output_size(self) -> int:\n raise NotImplementedError\n\n @abstractmethod\n def forward(\n self,\n xs_pad: torch.Te...
from typing import List from typing import Optional from typing import Tuple from torch import nn from funcodec.models.encoder.abs_encoder import AbsEncoder from funcodec.modules.attention import ( MultiHeadedAttention, RelPositionMultiHeadedAttention, # noqa: H301 LegacyRelPositionMultiHeadedAttention, # noqa: H301 ) from funcodec.modules.layer_norm import LayerNorm from funcodec.modules.multi_layer_conv import Conv1dLinear from funcodec.modules.multi_layer_conv import MultiLayeredConv1d from funcodec.modules.nets_utils import make_pad_mask from funcodec.modules.embedding import ( PositionalEncoding, # noqa: H301 ScaledPositionalEncoding, # noqa: H301 RelPositionalEncoding, # noqa: H301 LegacyRelPositionalEncoding, # noqa: H301 ) from funcodec.modules.positionwise_feed_forward import ( PositionwiseFeedForward, # noqa: H301 ) from funcodec.modules.repeat import repeat from funcodec.modules.nets_utils import rename_state_dict from funcodec.modules.dynamic_conv import DynamicConvolution from funcodec.modules.dynamic_conv2d import DynamicConvolution2D from funcodec.modules.lightconv import LightweightConvolution from funcodec.modules.lightconv2d import LightweightConvolution2D from funcodec.modules.subsampling import Conv2dSubsampling from funcodec.modules.subsampling import Conv2dSubsampling2 from funcodec.modules.subsampling import Conv2dSubsampling6 from funcodec.modules.subsampling import Conv2dSubsampling8 from funcodec.modules.subsampling import TooShortUttError from funcodec.modules.subsampling import check_short_utt import torch import logging
18,117
skip_layer = False # with stochastic depth, residual connection `x + f(x)` becomes # `x <- x + 1 / (1 - p) * f(x)` at training time. stoch_layer_coeff = 1.0 if self.training and self.stochastic_depth_rate > 0: skip_layer = torch.rand(1).item() < self.stochastic_depth_rate stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate) if skip_layer: if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask residual = x if self.normalize_before: x = self.norm1(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + stoch_layer_coeff * self.concat_linear(x_concat) else: x = residual + stoch_layer_coeff * self.dropout(x_att) if not self.normalize_before: x = self.norm1(x) residual = x if self.normalize_before: x = self.norm2(x) x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm2(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class TransformerEncoder(AbsEncoder): """Transformer encoder module. Args: input_size: input dim output_size: dimension of attention attention_heads: the number of heads of multi head attention linear_units: the number of units of position-wise feed forward num_blocks: the number of decoder blocks dropout_rate: dropout rate attention_dropout_rate: dropout rate in attention positional_dropout_rate: dropout rate after adding positional encoding input_layer: input layer type pos_enc_class: PositionalEncoding or ScaledPositionalEncoding normalize_before: whether to use layer_norm before the first block concat_after: whether to concat attention layer's input and output if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type: linear of conv1d positionwise_conv_kernel_size: kernel size of positionwise conv1d layer padding_idx: padding_idx for input_layer=embed """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=PositionalEncoding, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, causal_mode: str = "None", ): super().__init__() self._output_size = output_size self.causal_mode = causal_mode if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6":
# Copyright 2019 Shigeki Karita # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Transformer encoder definition.""" class EncoderLayer(nn.Module): """Encoder layer module. Args: size (int): Input dimension. self_attn (torch.nn.Module): Self-attention module instance. `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance can be used as the argument. feed_forward (torch.nn.Module): Feed-forward module instance. `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance can be used as the argument. dropout_rate (float): Dropout rate. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) stochastic_depth_rate (float): Proability to skip this layer. During training, the layer may skip residual computation and return input as-is with given probability. """ def __init__( self, size, self_attn, feed_forward, dropout_rate, normalize_before=True, concat_after=False, stochastic_depth_rate=0.0, ): """Construct an EncoderLayer object.""" super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.norm1 = LayerNorm(size) self.norm2 = LayerNorm(size) self.dropout = nn.Dropout(dropout_rate) self.size = size self.normalize_before = normalize_before self.concat_after = concat_after if self.concat_after: self.concat_linear = nn.Linear(size + size, size) self.stochastic_depth_rate = stochastic_depth_rate def forward(self, x, mask, cache=None): """Compute encoded features. Args: x_input (torch.Tensor): Input tensor (#batch, time, size). mask (torch.Tensor): Mask tensor for the input (#batch, time). cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size). Returns: torch.Tensor: Output tensor (#batch, time, size). torch.Tensor: Mask tensor (#batch, time). """ if isinstance(x, tuple): x, pos_emb = x[0], x[1] else: x, pos_emb = x, None skip_layer = False # with stochastic depth, residual connection `x + f(x)` becomes # `x <- x + 1 / (1 - p) * f(x)` at training time. stoch_layer_coeff = 1.0 if self.training and self.stochastic_depth_rate > 0: skip_layer = torch.rand(1).item() < self.stochastic_depth_rate stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate) if skip_layer: if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask residual = x if self.normalize_before: x = self.norm1(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + stoch_layer_coeff * self.concat_linear(x_concat) else: x = residual + stoch_layer_coeff * self.dropout(x_att) if not self.normalize_before: x = self.norm1(x) residual = x if self.normalize_before: x = self.norm2(x) x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm2(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class TransformerEncoder(AbsEncoder): """Transformer encoder module. 
Args: input_size: input dim output_size: dimension of attention attention_heads: the number of heads of multi head attention linear_units: the number of units of position-wise feed forward num_blocks: the number of decoder blocks dropout_rate: dropout rate attention_dropout_rate: dropout rate in attention positional_dropout_rate: dropout rate after adding positional encoding input_layer: input layer type pos_enc_class: PositionalEncoding or ScaledPositionalEncoding normalize_before: whether to use layer_norm before the first block concat_after: whether to concat attention layer's input and output if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type: linear of conv1d positionwise_conv_kernel_size: kernel size of positionwise conv1d layer padding_idx: padding_idx for input_layer=embed """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=PositionalEncoding, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, causal_mode: str = "None", ): super().__init__() self._output_size = output_size self.causal_mode = causal_mode if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6":
self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate)
21
2023-10-07 02:00:40+00:00
24k
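Editor's note: the FunCodec `EncoderLayer` above implements stochastic depth, where `stoch_layer_coeff = 1 / (1 - p)` rescales the residual branch so the expected layer output at training time matches inference. A minimal sketch of that trick in isolation, with a plain `Linear` standing in for the attention/feed-forward sublayers:

import torch

def stochastic_depth_residual(x, sublayer, p, training):
    # Skip `sublayer` with probability p at training time; otherwise scale
    # its contribution by 1/(1-p) so E[output] equals the inference output.
    if training and p > 0:
        if torch.rand(1).item() < p:
            return x                      # layer skipped entirely
        return x + sublayer(x) / (1 - p)  # residual branch, rescaled
    return x + sublayer(x)

x = torch.randn(2, 10, 256)
ff = torch.nn.Linear(256, 256)  # stand-in sublayer, not FunCodec's module
print(stochastic_depth_residual(x, ff, p=0.3, training=True).shape)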
Beckschen/3D-TransUNet
nn_transunet/trainer/nnUNetTrainerV2_DDP.py
[ { "identifier": "nnUNetTrainerV2", "path": "nn_transunet/trainer/nnUNetTrainerV2.py", "snippet": "class nnUNetTrainerV2(nnUNetTrainer):\n \"\"\"\n Info for Fabian: same as internal nnUNetTrainerV2_2\n \"\"\"\n\n def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None,...
from genericpath import exists from _warnings import warn from collections import OrderedDict from multiprocessing import Pool from time import sleep, time from typing import Tuple from nnunet.configuration import default_num_threads from nnunet.evaluation.evaluator import aggregate_scores from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.postprocessing.connected_components import determine_postprocessing from nnunet.utilities.distributed import awesome_allgather_function from nnunet.utilities.nd_softmax import softmax_helper from nnunet.utilities.tensor_utilities import sum_tensor from nnunet.utilities.to_torch import to_cuda, maybe_to_torch from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss from nnunet.training.loss_functions.dice_loss import get_tp_fp_fn_tn from torch import nn, distributed from torch.backends import cudnn from torch.cuda.amp import autocast from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim.lr_scheduler import _LRScheduler from tqdm import trange from ..trainer.nnUNetTrainerV2 import nnUNetTrainerV2, InitWeights_He from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join, subfiles, isfile, load_pickle, \ save_json from ..data.data_augmentation_moreDA import get_moreDA_augmentation from ..data.dataset_loading import unpack_dataset from ..data.default_data_augmentation import default_2D_augmentation_params, get_patch_size, default_3D_augmentation_params from ..networks.transunet3d_model import Generic_TransUNet_max_ppbp from nnunet.training.data_augmentation.data_augmentation_insaneDA2 import get_insaneDA_augmentation2 from ..optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR from torch.optim import lr_scheduler from network_trainer import warmup_poly_lr from network_trainer import poly_lr from ..networks.transunet3d_model import HungarianMatcher3D, compute_loss_hungarian from ..utils.dist_utils import check_call_hdfs_command, mkdir_hdfs import os import shutil import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F
17,458
self.data_aug_params["independent_scale_factor_for_each_axis"] = True self.data_aug_params["p_independent_scale_per_axis"] = 0.3 self.data_aug_params["do_elastic"] = True self.data_aug_params["p_eldef"] = 0.3 # LMH 0.2 -> 0.3 according to paper self.data_aug_params["eldef_deformation_scale"] = (0, 0.25) self.data_aug_params["do_additive_brightness"] = True self.data_aug_params["additive_brightness_mu"] = 0 self.data_aug_params["additive_brightness_sigma"] = 0.2 self.data_aug_params["additive_brightness_p_per_sample"] = 0.3 self.data_aug_params["additive_brightness_p_per_channel"] = 0.5 self.data_aug_params['gamma_range'] = (0.5, 1.6) self.data_aug_params['num_cached_per_thread'] = 4 def set_batch_size_and_oversample(self): batch_sizes = [] oversample_percents = [] world_size = self.args.world_size# dist.get_world_size() my_rank = self.args.rank # dist.get_rank() # not local_rank if self.args.total_batch_size: # actually it is global_batch_size # reset the batch_size per gpu accordingly self.batch_size = self.args.total_batch_size // world_size # if self.args.local_rank == 0: # print("total_batch_size: %d, updated batch_size per gpu %d, world_size %d" % (self.args.total_batch_size, self.batch_size, world_size)) if self.distribute_batch_size: # set total batch_size to 16 will be fine... self.global_batch_size = self.batch_size else: self.global_batch_size = self.batch_size * world_size batch_size_per_GPU = np.ceil(self.batch_size / world_size).astype(int) # probably 1 for rank in range(world_size): if self.distribute_batch_size: if (rank + 1) * batch_size_per_GPU > self.batch_size: batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - self.batch_size) else: batch_size = batch_size_per_GPU else: batch_size = self.batch_size batch_sizes.append(batch_size) sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1]) sample_id_high = np.sum(batch_sizes) if sample_id_high / self.global_batch_size < (1 - self.oversample_foreground_percent): oversample_percents.append(0.0) elif sample_id_low / self.global_batch_size > (1 - self.oversample_foreground_percent): oversample_percents.append(1.0) else: percent_covered_by_this_rank = sample_id_high / self.global_batch_size - sample_id_low / self.global_batch_size oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) - sample_id_low / self.global_batch_size) / percent_covered_by_this_rank) oversample_percents.append(oversample_percent_here) print("worker", my_rank, "oversample", oversample_percents[my_rank]) print("worker", my_rank, "batch_size", batch_sizes[my_rank]) # batch_sizes [self.batch_size]*world_size self.batch_size = batch_sizes[my_rank] self.oversample_foreground_percent = oversample_percents[my_rank] def save_checkpoint(self, fname, save_optimizer=True): if self.local_rank == 0: super().save_checkpoint(fname, save_optimizer) def plot_progress(self): if self.local_rank == 0: super().plot_progress() def print_to_log_file(self, *args, also_print_to_console=True): if self.local_rank == 0: super().print_to_log_file(*args, also_print_to_console=also_print_to_console) def process_plans(self, plans): super().process_plans(plans) if (self.patch_size != self.args.crop_size).any(): self.patch_size = self.args.crop_size self.set_batch_size_and_oversample() if self.args.config.find('500Region') != -1: self.num_classes = len(self.regions) # only care about foreground (compatible with sigmoid) def initialize(self, training=True, force_load_plans=False): """ :param training: :return: """ if not 
self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() if self.args.config.find('500Region') != -1: # BraTSRegions_moreDA self.setup_DA_params_BraTSRegions() if hasattr(self.args, 'deep_supervision_scales') and len(self.args.deep_supervision_scales)>0: self.deep_supervision_scales = self.args.deep_supervision_scales # overwrite setup_DA_params() from nnUNetTrainerV2 self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: if self.local_rank == 0: print("unpacking dataset")
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #installed package class nnUNetTrainerV2_DDP(nnUNetTrainerV2): def __init__(self, plans_file, fold, local_rank, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, distribute_batch_size=False, fp16=False, model="Generic_UNet", input_size=(64, 160, 160), args=None): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.init_args = ( plans_file, fold, local_rank, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, distribute_batch_size, fp16) assert args is not None self.args = args if self.args.config.find('500Region') != -1: self.regions = {"whole tumor": (1, 2, 3), "tumor core": (2, 3), "enhancing tumor": (3,) # correct } if self.args.config.find('500RegionFix') != -1: self.regions = {"whole tumor": (1, 2, 3), "tumor core": (2, 3), "enhancing tumor": (2,) # fig 1: the innermost tumor, but this is a bug!! } self.regions_class_order = (1, 2, 3) self.layer_decay = args.layer_decay self.lr_scheduler_name = args.lrschedule # [ TO DO ] self.reclip = args.reclip self.warmup_epochs = args.warmup_epochs self.min_lr = args.min_lr self.is_spatial_aug_only = args.is_spatial_aug_only if "model_params" in args: self.model_params = args.model_params else: self.model_params = {} self.optim_name = args.optim_name self.find_zero_weight_decay = args.find_zero_weight_decay self.model = args.model self.resume = args.resume self.input_size=input_size self.disable_ds=args.disable_ds self.max_num_epochs = args.max_num_epochs # set 8 gpu training self.initial_lr = args.initial_lr # 8 * 0.01 self.weight_decay = args.weight_decay # 3e-5 in nnUNetTrainer.py self.save_every = 1 # prev 50 self.distribute_batch_size = distribute_batch_size np.random.seed(local_rank) torch.manual_seed(local_rank) if torch.cuda.is_available(): torch.cuda.manual_seed_all(local_rank) self.local_rank = local_rank if torch.cuda.is_available(): torch.cuda.set_device(local_rank) # dist.init_process_group(backend='nccl', init_method='env://') # init outside self.loss = None self.ce_loss = RobustCrossEntropyLoss() self.global_batch_size = None # we need to know this to properly steer oversample def setup_DA_params_BraTSRegions(self): # nnUNetTrainerV2.setup_DA_params(self) self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod( np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1] if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi) self.data_aug_params['rotation_y'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi) self.data_aug_params['rotation_z'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. 
* np.pi) if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size self.data_aug_params["p_rot"] = 0.3 self.data_aug_params["scale_range"] = (0.65, 1.6) self.data_aug_params["p_scale"] = 0.3 self.data_aug_params["independent_scale_factor_for_each_axis"] = True self.data_aug_params["p_independent_scale_per_axis"] = 0.3 self.data_aug_params["do_elastic"] = True self.data_aug_params["p_eldef"] = 0.3 # LMH 0.2 -> 0.3 according to paper self.data_aug_params["eldef_deformation_scale"] = (0, 0.25) self.data_aug_params["do_additive_brightness"] = True self.data_aug_params["additive_brightness_mu"] = 0 self.data_aug_params["additive_brightness_sigma"] = 0.2 self.data_aug_params["additive_brightness_p_per_sample"] = 0.3 self.data_aug_params["additive_brightness_p_per_channel"] = 0.5 self.data_aug_params['gamma_range'] = (0.5, 1.6) self.data_aug_params['num_cached_per_thread'] = 4 def set_batch_size_and_oversample(self): batch_sizes = [] oversample_percents = [] world_size = self.args.world_size# dist.get_world_size() my_rank = self.args.rank # dist.get_rank() # not local_rank if self.args.total_batch_size: # actually it is global_batch_size # reset the batch_size per gpu accordingly self.batch_size = self.args.total_batch_size // world_size # if self.args.local_rank == 0: # print("total_batch_size: %d, updated batch_size per gpu %d, world_size %d" % (self.args.total_batch_size, self.batch_size, world_size)) if self.distribute_batch_size: # set total batch_size to 16 will be fine... 
self.global_batch_size = self.batch_size else: self.global_batch_size = self.batch_size * world_size batch_size_per_GPU = np.ceil(self.batch_size / world_size).astype(int) # probably 1 for rank in range(world_size): if self.distribute_batch_size: if (rank + 1) * batch_size_per_GPU > self.batch_size: batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - self.batch_size) else: batch_size = batch_size_per_GPU else: batch_size = self.batch_size batch_sizes.append(batch_size) sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1]) sample_id_high = np.sum(batch_sizes) if sample_id_high / self.global_batch_size < (1 - self.oversample_foreground_percent): oversample_percents.append(0.0) elif sample_id_low / self.global_batch_size > (1 - self.oversample_foreground_percent): oversample_percents.append(1.0) else: percent_covered_by_this_rank = sample_id_high / self.global_batch_size - sample_id_low / self.global_batch_size oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) - sample_id_low / self.global_batch_size) / percent_covered_by_this_rank) oversample_percents.append(oversample_percent_here) print("worker", my_rank, "oversample", oversample_percents[my_rank]) print("worker", my_rank, "batch_size", batch_sizes[my_rank]) # batch_sizes [self.batch_size]*world_size self.batch_size = batch_sizes[my_rank] self.oversample_foreground_percent = oversample_percents[my_rank] def save_checkpoint(self, fname, save_optimizer=True): if self.local_rank == 0: super().save_checkpoint(fname, save_optimizer) def plot_progress(self): if self.local_rank == 0: super().plot_progress() def print_to_log_file(self, *args, also_print_to_console=True): if self.local_rank == 0: super().print_to_log_file(*args, also_print_to_console=also_print_to_console) def process_plans(self, plans): super().process_plans(plans) if (self.patch_size != self.args.crop_size).any(): self.patch_size = self.args.crop_size self.set_batch_size_and_oversample() if self.args.config.find('500Region') != -1: self.num_classes = len(self.regions) # only care about foreground (compatible with sigmoid) def initialize(self, training=True, force_load_plans=False): """ :param training: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() if self.args.config.find('500Region') != -1: # BraTSRegions_moreDA self.setup_DA_params_BraTSRegions() if hasattr(self.args, 'deep_supervision_scales') and len(self.args.deep_supervision_scales)>0: self.deep_supervision_scales = self.args.deep_supervision_scales # overwrite setup_DA_params() from nnUNetTrainerV2 self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: if self.local_rank == 0: print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
3
2023-10-11 05:19:25+00:00
24k
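The per-rank oversample split in `set_batch_size_and_oversample` above is easier to check in isolation. A hypothetical standalone re-derivation of just that arithmetic (`rank_oversample` is not a function in the repository):

import numpy as np

def rank_oversample(batch_sizes, global_batch_size, p):
    # Globally, the last fraction p of samples should be foreground-
    # oversampled; each rank gets 0.0, 1.0, or a partial share depending
    # on where its slice of the global batch falls.
    percents = []
    for rank in range(len(batch_sizes)):
        low = float(np.sum(batch_sizes[:rank]))
        high = float(np.sum(batch_sizes[:rank + 1]))
        if high / global_batch_size < (1 - p):
            percents.append(0.0)
        elif low / global_batch_size > (1 - p):
            percents.append(1.0)
        else:
            covered = (high - low) / global_batch_size
            percents.append(1 - (((1 - p) - low / global_batch_size) / covered))
    return percents

# 4 workers, batch size 2 each, 33% oversampling: the boundary between
# plain and oversampled samples falls inside rank 2.
print(rank_oversample([2, 2, 2, 2], 8, 0.33))  # [0.0, 0.0, ~0.32, 1.0]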
eai-lab/On-NAS
cifar_search.py
[ { "identifier": "genotypes", "path": "utils/genotypes.py", "snippet": "PRIMITIVES = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n \"none\",\n]\nPRIMITIVES_FEWSHOT = [\n ...
import os
import torch
import torch.nn as nn
import numpy as np
import utils.utils as utils
import random
import time
import pandas as pd
import copy
import argparse
from utils import genotypes as gt
from models.search_cnn import SearchCNNController
from models.search_cnn_PC import SearchCNNControllerPC
from task_optimizer.darts import Darts, Architect
from task_optimizer.darts import train as d_train
from tqdm import tqdm
from tqdm import tqdm
15,613
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer,
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer,
PRIMITIVES=gt.PRIMITIVES,
0
2023-10-08 02:42:27+00:00
24k
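The `_init_alpha_normalizer` dict above only stores scheduling state; the actual annealing happens elsewhere in the metanas-derived code. One plausible reading, assuming a linear `t_max -> t_min` schedule (illustrative only, not the repository's implementation):

def anneal_temperature(normalizer):
    # Linearly interpolate from t_max to t_min over max_steps.
    p = normalizer["params"]
    frac = min(p["curr_step"] / p["max_steps"], 1.0)
    return p["t_max"] + frac * (p["t_min"] - p["t_max"])

norm = {"name": "softmax", "params": {"curr_step": 50.0, "max_steps": 100.0,
                                      "t_max": 5.0, "t_min": 0.1}}
print(anneal_temperature(norm))  # 2.55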
LukeForeverYoung/UReader
serve/model_worker.py
[ { "identifier": "IO", "path": "serve/io_utils.py", "snippet": "class IO:\n @staticmethod\n def register(options):\n pass\n\n def open(self, path: str, mode: str):\n raise NotImplementedError\n\n def exists(self, path: str) -> bool:\n raise NotImplementedError\n\n def ...
from PIL import Image
from io import BytesIO
from .io_utils import IO, DefaultIO, OSS
from mplug_owl.processing_mplug_owl import MplugOwlProcessor, MplugOwlImageProcessor
from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration
from mplug_owl.configuration_mplug_owl import MplugOwlConfig
from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer
from transformers import GenerationConfig
from .model_utils import post_process_output, Stream, Iteratorize
from pathlib import Path
from mplug_owl.processing_mplug_owl import MplugOwlProcessor
from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration
from pipeline.data_utils.processors.builder import build_processors
from pipeline.data_utils.processors import *
from transformers.models.llama.tokenization_llama import LlamaTokenizer
from icecream import ic
import torch
import gradio as gr
import logging
import sys
import os
import json
import requests
import datetime
import uuid
import base64
import time
import sys
import transformers
14,627
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config
self.image_processor = build_processors(config['valid_processors'])['sft']
13
2023-10-08 06:29:02+00:00
24k
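The commented-out `ImageProcessor.__call__` above expands each `<image>` placeholder into one token per visual patch when `patch_pos_embed_type` is not 'pre'. A self-contained sketch of that expansion (hypothetical helper, not the repository's code):

from typing import List

def expand_image_tokens(prompt: str, patches_per_image: List[int],
                        patch_pos_embed_type: str = "post") -> str:
    parts = prompt.split("<image>")
    assert len(parts) == len(patches_per_image) + 1
    text = parts[0]
    for i, n_patches in enumerate(patches_per_image):
        # 'pre': v2t emits one token per image; otherwise one per patch.
        text += "<image>" if patch_pos_embed_type == "pre" else "<image>" * n_patches
        text += parts[i + 1]
    return text

print(expand_image_tokens("Describe <image> please.", [4]))
# Describe <image><image><image><image> please.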
LeapLabTHU/Rank-DETR
projects/rank_detr/configs/models/rank_detr_r50.py
[ { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of th...
import torch.nn as nn
from detectron2.modeling.backbone import ResNet, BasicStem
from detectron2.layers import ShapeSpec
from detectron2.config import LazyCall as L
from detrex.modeling.matcher import HungarianMatcher
from detrex.modeling.neck import ChannelMapper
from detrex.layers import PositionEmbeddingSine
from projects.rank_detr.modeling import (
    RankDETR,
    RankDetrTransformerEncoder,
    RankDetrTransformerDecoder,
    RankDetrTransformer,
    RankDetrCriterion,
    HighOrderMatcher,
)
18,632
model = L(RankDETR)(
    backbone=L(ResNet)(
        stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
        stages=L(ResNet.make_default_stages)(
            depth=50,
            stride_in_1x1=False,
            norm="FrozenBN",
        ),
        out_features=["res3", "res4", "res5"],
        freeze_at=1,
    ),
    position_embedding=L(PositionEmbeddingSine)(
        num_pos_feats=128,
        temperature=10000,
        normalize=True,
        offset=-0.5,
    ),
    neck=L(ChannelMapper)(
        input_shapes={
            "res3": ShapeSpec(channels=512),
            "res4": ShapeSpec(channels=1024),
            "res5": ShapeSpec(channels=2048),
        },
        in_features=["res3", "res4", "res5"],
        out_channels=256,
        num_outs=4,
        kernel_size=1,
        norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256),
    ),
    transformer=L(RankDetrTransformer)(
        encoder=L(RankDetrTransformerEncoder)(
            embed_dim=256,
            num_heads=8,
            feedforward_dim=2048,
            attn_dropout=0.0,
            ffn_dropout=0.0,
            num_layers=6,
            post_norm=False,
            use_checkpoint=False,
            num_feature_levels="${..num_feature_levels}",
        ),
        decoder=L(RankDetrTransformerDecoder)(
            embed_dim=256,
            num_heads=8,
            feedforward_dim=2048,
            attn_dropout=0.0,
            ffn_dropout=0.0,
            num_layers=6,
            return_intermediate=True,
            num_feature_levels="${..num_feature_levels}",
            use_checkpoint=False,
            look_forward_twice=True,
            num_queries_one2one="${..num_queries_one2one}",
            num_queries_one2many="${..num_queries_one2many}",
            two_stage_num_proposals="${..two_stage_num_proposals}",
            rank_adaptive_classhead="${..rank_adaptive_classhead}",
            query_rank_layer=True,
        ),
        as_two_stage="${..as_two_stage}",
        num_feature_levels=4,
        num_queries_one2one="${..num_queries_one2one}",
        num_queries_one2many="${..num_queries_one2many}",
        two_stage_num_proposals=1800,
        mixed_selection=True,
        rank_adaptive_classhead="${..rank_adaptive_classhead}",
    ),
    embed_dim=256,
    num_classes=80,
    num_queries_one2one=300,
    num_queries_one2many=1500,
    aux_loss=True,
    with_box_refine=False,
    as_two_stage=False,
criterion=L(RankDetrCriterion)(
7
2023-10-12 03:02:25+00:00
24k
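The "${..num_feature_levels}" strings in the config above are OmegaConf relative interpolations, resolved when detectron2's LazyConfig machinery instantiates the tree. A minimal sketch of the mechanism with plain OmegaConf (standalone, without detectron2):

from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "transformer": {
        "num_feature_levels": 4,
        # "${..key}" resolves against the enclosing node's parent.
        "encoder": {"num_feature_levels": "${..num_feature_levels}"},
    }
})
print(cfg.transformer.encoder.num_feature_levels)  # 4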
sakemin/cog-musicgen-remixer
predict.py
[ { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying ...
import os
import random
import torchaudio
import typing as tp
import numpy as np
import torch
import librosa
import subprocess
import math
import allin1
import pytsmod as tsm
import shutil
import shutil
from typing import Optional
from cog import BasePredictor, Input, Path
from audiocraft.models import MusicGen, MultiBandDiffusion
from audiocraft.solvers.compression import CompressionSolver
from audiocraft.models.loaders import (
    load_compression_model,
    load_lm_model,
)
from audiocraft.data.audio import audio_write
from audiocraft.models.builders import get_lm_model
from omegaconf import OmegaConf
from audiocraft.modules.btc.btc_model import BTC_model
from audiocraft.modules.btc.utils.mir_eval_modules import idx2chord
from demucs.audio import convert_audio
from demucs.apply import apply_model
14,818
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md

# We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here.
MODEL_PATH = "/src/models/"
os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH
os.environ["TORCH_HOME"] = MODEL_PATH

# Model specific imports


def _delete_param(cfg, full_name: str):
    parts = full_name.split('.')
    for part in parts[:-1]:
        if part in cfg:
            cfg = cfg[part]
        else:
            return
    OmegaConf.set_struct(cfg, False)
    if parts[-1] in cfg:
        del cfg[parts[-1]]
    OmegaConf.set_struct(cfg, True)


def load_ckpt(path, device, url=False):
    if url:
        loaded = torch.hub.load_state_dict_from_url(str(path))
    else:
        loaded = torch.load(str(path))
    cfg = OmegaConf.create(loaded['xp.cfg'])
    cfg.device = str(device)
    if cfg.device == 'cpu':
        cfg.dtype = 'float32'
    else:
        cfg.dtype = 'float16'
    _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')
    _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
    _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
    _delete_param(cfg, 'conditioners.args.drop_desc_p')

    lm = get_lm_model(loaded['xp.cfg'])
    lm.load_state_dict(loaded['model'])
    lm.eval()
    lm.cfg = cfg
    compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device)
    return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm)


class Predictor(BasePredictor):
    def setup(self, weights: Optional[Path] = None):
        """Load the model into memory to make running multiple predictions efficient"""
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.mbd = MultiBandDiffusion.get_mbd_musicgen()

    def _load_model(
        self,
        model_path: str,
        cls: Optional[any] = None,
        load_args: Optional[dict] = {},
        model_id: Optional[str] = None,
        device: Optional[str] = None,
    ) -> MusicGen:
        if device is None:
            device = self.device

        compression_model = load_compression_model(
            model_id, device=device, cache_dir=model_path
        )
lm = load_lm_model(model_id, device=device, cache_dir=model_path)
4
2023-10-09 09:55:24+00:00
24k
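`_delete_param` above silently drops one dotted key from an OmegaConf tree, lifting struct mode around the deletion and returning early if any path segment is missing. A usage sketch (assumes the `_delete_param` defined above is in scope):

from omegaconf import OmegaConf

cfg = OmegaConf.create({"conditioners": {"args": {"drop_desc_p": 0.5, "keep_me": 1}}})
OmegaConf.set_struct(cfg, True)

_delete_param(cfg, "conditioners.args.drop_desc_p")  # key removed
_delete_param(cfg, "conditioners.missing.whatever")  # no-op: returns early
print(cfg.conditioners.args)  # {'keep_me': 1}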
oracle/guardian-ai
tests/unitary/test_fairness_metrics.py
[ { "identifier": "ConsistencyScorer", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "class ConsistencyScorer(_SimpleDatasetFairnessScorer):\n \"\"\"\n Measures the consistency of a dataset.\n\n Consistency is measured as the number of ratio of instances that have a\n different...
import math
import numpy as np
import pandas as pd
import pytest
import sklearn
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from guardian_ai.fairness.metrics.dataset import (
    ConsistencyScorer,
    DatasetStatisticalParityScorer,
    SmoothedEDFScorer,
    consistency,
    dataset_statistical_parity,
    smoothed_edf,
)
from guardian_ai.fairness.metrics.model import (
    EqualizedOddsScorer,
    ErrorRateScorer,
    FalseDiscoveryRateScorer,
    FalseNegativeRateScorer,
    FalseOmissionRateScorer,
    FalsePositiveRateScorer,
    ModelStatisticalParityScorer,
    TheilIndexScorer,
    TruePositiveRateScorer,
    equalized_odds,
    error_rate,
    false_discovery_rate,
    false_negative_rate,
    false_omission_rate,
    false_positive_rate,
    model_statistical_parity,
    theil_index,
    true_positive_rate,
)
from guardian_ai.utils.exception import GuardianAITypeError, GuardianAIValueError
from tests.utils import get_dummy_dataset
21,103
# Validate for ratio distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="ratio") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure="ratio"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroup_scorer(target, y_pred, subgroups, distance_measure="ratio"), ) # Validate for diff distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="diff") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure="diff"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroup_scorer(target, y_pred, subgroups, distance_measure="diff"), ) # Do not accept other distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn( sensitive_attr_names, distance_measure="something" ) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure="something") # Do not accept None distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure=None) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure=None) else: # Accepts only None as distance_measure X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure=None) assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure=None), float ) with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn( sensitive_attr_names, distance_measure="something" ) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure="something") @pytest.mark.parametrize("scorer", DATASET_SCORERS_USING_DISTANCE) def test_dataset_scorer_distance(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = DATASET_X_Y_SCORERS[scorer] subgroup_scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Validate for ratio distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="ratio") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance( subgroup_scorer(target, subgroups, distance_measure="ratio"), float ) assert is_close( X_y_scorer(None, dataset, target), subgroup_scorer(target, subgroups, distance_measure="ratio"), ) # Validate for diff distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="diff") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance( subgroup_scorer(target, subgroups, distance_measure="diff"), float ) assert is_close( X_y_scorer(None, dataset, target), subgroup_scorer(target, subgroups, distance_measure="diff"), ) # Do not accept other distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="something") with pytest.raises(GuardianAIValueError): subgroup_scorer(target, subgroups, distance_measure="something") @pytest.mark.parametrize("scorer", MODEL_SUBGROUPS_SCORERS) def test_model_scorers_y_format(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model run_y_true_tests = scorer not in MODEL_SCORERS_ALLOWING_Y_TRUE_NONE scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Accept same number of instances y_pred = 
target.copy() assert isinstance(scorer(target, y_pred, subgroups), float) # Do not accept different number of instances if run_y_true_tests: y_pred = target[:-1].copy() with pytest.raises(GuardianAIValueError): scorer(target, y_pred, subgroups) # Do not accept multiclass classification multiclass_y = target.copy() multiclass_y[:3] = 2 # Change a few labels assert multiclass_y.nunique() == 3 # Sanity check with pytest.raises(GuardianAIValueError): scorer(multiclass_y, y_pred, subgroups) # Do not accept non-array inputs if run_y_true_tests: y_pred = target.copy() bad_target = {i: itm for i, itm in enumerate(target)}
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ @pytest.fixture(scope="module", autouse=True) def init(): np.random.seed(12345) def is_close(a, b): return math.isclose(a, b, rel_tol=1e-5) def approx_dict(d): return pytest.approx(d, rel=1e-5) MODEL_X_Y_SCORERS = { "model_statistical_parity_scorer": ModelStatisticalParityScorer, "true_positive_rate_scorer": TruePositiveRateScorer, "false_positive_rate_scorer": FalsePositiveRateScorer, "false_negative_rate_scorer": FalseNegativeRateScorer, "false_omission_rate_scorer": FalseOmissionRateScorer, "false_discovery_rate_scorer": FalseDiscoveryRateScorer, "error_rate_scorer": ErrorRateScorer, "equalized_odds_scorer": EqualizedOddsScorer, "theil_index_scorer": TheilIndexScorer, } MODEL_SUBGROUPS_SCORERS = { "model_statistical_parity_scorer": model_statistical_parity, "true_positive_rate_scorer": true_positive_rate, "false_positive_rate_scorer": false_positive_rate, "false_negative_rate_scorer": false_negative_rate, "false_omission_rate_scorer": false_omission_rate, "false_discovery_rate_scorer": false_discovery_rate, "error_rate_scorer": error_rate, "equalized_odds_scorer": equalized_odds, "theil_index_scorer": theil_index, } MODEL_SCORERS_ALLOWING_REDUCTION = list(MODEL_X_Y_SCORERS.keys()) MODEL_SCORERS_USING_DISTANCE = [ scorer for scorer in MODEL_X_Y_SCORERS if scorer != "theil_index_scorer" ] MODEL_SCORERS_ALLOWING_Y_TRUE_NONE = ["model_statistical_parity_scorer"] DATASET_X_Y_SCORERS = { "dataset_statistical_parity_scorer": DatasetStatisticalParityScorer, "consistency_scorer": ConsistencyScorer, "smoothed_edf_scorer": SmoothedEDFScorer, } DATASET_SUBGROUPS_SCORERS = { "dataset_statistical_parity_scorer": dataset_statistical_parity, "consistency_scorer": consistency, "smoothed_edf_scorer": smoothed_edf, } DATASET_SCORERS_ALLOWING_REDUCTION = ["dataset_statistical_parity_scorer"] DATASET_SCORERS_USING_DISTANCE = ["dataset_statistical_parity_scorer"] ALL_X_Y_SCORERS = {**MODEL_X_Y_SCORERS, **DATASET_X_Y_SCORERS} SENSITIVE_FEATURES_VARIATIONS = { "one_attr_two_classes": {"n_classes": (2,)}, "one_attr_n_classes": {"n_classes": (4,)}, "n_attrs": {"n_classes": (3, 4)}, } class DummyBinaryStochasticModel: def predict(self, X): return np.random.randint(0, 2, size=X.shape[0]) def create_concat_sensitive_attrs(dataset, n_classes): if not isinstance(n_classes, list): n_classes = list(n_classes) sensitive_dataset = dataset.copy() sensitive_attrs_names = [] for i, n_classes_i in enumerate(n_classes): sensitive_vals = np.array( [f"sensitive_val_{idx}" for idx in range(n_classes_i)] ) attr_name = f"sensitive_attr_{i}" sensitive_dataset = concat_sensitive_attr_column( sensitive_vals, sensitive_dataset, attr_name ) sensitive_attrs_names.append(attr_name) return sensitive_dataset, sensitive_attrs_names def concat_sensitive_attr_column(vals, dataset, attr_name): sensitive_vals = np.random.choice(vals, size=len(dataset)) sensitive_feats = pd.DataFrame(np.transpose(sensitive_vals), columns=[attr_name]) return pd.concat([dataset, sensitive_feats], axis=1) @pytest.fixture(scope="module") def model_type(): return "LogisticRegression" @pytest.fixture(scope="module") def base_dataset(): return get_dummy_dataset(n_samples=500, n_features=5, n_classes=2) @pytest.fixture( scope="module", params=SENSITIVE_FEATURES_VARIATIONS.values(), ids=SENSITIVE_FEATURES_VARIATIONS.keys(), ) def sensitive_dataset_and_model(model_type, 
base_dataset, request): dataset, target = base_dataset dataset, sensitive_attr_names = create_concat_sensitive_attrs( dataset, **request.param ) model = Pipeline( steps=[ ("preprocessor", OneHotEncoder(handle_unknown="ignore")), ("classifier", RandomForestClassifier()), ] ) model.fit(dataset, target) return dataset, target, model, sensitive_attr_names @pytest.mark.parametrize("scorer", DATASET_X_Y_SCORERS.keys()) def test_dataset_X_y_scorer_signature(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model scorer = DATASET_X_Y_SCORERS[scorer](sensitive_attr_names) # Validate call signatures assert isinstance(scorer(X=dataset, y_true=target), float) assert isinstance(scorer(None, dataset, target), float) with pytest.raises(GuardianAIValueError): scorer(dataset, target) # Two ways to call metric are equivalent assert is_close(scorer(X=dataset, y_true=target), scorer(None, dataset, target)) @pytest.mark.parametrize("scorer", DATASET_SUBGROUPS_SCORERS.keys()) def test_dataset_subgroups_scorer_signature(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Validate call signatures assert isinstance(scorer(target, subgroups), float) @pytest.mark.parametrize("scorer", DATASET_SUBGROUPS_SCORERS.keys()) def test_dataset_scorers_equivalence(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer = DATASET_X_Y_SCORERS[scorer](sensitive_attr_names) subgroup_scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Validate same value assert is_close( subgroup_scorer(target, subgroups), X_y_scorer(X=dataset, y_true=target) ) @pytest.mark.parametrize("scorer", MODEL_X_Y_SCORERS.keys()) def test_model_X_y_scorer_signature(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model scorer = MODEL_X_Y_SCORERS[scorer](sensitive_attr_names) # Validate call signature assert isinstance(scorer(model, dataset, target), float) # Utility that can ignore some input columns. 
Useful for testing when a # column is moved from X to supp_features and wanting identical model # predictions class ModelIgnoreOtherFeatures(sklearn.base.BaseEstimator): def __init__(self, model, features_to_keep): self.model = model self.features_to_keep = features_to_keep def _trim_X(self, X): features_to_drop = [ col for col in X.columns if col not in self.features_to_keep ] return X.drop(columns=features_to_drop) def predict(self, X): return self.model.predict(self._trim_X(X)) def predict_proba(self, X): return self.model.predict_proba(self._trim_X(X)) @pytest.mark.parametrize("scorer", ALL_X_Y_SCORERS.keys()) def test_X_y_scorer_supp_features(base_dataset, model_type, scorer): # Need to create our own dataset and model to test variations of supp_features dataset_no_sf, target = base_dataset dataset, sensitive_attr_names = create_concat_sensitive_attrs( dataset_no_sf, n_classes=(3, 4) ) if scorer in MODEL_X_Y_SCORERS: scorer = MODEL_X_Y_SCORERS[scorer](sensitive_attr_names) model = Pipeline( steps=[ ("preprocessor", OneHotEncoder(handle_unknown="ignore")), ("classifier", RandomForestClassifier()), ] ) model.fit(dataset_no_sf, target) model = ModelIgnoreOtherFeatures(model, dataset_no_sf.columns) else: scorer = DATASET_X_Y_SCORERS[scorer](sensitive_attr_names) model = None correct_score = scorer(model, dataset, target) # All sensitive features are in X (default) assert is_close(scorer(model, dataset, target), correct_score) # All sensitive features are in supplementary_features dataset_no_sf = dataset.drop(columns=sensitive_attr_names) all_supp_features = dataset[sensitive_attr_names] assert is_close( scorer(model, dataset_no_sf, target, supplementary_features=all_supp_features), correct_score, ) # Features are split across X and supplementary_features some_sf = [sensitive_attr_names[0]] dataset_some_sf = dataset.drop(columns=some_sf) supp_features = dataset[some_sf] assert is_close( scorer(model, dataset_some_sf, target, supplementary_features=supp_features), correct_score, ) # supplementary_features invalid type with pytest.raises(GuardianAIValueError): scorer(model, dataset, target, supplementary_features=[]) # Duplicate features between X and supplementary_features with pytest.raises(GuardianAIValueError): scorer(model, dataset, target, supplementary_features=all_supp_features) @pytest.mark.parametrize("scorer", MODEL_SUBGROUPS_SCORERS.keys()) def test_model_subgroups_scorer_signature(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] y_pred = model.predict(dataset) # Validate call signatures assert isinstance(scorer(target, y_pred, subgroups), float) @pytest.mark.parametrize("scorer", MODEL_SUBGROUPS_SCORERS.keys()) def test_model_scorers_equivalence(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer = MODEL_X_Y_SCORERS[scorer](sensitive_attr_names) subgroup_scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] y_pred = model.predict(dataset) # Validate same value assert is_close( subgroup_scorer(target, y_pred, subgroups), X_y_scorer(model, dataset, target) ) @pytest.mark.parametrize("scorer", ALL_X_Y_SCORERS.keys()) def test_X_y_scorer_sensitive_attr_formats(base_dataset, scorer): dataset, target = base_dataset sensitive_attrs_name = "sensitive_attr" if scorer in MODEL_X_Y_SCORERS: scorer = 
MODEL_X_Y_SCORERS[scorer]([sensitive_attrs_name]) model = DummyBinaryStochasticModel() else: scorer = DATASET_X_Y_SCORERS[scorer]([sensitive_attrs_name]) model = None # Accept str vals vals = [f"val_{i}" for i in range(5)] sensitive_dataset = concat_sensitive_attr_column( vals, dataset, sensitive_attrs_name ) assert isinstance(scorer(model, sensitive_dataset, target), float) # Accept categorical vals vals = list(range(5)) sensitive_vals = np.random.choice(vals, size=len(dataset)) sensitive_feats = pd.Series( np.transpose(sensitive_vals), dtype="category", name=sensitive_attrs_name ) sensitive_dataset = pd.concat([dataset, sensitive_feats], axis=1) assert isinstance(scorer(model, sensitive_dataset, target), float) # Accept bool vals vals = [True, False] sensitive_dataset = concat_sensitive_attr_column( vals, dataset, sensitive_attrs_name ) assert isinstance(scorer(model, sensitive_dataset, target), float) # Reject (non-categoricalized) integer vals vals = list(range(5)) sensitive_dataset = concat_sensitive_attr_column( vals, dataset, sensitive_attrs_name ) with pytest.raises(GuardianAIValueError): scorer(model, sensitive_dataset, target) # Reject float vals vals = np.random.rand(5) sensitive_dataset = concat_sensitive_attr_column( vals, dataset, sensitive_attrs_name ) with pytest.raises(GuardianAIValueError): scorer(model, sensitive_dataset, target) @pytest.mark.parametrize("scorer", MODEL_X_Y_SCORERS.keys()) def test_model_metrics_y_true_None(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer = MODEL_X_Y_SCORERS[scorer](sensitive_attr_names) subgroup_scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] y_pred = model.predict(dataset) if scorer in MODEL_SCORERS_ALLOWING_Y_TRUE_NONE: # Can pass y_true=None assert isinstance(X_y_scorer(model, dataset, None), float) assert isinstance(subgroup_scorer(None, y_pred, subgroups), float) # Cannot pass only two arguments with pytest.raises(GuardianAIValueError): subgroup_scorer(y_pred, subgroups) else: with pytest.raises(GuardianAIValueError): X_y_scorer(model, dataset, None) with pytest.raises(GuardianAIValueError): subgroup_scorer(None, y_pred, subgroups) @pytest.mark.parametrize("scorer", ALL_X_Y_SCORERS.keys()) def test_X_y_scorer_feature_not_in_dataset(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model if scorer in MODEL_X_Y_SCORERS: scorer_maker = MODEL_X_Y_SCORERS[scorer] else: scorer_maker = DATASET_X_Y_SCORERS[scorer] model = None # Correct output if features present scorer_obj = scorer_maker(sensitive_attr_names) assert isinstance(scorer_obj(model, dataset, target), float) # Error if one missing feature one_missing_feature = sensitive_attr_names + ["missing_feature"] scorer_obj = scorer_maker(one_missing_feature) with pytest.raises(GuardianAIValueError): scorer_obj(model, dataset, target) # Error if all missing features all_missing_features = [f"missing_feature_{i}" for i in range(3)] scorer_obj = scorer_maker(all_missing_features) with pytest.raises(GuardianAIValueError): scorer_obj(model, dataset, target) @pytest.mark.parametrize("scorer", MODEL_SCORERS_ALLOWING_REDUCTION) def test_model_scorer_reduction(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = MODEL_X_Y_SCORERS[scorer] subgroups_scorer = MODEL_SUBGROUPS_SCORERS[scorer] y_pred = model.predict(dataset) subgroups = 
dataset[sensitive_attr_names] # Mean reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="mean") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroups_scorer(target, y_pred, subgroups, reduction="mean"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroups_scorer(target, y_pred, subgroups, reduction="mean"), ) # Max reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="max") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroups_scorer(target, y_pred, subgroups, reduction="max"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroups_scorer(target, y_pred, subgroups, reduction="max"), ) # None reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction=None) X_y_result = X_y_scorer(model, dataset, target) assert isinstance(X_y_result, dict) subgroups_result = subgroups_scorer(target, y_pred, subgroups, reduction=None) assert isinstance(subgroups_result, dict) assert X_y_result == approx_dict(subgroups_result) # Other value with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="other") with pytest.raises(GuardianAIValueError): subgroups_scorer(target, y_pred, subgroups, reduction="other") @pytest.mark.parametrize("scorer", DATASET_SCORERS_ALLOWING_REDUCTION) def test_dataset_scorer_reduction(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = DATASET_X_Y_SCORERS[scorer] subgroups_scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Mean reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="mean") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance(subgroups_scorer(target, subgroups, reduction="mean"), float) assert is_close( X_y_scorer(None, dataset, target), subgroups_scorer(target, subgroups, reduction="mean"), ) # Max reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="max") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance(subgroups_scorer(target, subgroups, reduction="max"), float) assert is_close( X_y_scorer(None, dataset, target), subgroups_scorer(target, subgroups, reduction="max"), ) # None reduction X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction=None) X_y_result = X_y_scorer(None, dataset, target) assert isinstance(X_y_result, dict) subgroups_result = subgroups_scorer(target, subgroups, reduction=None) assert isinstance(subgroups_result, dict) assert X_y_result == approx_dict(subgroups_result) # Other value with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, reduction="other") with pytest.raises(GuardianAIValueError): subgroups_scorer(target, subgroups, reduction="other") @pytest.mark.parametrize("scorer", MODEL_X_Y_SCORERS.keys()) def test_model_scorer_distance(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = MODEL_X_Y_SCORERS[scorer] subgroup_scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] y_pred = model.predict(dataset) if scorer in MODEL_SCORERS_USING_DISTANCE: # Validate for ratio distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="ratio") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure="ratio"), float ) assert 
is_close( X_y_scorer(model, dataset, target), subgroup_scorer(target, y_pred, subgroups, distance_measure="ratio"), ) # Validate for diff distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="diff") assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure="diff"), float ) assert is_close( X_y_scorer(model, dataset, target), subgroup_scorer(target, y_pred, subgroups, distance_measure="diff"), ) # Do not accept other distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn( sensitive_attr_names, distance_measure="something" ) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure="something") # Do not accept None distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure=None) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure=None) else: # Accepts only None as distance_measure X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure=None) assert isinstance(X_y_scorer(model, dataset, target), float) assert isinstance( subgroup_scorer(target, y_pred, subgroups, distance_measure=None), float ) with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn( sensitive_attr_names, distance_measure="something" ) with pytest.raises(GuardianAIValueError): subgroup_scorer(target, y_pred, subgroups, distance_measure="something") @pytest.mark.parametrize("scorer", DATASET_SCORERS_USING_DISTANCE) def test_dataset_scorer_distance(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model X_y_scorer_fn = DATASET_X_Y_SCORERS[scorer] subgroup_scorer = DATASET_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Validate for ratio distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="ratio") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance( subgroup_scorer(target, subgroups, distance_measure="ratio"), float ) assert is_close( X_y_scorer(None, dataset, target), subgroup_scorer(target, subgroups, distance_measure="ratio"), ) # Validate for diff distance X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="diff") assert isinstance(X_y_scorer(None, dataset, target), float) assert isinstance( subgroup_scorer(target, subgroups, distance_measure="diff"), float ) assert is_close( X_y_scorer(None, dataset, target), subgroup_scorer(target, subgroups, distance_measure="diff"), ) # Do not accept other distances with pytest.raises(GuardianAIValueError): X_y_scorer = X_y_scorer_fn(sensitive_attr_names, distance_measure="something") with pytest.raises(GuardianAIValueError): subgroup_scorer(target, subgroups, distance_measure="something") @pytest.mark.parametrize("scorer", MODEL_SUBGROUPS_SCORERS) def test_model_scorers_y_format(sensitive_dataset_and_model, scorer): dataset, target, model, sensitive_attr_names = sensitive_dataset_and_model run_y_true_tests = scorer not in MODEL_SCORERS_ALLOWING_Y_TRUE_NONE scorer = MODEL_SUBGROUPS_SCORERS[scorer] subgroups = dataset[sensitive_attr_names] # Accept same number of instances y_pred = target.copy() assert isinstance(scorer(target, y_pred, subgroups), float) # Do not accept different number of instances if run_y_true_tests: y_pred = target[:-1].copy() with pytest.raises(GuardianAIValueError): scorer(target, y_pred, subgroups) # Do not accept multiclass 
classification
    multiclass_y = target.copy()
    multiclass_y[:3] = 2  # Change a few labels
    assert multiclass_y.nunique() == 3  # Sanity check
    with pytest.raises(GuardianAIValueError):
        scorer(multiclass_y, y_pred, subgroups)

    # Do not accept non-array inputs
    if run_y_true_tests:
        y_pred = target.copy()
        bad_target = {i: itm for i, itm in enumerate(target)}
with pytest.raises(GuardianAITypeError):
24
2023-10-09 09:48:50+00:00
24k
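The fairness-scorer tests in the record above repeatedly compare the model-facing scorer against the subgroup scorer through an is_close helper that is defined elsewhere in the test suite. A minimal sketch of such a tolerant float comparison, assuming it simply wraps math.isclose (the tolerance value is an assumption, not taken from the repo):

import math

def is_close(a: float, b: float, rel_tol: float = 1e-5) -> bool:
    # Tolerant comparison for two scorers that compute the same
    # fairness metric through different code paths (X/y vs. subgroups).
    return math.isclose(a, b, rel_tol=rel_tol)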
jiangjiechen/auction-arena
app.py
[ { "identifier": "create_items", "path": "src/item_base.py", "snippet": "def create_items(item_info_jsl):\n '''\n item_info: a list of dict (name, price, desc, id)\n '''\n item_info_jsl = LoadJsonL(item_info_jsl)\n item_list = []\n for info in item_info_jsl:\n item_list.append(It...
import os import gradio as gr from app_modules.presets import * from app_modules.overwrites import * from app_modules.utils import * from src.item_base import create_items from src.bidder_base import Bidder from src.human_bidder import HumanBidder from src.auctioneer_base import Auctioneer from auction_workflow import run_auction, make_auction_hash from utils import chunks, reset_state_list
15,763
BIDDER_NUM = 4 items = create_items('data/items_demo.jsonl') def auction_loop_app(*args): global items bidder_list = args[0] # gr.State() -> session state items_id = args[1] os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '') os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '') thread_num = args[4] item_shuffle = args[5] enable_discount = args[6] min_markup_pct = args[7] args = args[8:] auction_hash = make_auction_hash() items_to_bid = [items[i] for i in items_id] auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct) auctioneer.init_items(items_to_bid) if item_shuffle: auctioneer.shuffle_items() # must correspond to the order in app's parameters input_keys = [ 'chatbot', 'model_name', 'desire', 'plan_strategy', 'budget', 'correct_belief', 'enable_learning', 'temperature', 'overestimate_percent', ] # convert flatten list into a json list input_jsl = [] for i, chunk in enumerate(chunks(args, len(input_keys))): js = {'name': f"Bidder {i+1}", 'auction_hash': auction_hash} for k, v in zip(input_keys, chunk): js[k] = v input_jsl.append(js) for js in input_jsl: js.pop('chatbot') if 'human' in js['model_name']: bidder_list.append(HumanBidder.create(**js)) else: bidder_list.append(Bidder.create(**js))
BIDDER_NUM = 4 items = create_items('data/items_demo.jsonl') def auction_loop_app(*args): global items bidder_list = args[0] # gr.State() -> session state items_id = args[1] os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '') os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '') thread_num = args[4] item_shuffle = args[5] enable_discount = args[6] min_markup_pct = args[7] args = args[8:] auction_hash = make_auction_hash() items_to_bid = [items[i] for i in items_id] auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct) auctioneer.init_items(items_to_bid) if item_shuffle: auctioneer.shuffle_items() # must correspond to the order in app's parameters input_keys = [ 'chatbot', 'model_name', 'desire', 'plan_strategy', 'budget', 'correct_belief', 'enable_learning', 'temperature', 'overestimate_percent', ] # convert flatten list into a json list input_jsl = [] for i, chunk in enumerate(chunks(args, len(input_keys))): js = {'name': f"Bidder {i+1}", 'auction_hash': auction_hash} for k, v in zip(input_keys, chunk): js[k] = v input_jsl.append(js) for js in input_jsl: js.pop('chatbot') if 'human' in js['model_name']: bidder_list.append(HumanBidder.create(**js)) else: bidder_list.append(Bidder.create(**js))
yield from run_auction(auction_hash, auctioneer, bidder_list, thread_num, yield_for_demo=True)
4
2023-10-08 09:30:57+00:00
24k
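auction_loop_app in the record above rebuilds per-bidder configurations from a flat list of Gradio inputs via chunks(args, len(input_keys)). The helper is imported from utils and not shown in this record; a plausible sketch of such a fixed-size chunker (an assumption, not the repo's actual implementation):

from typing import Iterable, Iterator, List, TypeVar

T = TypeVar("T")

def chunks(seq: Iterable[T], size: int) -> Iterator[List[T]]:
    # Yield successive fixed-size groups from a flat sequence, e.g. the
    # flattened per-bidder Gradio inputs consumed by auction_loop_app.
    buf: List[T] = []
    for item in seq:
        buf.append(item)
        if len(buf) == size:
            yield buf
            buf = []
    if buf:
        yield buf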
sakemin/cog-musicgen-chord
predict.py
[ { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an ...
import os import random import torchaudio import typing as tp import numpy as np import torch import subprocess from typing import Optional from cog import BasePredictor, Input, Path from audiocraft.solvers.compression import CompressionSolver from audiocraft.models import MusicGen, MultiBandDiffusion from audiocraft.solvers.compression import CompressionSolver from audiocraft.models.loaders import ( load_compression_model, load_lm_model, ) from audiocraft.data.audio import audio_write from audiocraft.models.builders import get_lm_model from omegaconf import OmegaConf
17,697
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg
compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device)
3
2023-10-09 09:52:24+00:00
24k
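_delete_param in the record above has to toggle OmegaConf's struct mode before it can drop stale conditioner keys from a loaded checkpoint config. A self-contained usage sketch of that pattern; the config contents here are illustrative only, not a real checkpoint:

from omegaconf import OmegaConf

cfg = OmegaConf.create({"conditioners": {"args": {"drop_desc_p": 0.1}}})

# Struct mode forbids deleting keys; disable it around the mutation.
OmegaConf.set_struct(cfg, False)
del cfg.conditioners.args["drop_desc_p"]
OmegaConf.set_struct(cfg, True)

print(OmegaConf.to_yaml(cfg))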
zhijie-group/LOVECon
test_lovecon.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Opti...
import os import copy import click import re import numpy as np import torch import torch.utils.data import torch.utils.checkpoint import decord import shutil from glob import glob from typing import Optional,Dict from tqdm.auto import tqdm from omegaconf import OmegaConf from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDIMScheduler, ) from diffusers.utils.import_utils import is_xformers_available from transformers import AutoTokenizer, CLIPTextModel from einops import rearrange from video_diffusion.models.unet_3d_condition import UNetPseudo3DConditionModel from video_diffusion.models.controlnet_3d_condition import ControlNetPseudo3DModel from video_diffusion.data.dataset import ImageSequenceDataset from video_diffusion.common.util import get_time_string, get_function_args from video_diffusion.common.logger import get_logger_config_path from video_diffusion.common.image_util import log_train_samples from video_diffusion.common.instantiate_from_config import instantiate_from_config from video_diffusion.pipelines.p2p_validation_loop_controlnet import P2pSampleLogger from annotator.util import get_control from video_diffusion.pipelines.DDIMInterpolationScheduler import DDIMInterpolationScheduler from RIFEModel.RIFE_HDv3 import Model
19,167
decord.bridge.set_bridge('torch') # from video_diffusion.pipelines.p2p_validation_loop_controlnet_ablation import P2pSampleLogger # logger = get_logger(__name__) def collate_fn(examples): """Concat a batch of sampled image in dataloader """ batch = { "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0), "images": torch.stack([example["images"] for example in examples]), } return batch def test( config: str, pretrained_model_path: str, control_type:str, pretrained_controlnet_model_path :str, dataset_config: Dict, logdir: str = None, editing_config: Optional[Dict] = None, test_pipeline_config: Optional[Dict] = None, gradient_accumulation_steps: int = 1, seed: Optional[int] = None, mixed_precision: Optional[str] = "fp16", batch_size: int = 1, model_config: dict={}, verbose: bool=True, **kwargs ):
decord.bridge.set_bridge('torch') # from video_diffusion.pipelines.p2p_validation_loop_controlnet_ablation import P2pSampleLogger # logger = get_logger(__name__) def collate_fn(examples): """Concat a batch of sampled image in dataloader """ batch = { "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0), "images": torch.stack([example["images"] for example in examples]), } return batch def test( config: str, pretrained_model_path: str, control_type:str, pretrained_controlnet_model_path :str, dataset_config: Dict, logdir: str = None, editing_config: Optional[Dict] = None, test_pipeline_config: Optional[Dict] = None, gradient_accumulation_steps: int = 1, seed: Optional[int] = None, mixed_precision: Optional[str] = "fp16", batch_size: int = 1, model_config: dict={}, verbose: bool=True, **kwargs ):
args = get_function_args()
4
2023-10-09 14:38:28+00:00
24k
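The collate_fn in the record above concatenates prompt ids and stacks frame tensors into a batch. A hedged sketch of how such a function plugs into a PyTorch DataLoader, with a toy stand-in for the repo's ImageSequenceDataset (shapes are made up):

import torch
from torch.utils.data import DataLoader, Dataset

class ToyDataset(Dataset):
    # Stand-in for the repo's ImageSequenceDataset: each item carries
    # tokenized prompt ids and a short clip of frames.
    def __len__(self) -> int:
        return 4

    def __getitem__(self, idx: int):
        return {
            "prompt_ids": torch.zeros(1, 77, dtype=torch.long),
            "images": torch.zeros(8, 3, 64, 64),
        }

def collate_fn(examples):
    return {
        "prompt_ids": torch.cat([e["prompt_ids"] for e in examples], dim=0),
        "images": torch.stack([e["images"] for e in examples]),
    }

loader = DataLoader(ToyDataset(), batch_size=2, collate_fn=collate_fn)
batch = next(iter(loader))
print(batch["prompt_ids"].shape, batch["images"].shape)  # (2, 77) (2, 8, 3, 64, 64)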
LiYunfengLYF/LightFC
lib/train/data/base_functions.py
[ { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\r\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\r\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\r\n ...
import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.optimizer.anan import Adan from lib.train.optimizer.lion import Lion from lib.utils.misc import is_main_process
20,468
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb")
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb")
datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader,
10
2023-10-08 11:44:32+00:00
24k
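names2datasets in the record above dispatches dataset-name strings to constructors, switching to LMDB-backed readers when settings.use_lmdb is set. A minimal sketch of that dispatch pattern with stand-in constructors (the registry entries are illustrative, not the repo's full list):

from typing import Callable, Dict, List

def build_datasets(names: List[str], use_lmdb: bool) -> List[str]:
    # Illustrative registry; the real code builds Lasot/Got10k/COCO/...
    # with split and image_loader arguments.
    registry: Dict[str, Dict[bool, Callable[[], str]]] = {
        "LASOT": {False: lambda: "Lasot(...)", True: lambda: "Lasot_lmdb(...)"},
        "COCO17": {False: lambda: "MSCOCOSeq(...)", True: lambda: "MSCOCOSeq_lmdb(...)"},
    }
    built = []
    for name in names:
        if name not in registry:
            raise ValueError(f"unknown dataset name: {name}")
        built.append(registry[name][use_lmdb]())
    return built

print(build_datasets(["LASOT", "COCO17"], use_lmdb=False))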
LiyaoTang/ERDA
utils/trainer.py
[ { "identifier": "log_config", "path": "config/utils.py", "snippet": "def log_config(config, title='', f_out=None, prefix='', base=None):\n if f_out is None:\n f_out = sys.stdout\n if base is None:\n root = os.path.join(os.getcwd(), os.path.dirname(__file__), '../')\n sys.path ...
import os, re, gc, sys, time, pickle, psutil, subprocess import numpy as np import tensorflow as tf from config import log_config from utils.logger import print_dict, print_table from utils.ply import read_ply, write_ply from utils.tester import ModelTester from utils.average_gradients import average_gradients from utils.AdamWOptimizer import AdamWeightDecayOptimizer from utils.logger import setup_logger from utils.scheduler import StepScheduler, LrScheduler from utils.metrics import AverageMeter from utils.tf_graph_builder import GraphBuilder
19,683
batch_idx = 0 end = time.time() while True: try: rst = sess.run(train_ops, feed_dict=feed_dict) if (batch_idx + 1) % config.update_freq == 0: for k, v in rst['loss_dict'].items(): loss_meter[k].update(v) batch_time.update(time.time() - end) end = time.time() if (batch_idx + 1) % config.print_freq == 0: loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()]) print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True) batch_idx += 1 except tf.errors.OutOfRangeError: break return batch_idx # Debug methods # ------------------------------------------------------------------------------------------------------------------ def show_memory_usage(self, batch_to_feed): for l in range(self.config.num_layers): neighb_size = list(batch_to_feed[self.in_neighbors_f32[l]].shape) dist_size = neighb_size + [self.config.num_kernel_points, 3] dist_memory = np.prod(dist_size) * 4 * 1e-9 in_feature_size = neighb_size + [self.config.first_features_dim * 2**l] in_feature_memory = np.prod(in_feature_size) * 4 * 1e-9 out_feature_size = [neighb_size[0], self.config.num_kernel_points, self.config.first_features_dim * 2**(l+1)] out_feature_memory = np.prod(out_feature_size) * 4 * 1e-9 print('Layer {:d} => {:.1f}GB {:.1f}GB {:.1f}GB'.format(l, dist_memory, in_feature_memory, out_feature_memory)) print('************************************') def train_one_epoch_debug(self, sess, ops, epoch, lr, g=None): """ One epoch training """ config = self.config is_training = True batch_time = AverageMeter() loss_meter = {k: AverageMeter() for k in ops['loss_dict']} inputs = self.model.inputs inputs_flat = {k: v for k, v in inputs.items() if not isinstance(v, (list, dict))} train_ops = {'train_op': ops['train_op'], 'loss_dict': ops['loss_dict'], 'inputs': inputs_flat, 'result_dict': ops['result_dict']} assert_ops = inputs['assert_ops'] if 'assert_ops' in inputs and len(inputs['assert_ops']) > 0 else [] feed_dict = {ops['is_training']: is_training, ops['learning_rate']: lr} sess.run(ops['train_init_op']) if config.debug_grads: assert g is not None # [(g, v), ...] 
train_ops['grads'] = g.grads batch_idx = 0 end = time.time() while True: try: with tf.control_dependencies(assert_ops): rst = sess.run(train_ops, feed_dict=feed_dict) # NaN appears if config.debug_grads: self.debug_grads_nan(sess, inputs, train_ops, rst) if any([np.isnan(v) for v in rst['loss_dict'].values()]): self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict']) raise ArithmeticError(f'NaN encountered !!!') if (batch_idx + 1) % config.update_freq == 0: for k, v in rst['loss_dict'].items(): loss_meter[k].update(v) batch_time.update(time.time() - end) end = time.time() if (batch_idx + 1) % config.print_freq == 0: loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()]) print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True) batch_idx += 1 except tf.errors.OutOfRangeError: break return batch_idx def debug_grads_nan(self, sess, inputs, ops, rst): grads = ops['grads'] grads_v = rst['grads'] nan_grads = [(g, v, g_val, v_val) for (g, v), (g_val, v_val) in zip(grads, grads_v) if np.isnan(g_val).any() or np.isnan(v_val).any()] if not nan_grads: return lines = [] for g, v, g_val, v_val in nan_grads: g_nan = 100 * np.sum(np.isnan(g_val)) / np.prod(g_val.shape) v_nan = 100 * np.sum(np.isnan(v_val)) / np.prod(v_val.shape) lines.append([v.name, g, '-', v_val.shape, f'/ {v_nan:.1f}', 'val nan', g_val.shape, f'/ {g_nan:.1f}', 'grad nan']) print_table(lines) self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict']) raise ArithmeticError(f'NaN encountered in grads checking !!!') return def debug_nan(self, sess, inputs, result_dict, loss_dict): """ NaN happened, find where """ print('\n\n------------------------ NaN DEBUG ------------------------\n') print('loss_dict :') print('*******************\n')
if tf.__version__.split('.')[0] == '2': tf = tf.compat.v1 tf.disable_v2_behavior() # PLY reader FILE_DIR = os.path.abspath(__file__) BASE_DIR = os.path.dirname(FILE_DIR) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.insert(0, ROOT_DIR) sys.path.insert(0, BASE_DIR) sys.path.insert(0, os.path.join(ROOT_DIR, 'models')) sys.path.insert(0, os.path.join(ROOT_DIR, 'utils')) DEBUG = False class ModelTrainer: """ get & train the model (potential multi-gpu training) """ def __init__(self, config, verbose=True): self.config = config self.verbose = verbose self.tester = ModelTester(config, verbose=False) def add_summary(self, model): with tf.variable_scope('summary'): summary = model.summary log_content = self.config.log_content if 'var' in log_content: summary['per_log'] += [tf.summary.histogram(v.name, v) for g, v in gvs] if 'gard' in log_content: summary['per_log'] += [tf.summary.histogram(f'{v.name}_grad', g) for g, v in gvs] sum_levels = ['per_step', 'per_log', 'per_epoch'] assert all([k in sum_levels for k in summary.keys()]), f'undesired keys in summary dict: {str(summary.keys())}' for i in range(len(sum_levels)): summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else [tf.no_op] self.summary = summary return # Training main method # ------------------------------------------------------------------------------------------------------------------ def train(self): config = self.config with tf.Graph().as_default(): # use one graph # prepare compute graph g = GraphBuilder(config, verbose=self.verbose) ops, sess, grads, saver = g.ops, g.sess, g.grads, g.saver model, dataset = g.model, g.dataset self.model = model # printing model parameters if self.verbose: print('\n --------- printing grads {') re_list = ['.*bias:.*', '.*batch_normalization.*'] # skipping print_table([(v.name, g) for g, v in grads if not any([bool(re.fullmatch(expr, v.name)) for expr in re_list])], prefix='\t') print('} --------- printing grads') # all ops in graph print('\n --------- all ops {') re_list = ['optimizer.*', 'gpu_.*', 'gradients.*', 'save.*'] # '.*/batch_normalization/.*', '.*/bias:.*' # skipping for n in tf.get_default_graph().as_graph_def().node: if any([bool(re.fullmatch(expr, n.name)) for expr in re_list]): continue print('\t', n.name) print('} --------- all ops') # model params all_params_size = sum([np.prod(v.shape) for _, v in grads]) # all_params_size = tf.reduce_sum([tf.reduce_prod(v.shape) for _, v in grads]) # all_params_size = sess.run(all_params_size) print(f'==> Model have {all_params_size} total Params', flush=True) # init sess sess.run(tf.global_variables_initializer()) if self.config.model_path: except_list = [f'.*{n}.*' for n in self.config.exclude_vars] + ['optimizer.*'] if not self.config.continue_training else [] g.restore(sess, self.config.model_path, except_list=except_list) print(f'Model restored -- {self.config.model_path}') # running voting - used throughout the training process (accumulated voting) validation_probs = self.tester.init_pointcloud_log(dataset, 'validation', config.num_classes) # train func if config.debug_nan: self.train_one_epoch = self.train_one_epoch_debug # train metric_best = None # save_snap = [i for i in range(1, config.max_epoch + 1) if i % config.save_freq == 0] lr_scheduler = LrScheduler(config) snap_path = os.path.join(config.saving_path, config.snap_dir, config.snap_prefix) for epoch in range(1, config.max_epoch + 1): print(f'\n****EPOCH {epoch}****') lr = lr_scheduler.learning_rate tic1 = time.time() step = self.train_one_epoch(sess, ops, epoch, lr, g=g) 
tic2 = time.time() print(f'total time: {(tic2 - tic1)/60:.1f}min, learning rate = {lr:.7f}', flush=True) if epoch % config.val_freq == 0: metric = self.tester.val_running_vote(sess, ops, dataset, model, validation_probs) # running voting if metric_best is None or metric > metric_best: # keep the best val metric_best = metric saver.save(sess, snap_path + '-best') print('best saved') # if config.save_best: # saver.save(sess, snap_path + '-best') # if config.save_best == 'center': # epoch_start = max(epoch // config.save_freq - config.max_to_keep // 2, 1) # save_snap = [i * config.save_freq for i in range(epoch_start, epoch_start + config.max_to_keep + 1)] # save_snap = [i for i in save_snap if i != epoch] # if epoch in save_snap: if config.save_freq and epoch % config.save_freq == 0: saver.save(sess, snap_path, global_step=epoch) lr_scheduler.step(epoch=1, step=step) # val & save last model if missed if epoch % config.val_freq != 0: self.tester.val_running_vote(sess, ops, dataset, model, validation_probs) if config.save_freq and epoch % config.save_freq != 0: saver.save(sess, snap_path, global_step=epoch) print('\nfinished\n', flush=True) return def train_one_epoch(self, sess, ops, epoch, lr, g=None): """ One epoch training """ config = self.config is_training = True batch_time = AverageMeter() loss_meter = {k: AverageMeter() for k in ops['loss_dict']} train_ops = {'train_op': ops['train_op'], 'loss_dict': ops['loss_dict']} feed_dict = {ops['is_training']: is_training, ops['learning_rate']: lr} sess.run(ops['train_init_op']) batch_idx = 0 end = time.time() while True: try: rst = sess.run(train_ops, feed_dict=feed_dict) if (batch_idx + 1) % config.update_freq == 0: for k, v in rst['loss_dict'].items(): loss_meter[k].update(v) batch_time.update(time.time() - end) end = time.time() if (batch_idx + 1) % config.print_freq == 0: loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()]) print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True) batch_idx += 1 except tf.errors.OutOfRangeError: break return batch_idx # Debug methods # ------------------------------------------------------------------------------------------------------------------ def show_memory_usage(self, batch_to_feed): for l in range(self.config.num_layers): neighb_size = list(batch_to_feed[self.in_neighbors_f32[l]].shape) dist_size = neighb_size + [self.config.num_kernel_points, 3] dist_memory = np.prod(dist_size) * 4 * 1e-9 in_feature_size = neighb_size + [self.config.first_features_dim * 2**l] in_feature_memory = np.prod(in_feature_size) * 4 * 1e-9 out_feature_size = [neighb_size[0], self.config.num_kernel_points, self.config.first_features_dim * 2**(l+1)] out_feature_memory = np.prod(out_feature_size) * 4 * 1e-9 print('Layer {:d} => {:.1f}GB {:.1f}GB {:.1f}GB'.format(l, dist_memory, in_feature_memory, out_feature_memory)) print('************************************') def train_one_epoch_debug(self, sess, ops, epoch, lr, g=None): """ One epoch training """ config = self.config is_training = True batch_time = AverageMeter() loss_meter = {k: AverageMeter() for k in ops['loss_dict']} inputs = self.model.inputs inputs_flat = {k: v for k, v in inputs.items() if not isinstance(v, (list, dict))} train_ops = {'train_op': ops['train_op'], 'loss_dict': ops['loss_dict'], 'inputs': inputs_flat, 'result_dict': ops['result_dict']} assert_ops = inputs['assert_ops'] if 'assert_ops' in inputs and len(inputs['assert_ops']) > 0 else [] feed_dict = {ops['is_training']: is_training, 
ops['learning_rate']: lr} sess.run(ops['train_init_op']) if config.debug_grads: assert g is not None # [(g, v), ...] train_ops['grads'] = g.grads batch_idx = 0 end = time.time() while True: try: with tf.control_dependencies(assert_ops): rst = sess.run(train_ops, feed_dict=feed_dict) # NaN appears if config.debug_grads: self.debug_grads_nan(sess, inputs, train_ops, rst) if any([np.isnan(v) for v in rst['loss_dict'].values()]): self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict']) raise ArithmeticError(f'NaN encountered !!!') if (batch_idx + 1) % config.update_freq == 0: for k, v in rst['loss_dict'].items(): loss_meter[k].update(v) batch_time.update(time.time() - end) end = time.time() if (batch_idx + 1) % config.print_freq == 0: loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()]) print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True) batch_idx += 1 except tf.errors.OutOfRangeError: break return batch_idx def debug_grads_nan(self, sess, inputs, ops, rst): grads = ops['grads'] grads_v = rst['grads'] nan_grads = [(g, v, g_val, v_val) for (g, v), (g_val, v_val) in zip(grads, grads_v) if np.isnan(g_val).any() or np.isnan(v_val).any()] if not nan_grads: return lines = [] for g, v, g_val, v_val in nan_grads: g_nan = 100 * np.sum(np.isnan(g_val)) / np.prod(g_val.shape) v_nan = 100 * np.sum(np.isnan(v_val)) / np.prod(v_val.shape) lines.append([v.name, g, '-', v_val.shape, f'/ {v_nan:.1f}', 'val nan', g_val.shape, f'/ {g_nan:.1f}', 'grad nan']) print_table(lines) self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict']) raise ArithmeticError(f'NaN encountered in grads checking !!!') return def debug_nan(self, sess, inputs, result_dict, loss_dict): """ NaN happened, find where """ print('\n\n------------------------ NaN DEBUG ------------------------\n') print('loss_dict :') print('*******************\n')
print_dict(loss_dict)
1
2023-10-13 08:03:07+00:00
24k
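The training loops in the record above accumulate per-loss running means with AverageMeter objects imported from utils.metrics. A minimal sketch consistent with the update/avg usage visible in the loop (the repo's real class may track more statistics):

class AverageMeter:
    # Running mean of a scalar stream: meter.update(v), then meter.avg.
    def __init__(self) -> None:
        self.sum = 0.0
        self.count = 0

    def update(self, value: float, n: int = 1) -> None:
        self.sum += value * n
        self.count += n

    @property
    def avg(self) -> float:
        return self.sum / self.count if self.count else 0.0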
bilibini/Lovely_Image_Downloader
dist/py/Python38/site-packages/urllib3/connectionpool.py
[ { "identifier": "_TYPE_BODY", "path": "dist/py/Python38/site-packages/urllib3/_base_connection.py", "snippet": "_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]" }, { "identifier": "HTTPHeaderDict", "path": "dist/py/Python38/site-packages/urllib3/_collecti...
import errno import logging import queue import sys import typing import warnings import weakref import ssl from socket import timeout as SocketTimeout from types import TracebackType from ._base_connection import _TYPE_BODY from ._collections import HTTPHeaderDict from ._request_methods import RequestMethods from .connection import ( BaseSSLError, BrokenPipeError, DummyConnection, HTTPConnection, HTTPException, HTTPSConnection, ProxyConfig, _wrap_proxy_error, ) from .connection import port_by_scheme as port_by_scheme from .exceptions import ( ClosedPoolError, EmptyPoolError, FullPoolError, HostChangedError, InsecureRequestWarning, LocationValueError, MaxRetryError, NewConnectionError, ProtocolError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, ) from .response import BaseHTTPResponse from .util.connection import is_connection_dropped from .util.proxy import connection_requires_http_tunnel from .util.request import _TYPE_BODY_POSITION, set_file_position from .util.retry import Retry from .util.ssl_match_hostname import CertificateError from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout from .util.url import Url, _encode_target from .util.url import _normalize_host as normalize_host from .util.url import parse_url from .util.util import to_str from typing_extensions import Literal from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection
21,463
""" Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`http.client.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`http.client.HTTPConnection`. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. 
""" scheme = "http" ConnectionCls: ( type[BaseHTTPConnection] | type[BaseHTTPSConnection] ) = HTTPConnection def __init__( self, host: str, port: int | None = None, timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, maxsize: int = 1, block: bool = False, headers: typing.Mapping[str, str] | None = None, retries: Retry | bool | int | None = None, _proxy: Url | None = None, _proxy_headers: typing.Mapping[str, str] | None = None,
from __future__ import annotations if typing.TYPE_CHECKING: log = logging.getLogger(__name__) _TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None] _SelfT = typing.TypeVar("_SelfT") # Pool objects class ConnectionPool: """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`http.client.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`http.client.HTTPConnection`. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. 
""" scheme = "http" ConnectionCls: ( type[BaseHTTPConnection] | type[BaseHTTPSConnection] ) = HTTPConnection def __init__( self, host: str, port: int | None = None, timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, maxsize: int = 1, block: bool = False, headers: typing.Mapping[str, str] | None = None, retries: Retry | bool | int | None = None, _proxy: Url | None = None, _proxy_headers: typing.Mapping[str, str] | None = None,
_proxy_config: ProxyConfig | None = None,
3
2023-10-11 09:08:57+00:00
24k
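The ConnectionPool base class in the record above implements __enter__/__exit__ so that a pool closes its connections when used as a context manager. A short usage sketch against urllib3's public API; the host is a placeholder:

import urllib3

# maxsize controls how many reusable connections are kept; block=True
# caps concurrent connections at maxsize instead of opening extras.
with urllib3.HTTPConnectionPool("example.com", port=80, maxsize=2, block=True) as pool:
    response = pool.request("GET", "/")
    print(response.status)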
MTgeophysics/mtpy-v2
mtpy/modeling/modem/residual.py
[ { "identifier": "Data", "path": "mtpy/modeling/modem/data.py", "snippet": "class Data:\n \"\"\"\n Data will read and write .dat files for ModEM and convert a WS data file\n to ModEM format.\n\n ..note: :: the data is interpolated onto the given periods such that all\n stations ...
from pathlib import Path from .data import Data from mtpy.modeling.plots import PlotRMS import numpy as np import pandas as pd
15,569
""" if self.dataframe is None: return for col in ["zxx", "zxy", "zyx", "zyy", "tzx", "tzy"]: with np.errstate(divide="ignore", invalid="ignore"): self.dataframe[f"rms_{col}"] = np.abs(self.dataframe[col]) / ( np.real(self.dataframe[f"{col}_model_error"]) * np.sqrt(2) ) @property def rms_per_period_all(self): """ RMS per period """ if self.dataframe is not None: rms_list = [] for period in self.dataframe.period.unique(): z_df = self.dataframe.loc[ self.dataframe.period == period, ["rms_zxx", "rms_zxy", "rms_zyx", "rms_zyy"], ] t_df = self.dataframe.loc[ self.dataframe.period == period, ["rms_tzx", "rms_tzy"] ] rms_list.append( { "period": period, "rms_z": z_df.mean().mean(), "rms_t": t_df.mean().mean(), } ) df = pd.DataFrame(rms_list) df = df.set_index("period") return df @property def rms_per_period_per_component(self): """ RMS per period by component :return: DESCRIPTION :rtype: TYPE """ rms_list = [] for period in self.dataframe.period.unique(): comp_df = self.dataframe.loc[ self.dataframe.period == period, [ "rms_zxx", "rms_zxy", "rms_zyx", "rms_zyy", "rms_tzx", "rms_tzy", ], ] mean_dict = {"period": period} for comp in comp_df.columns: mean_dict[comp] = comp_df.loc[:, comp].mean() rms_list.append(mean_dict) df = pd.DataFrame(rms_list) df = df.set_index("period") return df def plot_rms_per_period(self, plot_type="all", **kwargs): """ :param **kwargs: DESCRIPTION :type **kwargs: TYPE :return: DESCRIPTION :rtype: TYPE """ if plot_type == "all": df = self.rms_per_period_all.copy() elif plot_type == "comp": df = self.rms_per_period_per_component.copy() plot_list = [] color_list = [] for comp in df.columns: if not np.all(np.isnan(df[comp])): plot_list.append(comp) color_list.append(self.color_dict[comp]) ax = df.plot.bar( y=plot_list, color=color_list, xlabel="Period (s)", ylabel="normalized RMS", grid=True, **kwargs, ) ax.set_axisbelow(True) return ax def plot_rms(self, **kwargs): """ plot RMS in different views :param **kwargs: DESCRIPTION :type **kwargs: TYPE :return: DESCRIPTION :rtype: TYPE """
""" ================== ModEM ================== residuals class to contain RMS information revised by JP 2017 revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Residual(Data): """ class to contain residuals for each data point, and rms values for each station ====================== ==================================================== Attributes/Key Words Description ====================== ==================================================== work_dir residual_fn full path to data file residual_array numpy.ndarray (num_stations) structured to store data. keys are: * station --> station name * lat --> latitude in decimal degrees * lon --> longitude in decimal degrees * elev --> elevation (m) * rel_east -- > relative east location to center_position (m) * rel_north --> relative north location to center_position (m) * east --> UTM east (m) * north --> UTM north (m) * zone --> UTM zone * z --> impedance tensor residual (measured - modelled) (num_freq, 2, 2) * z_err --> impedance tensor error array with shape (num_freq, 2, 2) * tip --> Tipper residual (measured - modelled) (num_freq, 1, 2) * tipperr --> Tipper array with shape (num_freq, 1, 2) rms rms_array numpy.ndarray structured to store station location values and rms. Keys are: * station --> station name * east --> UTM east (m) * north --> UTM north (m) * lat --> latitude in decimal degrees * lon --> longitude in decimal degrees * elev --> elevation (m) * zone --> UTM zone * rel_east -- > relative east location to center_position (m) * rel_north --> relative north location to center_position (m) * rms --> root-mean-square residual for each station rms_tip rms_z ====================== ==================================================== """ # todo complete the doc above def __init__(self, **kwargs): self.work_dir = Path() self.residual_fn = None self.residual_array = None self.rms = None self.rms_array = None self.rms_tip = None self.rms_z = None super().__init__(**kwargs) self.color_dict = { "rms_z": (0, 162 / 255, 255 / 255), "rms_t": (255 / 255, 162 / 255, 0), "rms_zxx": (136 / 255, 235 / 255, 193 / 255), "rms_zxy": (84 / 255, 189 / 255, 215 / 255), "rms_zyx": (136 / 255, 84 / 255, 215 / 255), "rms_zyy": (206 / 255, 84 / 255, 215 / 255), "rms_tzx": (215 / 255, 210 / 255, 84 / 255), "rms_tzy": (215 / 255, 154 / 255, 84 / 255), } for key, value in kwargs.items(): setattr(self, key, value) def read_residual_file(self, residual_fn): """ :param residual_fn: DESCRIPTION, defaults to None :type residual_fn: TYPE, optional :return: DESCRIPTION :rtype: TYPE """ self.dataframe = self.read_data_file(residual_fn) self.calculate_rms() def calculate_rms(self): """ add columns for rms :return: DESCRIPTION :rtype: TYPE """ if self.dataframe is None: return for col in ["zxx", "zxy", "zyx", "zyy", "tzx", "tzy"]: with np.errstate(divide="ignore", invalid="ignore"): self.dataframe[f"rms_{col}"] = np.abs(self.dataframe[col]) / ( np.real(self.dataframe[f"{col}_model_error"]) * np.sqrt(2) ) @property def rms_per_period_all(self): """ RMS per period """ if self.dataframe is not None: rms_list = [] for period in self.dataframe.period.unique(): z_df = self.dataframe.loc[ self.dataframe.period == period, ["rms_zxx", "rms_zxy", "rms_zyx", "rms_zyy"], ] t_df = self.dataframe.loc[ 
self.dataframe.period == period, ["rms_tzx", "rms_tzy"] ] rms_list.append( { "period": period, "rms_z": z_df.mean().mean(), "rms_t": t_df.mean().mean(), } ) df = pd.DataFrame(rms_list) df = df.set_index("period") return df @property def rms_per_period_per_component(self): """ RMS per period by component :return: DESCRIPTION :rtype: TYPE """ rms_list = [] for period in self.dataframe.period.unique(): comp_df = self.dataframe.loc[ self.dataframe.period == period, [ "rms_zxx", "rms_zxy", "rms_zyx", "rms_zyy", "rms_tzx", "rms_tzy", ], ] mean_dict = {"period": period} for comp in comp_df.columns: mean_dict[comp] = comp_df.loc[:, comp].mean() rms_list.append(mean_dict) df = pd.DataFrame(rms_list) df = df.set_index("period") return df def plot_rms_per_period(self, plot_type="all", **kwargs): """ :param **kwargs: DESCRIPTION :type **kwargs: TYPE :return: DESCRIPTION :rtype: TYPE """ if plot_type == "all": df = self.rms_per_period_all.copy() elif plot_type == "comp": df = self.rms_per_period_per_component.copy() plot_list = [] color_list = [] for comp in df.columns: if not np.all(np.isnan(df[comp])): plot_list.append(comp) color_list.append(self.color_dict[comp]) ax = df.plot.bar( y=plot_list, color=color_list, xlabel="Period (s)", ylabel="normalized RMS", grid=True, **kwargs, ) ax.set_axisbelow(True) return ax def plot_rms(self, **kwargs): """ plot RMS in different views :param **kwargs: DESCRIPTION :type **kwargs: TYPE :return: DESCRIPTION :rtype: TYPE """
plot_rms = PlotRMS(self.dataframe, **kwargs)
1
2023-10-11 22:24:50+00:00
24k
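calculate_rms in the record above normalizes each complex residual by the real part of its model error times sqrt(2), with divide-by-zero warnings suppressed. A small numpy/pandas sketch of that per-component computation; the data values are made up:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "zxy": [0.5 + 0.5j, 1.0 + 0.0j],
    "zxy_model_error": [0.5 + 0j, 0.5 + 0j],
})

# Normalized RMS: |residual| / (Re(model_error) * sqrt(2)),
# leaving divide-by-zero as inf/nan exactly as in the record.
with np.errstate(divide="ignore", invalid="ignore"):
    df["rms_zxy"] = np.abs(df["zxy"]) / (np.real(df["zxy_model_error"]) * np.sqrt(2))

print(df["rms_zxy"])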
weavel-ai/promptmodel-python
promptmodel/llms/llm_proxy.py
[ { "identifier": "LLM", "path": "promptmodel/llms/llm.py", "snippet": "class LLM:\n def __init__(self):\n pass\n\n @classmethod\n def __parse_output_pattern__(\n cls,\n raw_output: Optional[str] = None,\n parsing_type: Optional[ParsingType] = None,\n ) -> ParseResu...
from typing import ( Any, AsyncGenerator, Callable, Dict, Generator, List, Optional, Tuple, Union, ) from uuid import UUID from threading import Thread from rich import print from uuid import uuid4 from litellm.utils import ModelResponse, get_max_tokens from promptmodel.llms.llm import LLM from promptmodel.database.models import ( DeployedPrompt, DeployedFunctionModel, DeployedFunctionModelVersion, ) from promptmodel.database.crud import ( get_deployed_prompts, ) from promptmodel.promptmodel_init import CacheManager from promptmodel.utils.config_utils import read_config, upsert_config from promptmodel.utils.random_utils import select_version_by_ratio from promptmodel.utils import logger from promptmodel.utils.async_utils import run_async_in_sync from promptmodel.utils.token_counting import ( num_tokens_for_messages_for_each, num_tokens_from_functions_input, ) from promptmodel.utils.output_utils import update_dict from promptmodel.apis.base import AsyncAPIClient from promptmodel.types.response import ( LLMResponse, LLMStreamResponse, FunctionModelConfig, ChatModelConfig, UnitConfig, PMDetail, ) from promptmodel.types.request import ChatLogRequest
20,045
tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream)(inputs, **kwargs) def run_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_method(super().run_and_parse)(inputs, **kwargs) def arun_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_method(super().arun_and_parse)(inputs, **kwargs) def stream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> Generator[LLMStreamResponse, None, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_gen(super().stream_and_parse)(inputs, **kwargs) def astream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream_and_parse)(inputs, **kwargs) def chat_run( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat(super().run)(session_uuid, **kwargs) def chat_arun( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat(super().arun)(session_uuid, **kwargs) def chat_stream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat_gen(super().stream)(session_uuid, **kwargs) def chat_astream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat_gen(super().astream)(session_uuid, **kwargs) @staticmethod async def fetch_prompts( name, version: Optional[Union[str, int]] = "deploy", ) -> Tuple[List[Dict[str, str]], Dict[str, Any]]: """fetch prompts. 
Args: name (str): name of FunctionModel Returns: Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail) """ # Check connection activate config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return [], {} elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return [], {} else: if ( "project" in config and "use_cache" in config["project"] and config["project"]["use_cache"] == True and version == "deploy" ):
class LLMProxy(LLM): def __init__( self, name: str, version: Optional[Union[str, int]] = "deploy", unit_config: Optional[UnitConfig] = None ): super().__init__() self._name = name self.version = version self.unit_config = unit_config def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) log_uuid = str(uuid4()) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) ) return wrapper def _wrap_async_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]: async def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call async_gen with the arguments stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen( **call_args ) log_uuid = str(uuid4()) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None api_response: Optional[ModelResponse] = None async for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item # # add string_cache in model_response # if api_response: # if "message" not in api_response.choices[0]: # api_response.choices[0].message = {} # if "content" not in api_response.choices[0].message: # api_response.choices[0].message["content"] = string_cache # api_response.choices[0].message["role"] = "assistant" metadata = { "error": error_occurs, "error_log": error_log, } await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], 
inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) # raise Exception("error_log") return wrapper def _wrap_method(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) ) else: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return wrapper def _wrap_async_method(self, method: Callable[..., Any]) -> Callable[..., Any]: async def async_wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) # messages, model, uuid = self._fetch_prompts() call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = await method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) else: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return async_wrapper def _wrap_chat(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(session_uuid: str, **kwargs): instruction, version_details, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) ) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } api_response = None if llm_response.api_response: api_response = 
llm_response.api_response log_uuid = str(uuid4()) run_async_in_sync( self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( message=llm_response.api_response.choices[ 0 ].message.model_dump(), uuid=log_uuid, metadata=metadata, api_response=api_response, ) ], ) ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return wrapper def _wrap_async_chat(self, method: Callable[..., Any]) -> Callable[..., Any]: async def async_wrapper(session_uuid: str, **kwargs): ( instruction, version_details, message_logs, ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the method with the arguments llm_response: LLMResponse = await method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } api_response = None if llm_response.api_response: api_response = llm_response.api_response log_uuid = str(uuid4()) await self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=llm_response.api_response.choices[ 0 ].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return async_wrapper def _wrap_chat_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(session_uuid: str, **kwargs): instruction, version_details, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) ) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None error_occurs = False error_log = None log_uuid = str(uuid4()) for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } run_async_in_sync( self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=api_response.choices[0].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) ) return wrapper def 
_wrap_async_chat_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]: async def wrapper(session_uuid: str, **kwargs): ( instruction, version_details, message_logs, ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the generator with the arguments stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen( **call_args ) api_response = None error_occurs = False error_log = None log_uuid = str(uuid4()) async for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } await self._async_chat_log_to_cloud( session_uuid=session_uuid, version_uuid=version_details["uuid"], chat_log_request_list=[ ChatLogRequest( uuid=log_uuid, message=api_response.choices[0].message.model_dump(), metadata=metadata, api_response=api_response, ) ], ) return wrapper def _prepare_call_args( self, prompts: List[Dict[str, str]], version_detail: Dict[str, Any], inputs: Dict[str, Any], kwargs, ): stringified_inputs = {key: str(value) for key, value in inputs.items()} messages = [ { "content": prompt["content"].format(**stringified_inputs), "role": prompt["role"], } for prompt in prompts ] call_args = { "messages": messages, "model": version_detail["model"] if version_detail else None, "parsing_type": version_detail["parsing_type"] if version_detail else None, "output_keys": version_detail["output_keys"] if version_detail else None, } if call_args["parsing_type"] is None: del call_args["parsing_type"] del call_args["output_keys"] if "functions" in kwargs: call_args["functions"] = kwargs["functions"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] return call_args def _prepare_call_args_for_chat( self, messages: List[Dict[str, Any]], version_detail: Dict[str, Any], kwargs, ): call_args = {} token_per_tools = 0 if "functions" in kwargs: call_args["functions"] = kwargs["functions"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["functions"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) if "tools" in kwargs: call_args["tools"] = kwargs["tools"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["tools"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) # truncate messages to make length <= model's max length model_max_tokens = get_max_tokens( model=version_detail["model"] if version_detail else "gpt-3.5-turbo" ) token_per_messages = num_tokens_for_messages_for_each( messages, version_detail["model"] ) token_limit_exceeded = ( sum(token_per_messages) + token_per_tools ) - model_max_tokens if token_limit_exceeded > 0: while token_limit_exceeded > 0: # erase the second oldest message (first one is system prompt, so it should not be erased) if len(messages) == 1: # if there is only one message, Error cannot be solved. 
Just call LLM and get error response break token_limit_exceeded -= token_per_messages[1] del messages[1] del token_per_messages[1] call_args["messages"] = messages call_args["model"] = version_detail["model"] if version_detail else None if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] return call_args async def _async_log_to_cloud( self, version_uuid: str, log_uuid: str, inputs: Optional[Dict] = None, api_response: Optional[ModelResponse] = None, parsed_outputs: Optional[Dict] = None, metadata: Optional[Dict] = None, ): config = read_config() if ( "project" in config and "mask_inputs" in config["project"] and config["project"]["mask_inputs"] == True ): inputs = {key: "PRIVATE LOGGING" for key, value in inputs.items()} # Perform the logging asynchronously if api_response: api_response_dict = api_response.model_dump() api_response_dict["response_ms"] = api_response._response_ms api_response_dict["_response_ms"] = api_response._response_ms else: api_response_dict = None run_log_request_body = { "uuid": log_uuid, "api_response": api_response_dict, "inputs": inputs, "parsed_outputs": parsed_outputs, "metadata": metadata, } res = await AsyncAPIClient.execute( method="POST", path="/run_log", params={ "version_uuid": version_uuid, }, json=run_log_request_body, use_cli_key=False, ) if res.status_code != 200: print(f"[red]Failed to log to cloud: {res.json()}[/red]"); if self.unit_config: res_connect = await AsyncAPIClient.execute( method="POST", path="/unit/connect", json={ "unit_log_uuid": self.unit_config.log_uuid, "run_log_uuid": log_uuid, }, use_cli_key=False, ) if res_connect.status_code != 200: print(f"[red]Failed to connect prompt component to run log: {res_connect.json()}[/red]") return res async def _async_chat_log_to_cloud( self, session_uuid: str, version_uuid: Optional[str] = None, chat_log_request_list: List[ChatLogRequest] = [], ): # Perform the logging asynchronously res = await AsyncAPIClient.execute( method="POST", path="/chat_log", params={ "session_uuid": session_uuid, "version_uuid": version_uuid, }, json=[r.model_dump() for r in chat_log_request_list], use_cli_key=False, ) if res.status_code != 200: print(f"[red]Failed to log to cloud: {res.json()}[/red]") return res async def _async_make_session_cloud( self, session_uuid: str, version_uuid: Optional[str] = None, ): # Perform the logging asynchronously res = await AsyncAPIClient.execute( method="POST", path="/make_session", params={ "session_uuid": session_uuid, "version_uuid": version_uuid, }, use_cli_key=False, ) if res.status_code != 200: print(f"[red]Failed to make ChatSession in cloud: {res.json()}[/red]") return res def make_kwargs(self, **kwargs): res = {} for key, value in kwargs.items(): if value is not None: res[key] = value return res def run( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_method(super().run)(inputs, **kwargs) def arun( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_method(super().arun)(inputs, **kwargs) def stream( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = 
None, api_key: Optional[str] = None, ) -> Generator[LLMStreamResponse, None, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_gen(super().stream)(inputs, **kwargs) def astream( self, inputs: Optional[Dict[str, Any]] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream)(inputs, **kwargs) def run_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_method(super().run_and_parse)(inputs, **kwargs) def arun_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_method(super().arun_and_parse)(inputs, **kwargs) def stream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> Generator[LLMStreamResponse, None, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_gen(super().stream_and_parse)(inputs, **kwargs) def astream_and_parse( self, inputs: Dict[str, Any] = {}, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> AsyncGenerator[LLMStreamResponse, None]: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_gen(super().astream_and_parse)(inputs, **kwargs) def chat_run( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat(super().run)(session_uuid, **kwargs) def chat_arun( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat(super().arun)(session_uuid, **kwargs) def chat_stream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_chat_gen(super().stream)(session_uuid, **kwargs) def chat_astream( self, session_uuid: str, functions: Optional[List[Any]] = None, tools: Optional[List[Any]] = None, api_key: Optional[str] = None, ) -> LLMResponse: kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools) return self._wrap_async_chat_gen(super().astream)(session_uuid, **kwargs) @staticmethod async def fetch_prompts( name, version: Optional[Union[str, int]] = "deploy", ) -> Tuple[List[Dict[str, str]], Dict[str, Any]]: """fetch prompts. 
Args: name (str): name of FunctionModel Returns: Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail) """ # Check connection activate config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return [], {} elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return [], {} else: if ( "project" in config and "use_cache" in config["project"] and config["project"]["use_cache"] == True and version == "deploy" ):
cache_manager = CacheManager()
5
2023-10-09 03:35:44+00:00
24k
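The LLMProxy wrappers in the row above all follow one pattern: re-yield every streamed chunk unchanged while aggregating parsed dictionaries and raw text, then send a single log once the stream is exhausted. A minimal sketch of that pattern, assuming hypothetical chunk dictionaries and a send_log callback (neither is promptmodel's real API):

from typing import Any, Callable, Dict, Generator

def wrap_stream_with_logging(
    gen: Callable[..., Generator[Dict[str, Any], None, None]],
    send_log: Callable[[Dict[str, Any]], None],
) -> Callable[..., Generator[Dict[str, Any], None, None]]:
    """Re-yield every chunk unchanged, then log the aggregate once the stream ends."""
    def wrapper(*args: Any, **kwargs: Any) -> Generator[Dict[str, Any], None, None]:
        string_cache = ""                 # concatenated raw output, as in _wrap_gen
        dict_cache: Dict[str, Any] = {}   # merged parsed outputs
        error_log = None
        for chunk in gen(*args, **kwargs):
            string_cache += chunk.get("raw_output") or ""
            dict_cache.update(chunk.get("parsed_outputs") or {})
            error_log = chunk.get("error_log") or error_log
            yield chunk                   # the caller still sees each chunk as it arrives
        send_log({"raw": string_cache, "parsed": dict_cache, "error_log": error_log})
    return wrapper

The design point is that the log fires only after the final yield, so per-chunk streaming latency is unaffected.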
cambridgeltl/ClaPS
algs/greedy.py
[ { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: L...
import random import numpy as np from typing import Any, Optional from rewards.text_classification_reward import ( PromptedClassificationReward, ) from utils.fsc_datasets import PromptedClassificationDataset from .base_trainer import BaseTrainer
18,712
class GreedyTrainer(BaseTrainer): def __init__( self, obj_func: PromptedClassificationReward,
class GreedyTrainer(BaseTrainer): def __init__( self, obj_func: PromptedClassificationReward,
prompt_dataset: PromptedClassificationDataset,
3
2023-10-08 12:39:44+00:00
24k
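The GreedyTrainer row only exposes the constructor signature, so the search loop itself is not visible here. As a hedged sketch of what greedy prompt search generally looks like, with a hypothetical reward_fn and token vocabulary (this is not ClaPS's actual implementation):

from typing import Callable, List

def greedy_prompt_search(
    reward_fn: Callable[[str], float],   # hypothetical: scores a full prompt string
    vocab: List[str],                    # candidate tokens to append
    prompt_length: int,
) -> str:
    """Grow a prompt one token at a time, keeping the locally best candidate."""
    prompt = ""
    for _ in range(prompt_length):
        best = max(vocab, key=lambda tok: reward_fn((prompt + " " + tok).strip()))
        prompt = (prompt + " " + best).strip()
    return prompt

# Toy usage: a reward that simply prefers prompts containing "classify".
print(greedy_prompt_search(lambda p: float("classify" in p), ["classify", "the", "text"], 3))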
MachinePerceptionLab/Attentive_DFPrior
src/DF_Prior.py
[ { "identifier": "config", "path": "src/config.py", "snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg):" }, { "identifier": "Mapper", "path": "src/Mapper.py", "snippet": "class Mapper(object):\n \"\"\"\n Mapper thread. \n\n...
import os import time import numpy as np import torch import torch.multiprocessing import torch.multiprocessing as mp from src import config from src.Mapper import Mapper from src.Tracker import Tracker from src.utils.datasets import get_dataset from src.utils.Logger import Logger from src.utils.Mesher import Mesher from src.utils.Renderer import Renderer
20,088
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass
self.frame_reader = get_dataset(cfg, args, self.scale)
3
2023-10-13 00:49:57+00:00
24k
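DF_Prior.__init__ above forces the 'spawn' start method before dispatching its mapping and tracking processes, which is the start method required when child processes touch CUDA tensors or shared model state. A self-contained sketch of the same setup, with a toy worker and a CPU shared tensor:

import torch
import torch.multiprocessing as mp

def worker(rank: int, shared: torch.Tensor) -> None:
    shared[rank] = rank   # writes land in the parent because the tensor is in shared memory

if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)   # the same guarded call as in DF_Prior.__init__
    shared = torch.zeros(2).share_memory_()
    procs = [mp.Process(target=worker, args=(r, shared)) for r in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(shared)   # tensor([0., 1.])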
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/naive_bayes.py
[ { "identifier": "BaseEstimator", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class BaseEstimator(_MetadataRequester):\n \"\"\"Base class for all estimators in scikit-learn.\n\n Notes\n -----\n All estimators should specify all the parameters that can be se...
import warnings import numpy as np from abc import ABCMeta, abstractmethod from numbers import Integral, Real from scipy.special import logsumexp from .base import BaseEstimator, ClassifierMixin, _fit_context from .preprocessing import LabelBinarizer, binarize, label_binarize from .utils._param_validation import Hidden, Interval, StrOptions from .utils.extmath import safe_sparse_dot from .utils.multiclass import _check_partial_fit_first_call from .utils.validation import _check_sample_weight, check_is_fitted, check_non_negative
15,891
X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ check_is_fitted(self) X = self._check_X(X) jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) return jll - np.atleast_2d(log_prob_x).T def predict_proba(self, X): """ Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ return np.exp(self.predict_log_proba(X)) class GaussianNB(_BaseNB): """ Gaussian Naive Bayes (GaussianNB). Can perform online updates to model parameters via :meth:`partial_fit`. For details on algorithm used to update feature means and variance online, see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Read more in the :ref:`User Guide <gaussian_naive_bayes>`. Parameters ---------- priors : array-like of shape (n_classes,), default=None Prior probabilities of the classes. If specified, the priors are not adjusted according to the data. var_smoothing : float, default=1e-9 Portion of the largest variance of all features that is added to variances for calculation stability. .. versionadded:: 0.20 Attributes ---------- class_count_ : ndarray of shape (n_classes,) number of training samples observed in each class. class_prior_ : ndarray of shape (n_classes,) probability of each class. classes_ : ndarray of shape (n_classes,) class labels known to the classifier. epsilon_ : float absolute additive value to variances. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 var_ : ndarray of shape (n_classes, n_features) Variance of each feature per class. .. versionadded:: 1.0 theta_ : ndarray of shape (n_classes, n_features) mean of each feature per class. See Also -------- BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. CategoricalNB : Naive Bayes classifier for categorical features. ComplementNB : Complement Naive Bayes classifier. MultinomialNB : Naive Bayes classifier for multinomial models. Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> Y = np.array([1, 1, 1, 2, 2, 2]) >>> from sklearn.naive_bayes import GaussianNB >>> clf = GaussianNB() >>> clf.fit(X, Y) GaussianNB() >>> print(clf.predict([[-0.8, -1]])) [1] >>> clf_pf = GaussianNB() >>> clf_pf.partial_fit(X, Y, np.unique(Y)) GaussianNB() >>> print(clf_pf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { "priors": ["array-like", None],
""" The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These are supervised learning methods based on applying Bayes' theorem with strong (naive) feature independence assumptions. """ # Author: Vincent Michel <vincent.michel@inria.fr> # Minor fixes by Fabian Pedregosa # Amit Aides <amitibo@tx.technion.ac.il> # Yehuda Finkelstein <yehudaf@tx.technion.ac.il> # Lars Buitinck # Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # (parts based on earlier work by Mathieu Blondel) # # License: BSD 3 clause __all__ = [ "BernoulliNB", "GaussianNB", "MultinomialNB", "ComplementNB", "CategoricalNB", ] class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta): """Abstract base class for naive Bayes estimators""" @abstractmethod def _joint_log_likelihood(self, X): """Compute the unnormalized posterior log probability of X I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of shape (n_samples, n_classes). Public methods predict, predict_proba, predict_log_proba, and predict_joint_log_proba pass the input through _check_X before handing it over to _joint_log_likelihood. The term "joint log likelihood" is used interchangibly with "joint log probability". """ @abstractmethod def _check_X(self, X): """To be overridden in subclasses with the actual checks. Only used in predict* methods. """ def predict_joint_log_proba(self, X): """Return joint log probability estimates for the test vector X. For each row x of X and class y, the joint log probability is given by ``log P(x, y) = log P(y) + log P(x|y),`` where ``log P(y)`` is the class prior probability and ``log P(x|y)`` is the class-conditional probability. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : ndarray of shape (n_samples, n_classes) Returns the joint log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ check_is_fitted(self) X = self._check_X(X) return self._joint_log_likelihood(X) def predict(self, X): """ Perform classification on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X. """ check_is_fitted(self) X = self._check_X(X) jll = self._joint_log_likelihood(X) return self.classes_[np.argmax(jll, axis=1)] def predict_log_proba(self, X): """ Return log-probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ check_is_fitted(self) X = self._check_X(X) jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) return jll - np.atleast_2d(log_prob_x).T def predict_proba(self, X): """ Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. 
""" return np.exp(self.predict_log_proba(X)) class GaussianNB(_BaseNB): """ Gaussian Naive Bayes (GaussianNB). Can perform online updates to model parameters via :meth:`partial_fit`. For details on algorithm used to update feature means and variance online, see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Read more in the :ref:`User Guide <gaussian_naive_bayes>`. Parameters ---------- priors : array-like of shape (n_classes,), default=None Prior probabilities of the classes. If specified, the priors are not adjusted according to the data. var_smoothing : float, default=1e-9 Portion of the largest variance of all features that is added to variances for calculation stability. .. versionadded:: 0.20 Attributes ---------- class_count_ : ndarray of shape (n_classes,) number of training samples observed in each class. class_prior_ : ndarray of shape (n_classes,) probability of each class. classes_ : ndarray of shape (n_classes,) class labels known to the classifier. epsilon_ : float absolute additive value to variances. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 var_ : ndarray of shape (n_classes, n_features) Variance of each feature per class. .. versionadded:: 1.0 theta_ : ndarray of shape (n_classes, n_features) mean of each feature per class. See Also -------- BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. CategoricalNB : Naive Bayes classifier for categorical features. ComplementNB : Complement Naive Bayes classifier. MultinomialNB : Naive Bayes classifier for multinomial models. Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> Y = np.array([1, 1, 1, 2, 2, 2]) >>> from sklearn.naive_bayes import GaussianNB >>> clf = GaussianNB() >>> clf.fit(X, Y) GaussianNB() >>> print(clf.predict([[-0.8, -1]])) [1] >>> clf_pf = GaussianNB() >>> clf_pf.partial_fit(X, Y, np.unique(Y)) GaussianNB() >>> print(clf_pf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { "priors": ["array-like", None],
"var_smoothing": [Interval(Real, 0, None, closed="left")],
7
2023-10-07 13:19:48+00:00
24k
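predict_log_proba in the sklearn source above normalizes the joint log likelihood by log P(x) computed with logsumexp. A quick standalone check of that identity on a toy two-class example (plain NumPy/SciPy, not sklearn internals; the numbers are made up):

import numpy as np
from scipy.special import logsumexp

jll = np.array([[np.log(0.2), np.log(0.6)]])   # unnormalized log P(x, c) for two classes
log_prob_x = logsumexp(jll, axis=1)            # log P(x) = log sum_c P(x, c)
log_posterior = jll - np.atleast_2d(log_prob_x).T
print(np.exp(log_posterior))                   # [[0.25 0.75]] -- each row sums to 1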
zbzhu99/madiff
diffuser/models/diffusion.py
[ { "identifier": "DPM_Solver", "path": "diffuser/utils/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(\n self,\n model_fn,\n noise_schedule,\n algorithm_type=\"dpmsolver++\",\n correcting_x0_fn=None,\n correcting_xt_fn=None,\n thresholding...
import functools import numpy as np import torch import torch.nn.functional as F import diffuser.utils as utils from torch import nn from diffuser.utils.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper from .helpers import Losses, apply_conditioning, cosine_beta_schedule, extract
20,879
class GaussianDiffusion(nn.Module): def __init__( self, model, n_agents, horizon, history_horizon, observation_dim, action_dim, n_timesteps=1000, loss_type="l1", clip_denoised=False, predict_epsilon=True, action_weight=1.0, loss_discount=1.0, loss_weights=None, returns_condition=False, condition_guidance_w=0.1, agent_share_noise=False, data_encoder=utils.IdentityEncoder(), **kwargs, ): super().__init__() self.n_agents = n_agents self.horizon = horizon self.history_horizon = history_horizon self.observation_dim = observation_dim self.action_dim = action_dim self.transition_dim = observation_dim + action_dim self.model = model self.returns_condition = returns_condition self.condition_guidance_w = condition_guidance_w self.agent_share_noise = agent_share_noise self.data_encoder = data_encoder betas = cosine_beta_schedule(n_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, axis=0) alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]]) self.n_timesteps = int(n_timesteps) self.clip_denoised = clip_denoised self.predict_epsilon = predict_epsilon self.register_buffer("betas", betas) self.register_buffer("alphas_cumprod", alphas_cumprod) self.register_buffer("alphas_cumprod_prev", alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod)) self.register_buffer( "sqrt_one_minus_alphas_cumprod", torch.sqrt(1.0 - alphas_cumprod) ) self.register_buffer( "log_one_minus_alphas_cumprod", torch.log(1.0 - alphas_cumprod) ) self.register_buffer( "sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod - 1) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) ) self.register_buffer("posterior_variance", posterior_variance) # log calculation clipped because the posterior variance # is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", torch.log(torch.clamp(posterior_variance, min=1e-20)), ) self.register_buffer( "posterior_mean_coef1", betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod), ) self.register_buffer( "posterior_mean_coef2", (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod), ) # get loss coefficients and initialize objective self.loss_type = loss_type loss_weights = self.get_loss_weights(action_weight, loss_discount, loss_weights)
class GaussianDiffusion(nn.Module): def __init__( self, model, n_agents, horizon, history_horizon, observation_dim, action_dim, n_timesteps=1000, loss_type="l1", clip_denoised=False, predict_epsilon=True, action_weight=1.0, loss_discount=1.0, loss_weights=None, returns_condition=False, condition_guidance_w=0.1, agent_share_noise=False, data_encoder=utils.IdentityEncoder(), **kwargs, ): super().__init__() self.n_agents = n_agents self.horizon = horizon self.history_horizon = history_horizon self.observation_dim = observation_dim self.action_dim = action_dim self.transition_dim = observation_dim + action_dim self.model = model self.returns_condition = returns_condition self.condition_guidance_w = condition_guidance_w self.agent_share_noise = agent_share_noise self.data_encoder = data_encoder betas = cosine_beta_schedule(n_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, axis=0) alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]]) self.n_timesteps = int(n_timesteps) self.clip_denoised = clip_denoised self.predict_epsilon = predict_epsilon self.register_buffer("betas", betas) self.register_buffer("alphas_cumprod", alphas_cumprod) self.register_buffer("alphas_cumprod_prev", alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod)) self.register_buffer( "sqrt_one_minus_alphas_cumprod", torch.sqrt(1.0 - alphas_cumprod) ) self.register_buffer( "log_one_minus_alphas_cumprod", torch.log(1.0 - alphas_cumprod) ) self.register_buffer( "sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod - 1) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) ) self.register_buffer("posterior_variance", posterior_variance) # log calculation clipped because the posterior variance # is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", torch.log(torch.clamp(posterior_variance, min=1e-20)), ) self.register_buffer( "posterior_mean_coef1", betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod), ) self.register_buffer( "posterior_mean_coef2", (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod), ) # get loss coefficients and initialize objective self.loss_type = loss_type loss_weights = self.get_loss_weights(action_weight, loss_discount, loss_weights)
self.loss_fn = Losses[loss_type](loss_weights, self.action_dim)
3
2023-10-13 13:03:53+00:00
24k
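GaussianDiffusion registers the closed-form DDPM posterior quantities as buffers. The same arithmetic on a toy linear schedule (the class itself uses cosine_beta_schedule; the formulas below mirror the buffers registered above):

import torch

n_timesteps = 4
betas = torch.linspace(1e-4, 2e-2, n_timesteps)   # toy linear schedule for illustration
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])

# Posterior q(x_{t-1} | x_t, x_0): the same formulas the class registers as buffers.
posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
posterior_mean_coef1 = betas * torch.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)
posterior_mean_coef2 = (1.0 - alphas_cumprod_prev) * torch.sqrt(alphas) / (1.0 - alphas_cumprod)
print(posterior_variance)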
hellloxiaotian/KDNet
train_KDNet.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distill...
import argparse import logging import math import os import random import time import numpy as np import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler import torch.utils.data import yaml import test # import test.py to get mAP after each epoch from copy import deepcopy from pathlib import Path from threading import Thread from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from models.experimental import attempt_load from models.experimental import attempt_loadv5 from models.experimental import attempt_load_zxy from models.yolo import Model from utils.autoanchor import check_anchors from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ check_requirements, print_mutation, set_logging, one_cycle, colorstr from utils.google_utils import attempt_download from utils.loss import ComputeLoss, ComputeLossOTA from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume from utils.distill_utils import getMask, compute_mask_loss
18,981
logger = logging.getLogger(__name__) def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile( weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') # load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank):
logger = logging.getLogger(__name__) def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile( weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') # load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
7
2023-10-08 13:05:58+00:00
24k
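train_KDNet.py loads a teacher via attempt_load_zxy and imports getMask and compute_mask_loss, whose bodies are not shown in this row. As a loose, generic sketch of masked feature distillation between teacher and student, for illustration only (this is not KDNet's actual loss, and the mask's role is an assumption about what getMask produces):

import torch

def feature_distill_loss(student_feat: torch.Tensor,
                         teacher_feat: torch.Tensor,
                         mask: torch.Tensor) -> torch.Tensor:
    """Squared error between feature maps, averaged over unmasked positions."""
    teacher_feat = teacher_feat.detach()              # never backprop into the teacher
    diff = (student_feat - teacher_feat) ** 2
    denom = mask.expand_as(diff).sum().clamp(min=1.0)
    return (diff * mask).sum() / denom

student = torch.randn(2, 64, 20, 20, requires_grad=True)
teacher = torch.randn(2, 64, 20, 20)
mask = (torch.rand(2, 1, 20, 20) > 0.5).float()       # hypothetical foreground mask
feature_distill_loss(student, teacher, mask).backward()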
falesiani/torch_ga
tests/test_keras.py
[ { "identifier": "GeometricProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricProductDense(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric multiplication instead of standard\n multip...
import unittest as ut import h5py import torch import torch.nn as nn import torch.nn.functional as F from io import BytesIO from torch_ga.layers import ( GeometricProductDense, GeometricSandwichProductDense, GeometricProductElementwise, GeometricSandwichProductElementwise, GeometricProductConv1D, GeometricAlgebraExp, GeometricToTensor, GeometricToTensorWithKind, TensorToGeometric, TensorWithKindToGeometric, ) from torch_ga.blades import BladeKind from torch_ga import GeometricAlgebra
18,116
sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_kind_layer = GeometricToTensorWithKind( sta, BladeKind.VECTOR) self.assertTensorsEqual( geom_to_tensor_kind_layer(geom_tensor), gt_tensor) def test_geometric_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector + vector -> scalar + bivector + vector expected_result_indices = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) self.assertTrue(torch.all(sta.is_pure(result, expected_result_indices))) def test_geometric_product_dense_s_mv(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.ones([20, 6, 1]), torch.zeros([20, 6, 15])], axis=-1 ) mv_blade_indices = list(range(16)) geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=mv_blade_indices ) geom_prod_layer.build(geom_tensor.shape) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_s_mv:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # scalar * multivector + multivector -> multivector # Check that nothing is zero (it would be extremely unlikely # but not impossible to randomly get a zero here). assert torch.all(result != 0.0) # self.assertTrue(tf.reduce_all(result != 0.0)) def test_geometric_product_dense_sequence(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([20, 6, 4]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # vector * vector + vector -> scalar + bivector + vector scalar_bivector_blade_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] sequence = nn.Sequential(*[ TensorToGeometric(sta, blade_indices=vector_blade_indices), GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ), GeometricToTensor(sta, blade_indices=scalar_bivector_blade_indices) ]) for e in sequence: e.build(tensor.shape) result = sequence(tensor) print(f"test_geometric_product_dense_sequence:") print(f"tensor={tensor}") print(f"result={result}") self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices)) def test_geometric_sandwich_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] result_indices = torch.concat([ sta.get_kind_blade_indices(BladeKind.VECTOR), sta.get_kind_blade_indices(BladeKind.TRIVECTOR) ], axis=0)
torch.manual_seed(0) class TestKerasLayers(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) print(f"assertTensorsEqual(a={a},b={b})") assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b) def test_tensor_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices) self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor) def test_tensor_with_kind_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_kind_to_geom_layer = TensorWithKindToGeometric( sta, BladeKind.VECTOR) self.assertTensorsEqual( tensor_kind_to_geom_layer(tensor), gt_geom_tensor) def test_geometric_to_tensor(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_layer = GeometricToTensor(sta, vector_blade_indices) self.assertTensorsEqual(geom_to_tensor_layer(geom_tensor), gt_tensor) def test_geometric_to_tensor_with_kind(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_kind_layer = GeometricToTensorWithKind( sta, BladeKind.VECTOR) self.assertTensorsEqual( geom_to_tensor_kind_layer(geom_tensor), gt_tensor) def test_geometric_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector + vector -> scalar + bivector + vector expected_result_indices = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) self.assertTrue(torch.all(sta.is_pure(result, expected_result_indices))) def test_geometric_product_dense_s_mv(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.ones([20, 6, 1]), torch.zeros([20, 6, 15])], axis=-1 ) mv_blade_indices = list(range(16)) geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=mv_blade_indices ) geom_prod_layer.build(geom_tensor.shape) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_s_mv:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # scalar * multivector + multivector -> multivector # Check that nothing is zero (it would be extremely unlikely # but not impossible to randomly get a zero here). 
assert torch.all(result != 0.0) # self.assertTrue(tf.reduce_all(result != 0.0)) def test_geometric_product_dense_sequence(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([20, 6, 4]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # vector * vector + vector -> scalar + bivector + vector scalar_bivector_blade_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] sequence = nn.Sequential(*[ TensorToGeometric(sta, blade_indices=vector_blade_indices), GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ), GeometricToTensor(sta, blade_indices=scalar_bivector_blade_indices) ]) for e in sequence: e.build(tensor.shape) result = sequence(tensor) print(f"test_geometric_product_dense_sequence:") print(f"tensor={tensor}") print(f"result={result}") self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices)) def test_geometric_sandwich_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] result_indices = torch.concat([ sta.get_kind_blade_indices(BladeKind.VECTOR), sta.get_kind_blade_indices(BladeKind.TRIVECTOR) ], axis=0)
geom_prod_layer = GeometricSandwichProductDense(
1
2023-10-07 13:34:07+00:00
24k
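The torch_ga tests above repeatedly pack a rank-1 vector into the 16-blade spacetime-algebra multivector by zero-padding around blade indices 1..4. The packing itself can be reproduced with plain torch indexing; this mirrors the effect TensorToGeometric shows in the tests, without the library (the function name here is ours):

import torch

def tensor_to_geometric(t: torch.Tensor, blade_indices, num_blades: int = 16) -> torch.Tensor:
    """Scatter a [..., len(blade_indices)] tensor into a zero-padded [..., num_blades] multivector."""
    out = torch.zeros(*t.shape[:-1], num_blades, dtype=t.dtype)
    out[..., blade_indices] = t
    return out

vec = torch.ones(32, 4)
mv = tensor_to_geometric(vec, [1, 2, 3, 4])   # same layout as the tests' gt_geom_tensor
assert torch.equal(
    mv, torch.cat([torch.zeros(32, 1), torch.ones(32, 4), torch.zeros(32, 11)], dim=-1)
)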
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n ...
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
15,383
# Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = 
datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) team_member_db = TeamMemberDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) weekly_posts_db = WeeklyPostsDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
# Import required modules

app = Flask(__name__)

# Load environment variables from the .env file
load_dotenv()

# Retrieve bot, guild, and channel tokens from environment variables
BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN')
GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN'))
CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN'))
ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID'))

# Retrieve database credentials from environment variables
MYSQL_HOST = os.getenv('MYSQL_HOST')
MYSQL_USER = os.getenv('MYSQL_USER')
MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD')
MYSQL_DB = os.getenv('MYSQL_DB')
MYSQL_PORT = os.getenv('MYSQL_PORT')

ORG_NAME = os.getenv('GITHUB_ORG_NAME')
ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Initialize bot with default intents
intents = Intents.default()
intents.members = True
intents.message_content = True
bot = commands.Bot(command_prefix='!', intents=intents)

openai.api_key = OPENAI_API_KEY

# TODO: Remove these globals
streaks_manager = None
weekly_post_manager = None
team_member_manager = None
updates_manager = None
scheduler = None
ongoing_status_requests = {}

THUMBS_UP_EMOJI = "👍"
PENCIL_EMOJI = "✏️"
REPORT_SUBMISSION_EMOJI = '📝'

async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]):
    # Reset streaks for the previous week
    for member in team_members:
        if not member.on_vacation and member.weekly_checkins < 5:
            streaks_manager.reset_streak(member.discord_id)
            member.reset_streak()
        member.reset_weekly_checkins()

    # Initialize new weekly post
    await weekly_post_manager.initialize_post(team_members)

def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list:
    """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours."""
    headers = {
        "Authorization": f"token {token}",
        "Accept": "application/vnd.github.v3+json"
    }

    last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id)
    if last_update_timestamp:
        # Convert the timestamp to UTC
        local_tz = pytz.timezone(user_time_zone)
        localized_timestamp = local_tz.localize(last_update_timestamp)
        utc_timestamp = localized_timestamp.astimezone(pytz.utc)
        # Format the timestamp for the GitHub API and append 'Z'
        since_date = utc_timestamp.isoformat()
        if not since_date.endswith('Z'):
            since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z'
    else:
        # If no updates found, default to last 24 hours
        since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z'

    all_commit_messages = []

    # Paginate through all repositories in the organization
    repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100"
    while repos_url:
        response = requests.get(repos_url, headers=headers)
        if response.status_code != 200:
            # Log error and break loop
            print(f"Failed to fetch repos: {response.status_code} {response.text}")
            break
        repos = response.json()

        # Iterate over each repository
        for repo in repos:
            repo_name = repo["name"]
            commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100"

            # Paginate through commits for the repository
            while commits_url:
                response = requests.get(commits_url, headers=headers)
                if response.status_code != 200:
                    # Log error and continue to the next repository
                    print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}")
                    break
                commits = response.json()
                repo_commit_messages = [commit["commit"]["message"] for commit in commits]
                all_commit_messages.extend(repo_commit_messages)
                # Check for the 'next' link for commits pagination
                commits_url = get_pagination_link(response.headers, 'next')

        # Check for the 'next' link for repositories pagination
        repos_url = get_pagination_link(response.headers, 'next')

    return all_commit_messages

def get_pagination_link(headers, rel):
    """Extract pagination link for the 'rel' type from the Link header."""
    link = headers.get('Link', None)
    if link:
        links = link.split(', ')
        for link in links:
            if 'rel="{}"'.format(rel) in link:
                return link.split('; ')[0].strip('<>')
    return None

async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager):
    if member.weekly_checkins == 5:
        return  # If already completed 5 check-ins, do nothing

    user = bot.get_user(member.discord_id)
    if user:
        # Notify the admin that a status request is being sent
        admin_user = bot.get_user(ADMIN_DISCORD_ID)
        if admin_user:
            await admin_user.send(f"Status request sent to {member.name}.")

        # Cancel the previous task if it exists
        ongoing_task: Task = ongoing_status_requests.get(member.discord_id)
        if ongoing_task:
            ongoing_task.cancel()

        # Retrieve all commit messages for the member
        commit_messages = get_all_commit_messages_for_user(ORG_NAME, ORG_TOKEN, member)

        if not commit_messages:
            summarized_report = "You have no commits for the previous working day."
            msg = f"{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report."
        else:
            summarized_report = await updates_manager.summarize_technical_updates(commit_messages)
            msg = f"Here's your summarized report based on your commits:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report."

        raw_updates = summarized_report

        # Send initial message and wait for reaction
        await user.send(
            f"# Good morning {member.name}, time for your daily status update!\n"
            f"### I'm first going to check your commit messages and try to build a technical report for you.\n"
            f"### Next I will ask you for any non-technical updates from your previous work day.\n"
            f"### Finally I will ask you what you plan to work on today."
        )
        sent_message = await user.send(msg)
        await sent_message.add_reaction(THUMBS_UP_EMOJI)
        await sent_message.add_reaction(PENCIL_EMOJI)
        await sent_message.add_reaction(REPORT_SUBMISSION_EMOJI)

        def check(m) -> bool:
            return m.author == user and isinstance(m.channel, DMChannel)

        # Store the new wait_for reaction task in the global dictionary
        ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]))
        ongoing_status_requests[member.discord_id] = ongoing_task
        reaction, reactor = await ongoing_task
        ongoing_status_requests.pop(member.discord_id, None)  # Remove the task once we get the reaction

        for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]:
            await sent_message.remove_reaction(emoji, bot.user)

        while str(reaction.emoji) in [PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]:
            if str(reaction.emoji) == PENCIL_EMOJI:
                await user.send("What would you like me to change?")

                # Store the new wait_for message (feedback) task in the global dictionary
                ongoing_task = ensure_future(bot.wait_for('message', check=check))
                ongoing_status_requests[member.discord_id] = ongoing_task
                feedback = await ongoing_task
                ongoing_status_requests.pop(member.discord_id, None)  # Remove the task once we get the feedback

                # Send original + feedback to LLM for reformatting
                summarized_report = await updates_manager.summarize_feedback_and_revisions(summarized_report, feedback.content)
            elif str(reaction.emoji) == REPORT_SUBMISSION_EMOJI:
                await user.send("Please submit your technical report directly.")

                # Store the new wait_for message (report submission) task in the global dictionary
                ongoing_task = ensure_future(bot.wait_for('message', check=check))
                ongoing_status_requests[member.discord_id] = ongoing_task
                direct_report = await ongoing_task
                ongoing_status_requests.pop(member.discord_id, None)  # Remove the task once we get the report

                summarized_report = direct_report.content
                break  # Exit the while loop as the user has submitted their report directly

            msg = f"Here's the revised report:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report."
            last_sent_message = await send_long_message(user, msg)

            if last_sent_message:
                await last_sent_message.add_reaction(THUMBS_UP_EMOJI)
                await last_sent_message.add_reaction(PENCIL_EMOJI)
                await last_sent_message.add_reaction(REPORT_SUBMISSION_EMOJI)

                # Store the new wait_for reaction task in the global dictionary
                ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == last_sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]))
                ongoing_status_requests[member.discord_id] = ongoing_task
                reaction, user = await ongoing_task
                ongoing_status_requests.pop(member.discord_id, None)  # Remove the task once we get the reaction

                for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]:
                    await last_sent_message.remove_reaction(emoji, bot.user)

        # Prompt user for non-technical updates from the previous day
        non_technical_msg_prompt = "Please provide any non-technical updates from your previous working day, e.g., important meetings, interviews, etc."
        await user.send(non_technical_msg_prompt)

        # Store the new wait_for message (non-technical update) task in the global dictionary
        ongoing_task = ensure_future(bot.wait_for('message', check=check))
        ongoing_status_requests[member.discord_id] = ongoing_task
        non_technical_update_raw = await ongoing_task
        ongoing_status_requests.pop(member.discord_id, None)  # Remove the task once we get the non-technical update

        raw_updates += f"\n\n{non_technical_update_raw.content}"

        # Summarize non-technical update with LLM
        non_technical_update = await updates_manager.summarize_non_technical_updates(non_technical_update_raw.content)

        # Prompt user for their goals for the day
        goals_msg_prompt = "What do you plan to work on or accomplish today?"
        await user.send(goals_msg_prompt)

        # Store the new wait_for message (goals for the day) task in the global dictionary
        ongoing_task = ensure_future(bot.wait_for('message', check=check))
        ongoing_status_requests[member.discord_id] = ongoing_task
        goals_for_today_raw = await ongoing_task
        ongoing_status_requests.pop(member.discord_id, None)  # Remove the task once we get the goals

        # Summarize goals for the day with LLM
        goals_for_today = await updates_manager.summarize_goals_for_the_day(goals_for_today_raw.content)

        # Update the streak for this member
        streak = streaks_manager.get_streak(member.discord_id)
        streaks_manager.update_streak(member.discord_id, streak + 1)
        member.update_streak(streaks_manager.get_streak(member.discord_id))
        member.increment_weekly_checkins()

        raw_updates += f"\n\n{goals_for_today_raw.content}"
        final_updates = f"{summarized_report}\n\n{non_technical_update}\n\n{goals_for_today}"

        updates_manager.insert_status(member.discord_id, raw_updates, member.time_zone)
        updates_manager.update_summarized_status(member.discord_id, final_updates)

        # Update the Discord post using WeeklyPostManager
        await weekly_post_manager.rebuild_post(team_member_manager.team_members)

        # Member name update as a header
        member_update_header = f"## {member.name}'s Update:"

        # Compile the final report with Markdown formatting
        final_report = (
            f"\n### Technical Update:\n"
            f"{summarized_report}\n"
            f"### Non-Technical Update:\n"
            f"{non_technical_update}\n"
            f"### Goals for Today:\n"
            f"{goals_for_today}"
        )

        stand_up_feedback = await updates_manager.evaluate_performance(final_report)

        # Concatenate the member name update with the final report and send to the designated Discord channel
        complete_message = f"{member_update_header}{final_report}"
        guild = bot.get_guild(GUILD_TOKEN)
        channel_to_post_in = guild.get_channel(CHANNEL_TOKEN)
        await user.send(stand_up_feedback)
        await send_long_message(channel_to_post_in, complete_message)

async def send_long_message(destination, msg):
    max_length = 2000  # Discord's max character limit for a message
    sent_messages = []  # Keep track of all messages sent

    while len(msg) > 0:
        # If the message is shorter than the max length, send it as is
        if len(msg) <= max_length:
            sent_message = await destination.send(msg)
            sent_messages.append(sent_message)
            break  # The message is sent, so break out of the loop

        # Find the nearest newline character before the max_length
        split_index = msg.rfind('\n', 0, max_length)

        # If no newline is found, just split at max_length
        if split_index == -1:
            split_index = max_length

        # Split the message at the found index and send the first part
        part_to_send = msg[:split_index].strip()
        sent_message = await destination.send(part_to_send)
        sent_messages.append(sent_message)

        # Wait a bit to respect Discord's rate limits
        await asyncio.sleep(1)

        # Remove the part that was sent from the message
        msg = msg[split_index:].strip()

    # Return the last message sent for reaction addition
    return sent_messages[-1] if sent_messages else None

@bot.command(name='viewscheduledjobs')
async def view_scheduled_jobs(ctx):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to view scheduled jobs.")
        return

    # Get all scheduled jobs using the Scheduler's method
    scheduled_jobs = scheduler.get_all_scheduled_jobs(team_member_manager)

    # Send the scheduled jobs to the admin user
    for job in scheduled_jobs:
        await ctx.send(job)

@bot.command(name='statusrequest')
async def status_request(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to request status.")
        return

    # Find the member object using the Discord ID
    member_to_request = team_member_manager.find_member(discord_id)
    if member_to_request:
        for member in team_member_manager.team_members:
            scheduler.remove_job(member.discord_id)
        scheduler.unschedule_weekly_post()

        # Send the status request to the member
        await ctx.send(f"Status request sent to user with Discord ID {discord_id}.")

        for member in team_member_manager.team_members:
            scheduler.add_job(send_status_request, member, weekly_post_manager, streaks_manager, updates_manager)
        scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members)

        await send_status_request(member_to_request, weekly_post_manager, streaks_manager, updates_manager)
        await ctx.send(f"Status request received from user with Discord ID {discord_id}.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='adduser')
async def add_user(ctx, discord_id: int, time_zone: str, name: str, github_username: str):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to add users.")
        return

    # Add the new member using team_member_manager
    team_member_manager.add_member(discord_id, name, time_zone, github_username)

    # Update the weekly post to include the new member
    new_member = team_member_manager.find_member(discord_id)
    if new_member:
        await weekly_post_manager.rebuild_post(team_member_manager.team_members)
        scheduler.add_job(send_status_request, new_member, weekly_post_manager, streaks_manager, updates_manager)
        scheduler.unschedule_weekly_post()
        scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members)

    await ctx.send(f"User {name} added successfully.")

@bot.command(name='removeuser')
async def remove_user(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to remove users.")
        return

    # Find the member object
    member_to_remove = team_member_manager.find_member(discord_id)
    if member_to_remove:
        # Remove the member from the database
        team_member_manager.remove_member(discord_id)

        # Update the weekly post to remove the member
        await weekly_post_manager.rebuild_post(team_member_manager.team_members)
        scheduler.remove_job(discord_id)
        scheduler.unschedule_weekly_post()
        scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members)

        await ctx.send(f"User with Discord ID {discord_id} removed successfully.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='listusers')
async def list_users(ctx):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to list users.")
        return

    # List users using team_member_manager
    users = [(member.discord_id, member.name, member.time_zone, member.github_username, member.current_streak) for member in team_member_manager.team_members]
    user_list = '\n'.join([f"Name: {user[1]}, Discord ID: {user[0]}, Time Zone: {user[2]}, GitHub Username: {user[3]}, Current Streak: {user[4]}" for user in users])
    await ctx.send(f"List of users:\n{user_list}")

@bot.command(name='updatetimezone')
async def update_timezone(ctx, discord_id: int, new_time_zone: str):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to update timezones.")
        return

    # Find the member object using the Discord ID
    member_to_update = team_member_manager.find_member(discord_id)
    if member_to_update:
        # Update the timezone in the database
        team_member_manager.update_member_timezone(discord_id, new_time_zone)

        scheduler.remove_job(discord_id)
        scheduler.add_job(send_status_request, member_to_update, weekly_post_manager, streaks_manager, updates_manager)
        scheduler.unschedule_weekly_post()
        scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members)

        await ctx.send(f"Timezone for user with Discord ID {discord_id} updated to {new_time_zone}.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='updatestreak')
async def update_streak(ctx, discord_id: int, new_streak: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to update streaks.")
        return

    # Find the member object using the Discord ID
    member_to_update = team_member_manager.find_member(discord_id)
    if member_to_update:
        # Update the streak in the database
        streaks_manager.update_streak(discord_id, new_streak)
        member_to_update.update_streak(new_streak)

        # Update the Discord post using WeeklyPostManager
        await weekly_post_manager.rebuild_post(team_member_manager.team_members)

        await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='forcepostrebuild')
async def force_post_rebuild(ctx):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to force a post rebuild.")
        return

    # Rebuild the post
    await weekly_post_manager.rebuild_post(team_member_manager.team_members)

    await ctx.send("Post rebuilt successfully.")

@bot.command(name='deletelateststatus')
async def delete_latest_status(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to delete status updates.")
        return

    # Find the member object using the Discord ID
    member = team_member_manager.find_member(discord_id)
    if not member:
        await ctx.send(f"No user with Discord ID {discord_id} found.")
        return

    # Delete the newest status using the UpdatesManager's method
    updates_manager.delete_newest_status(discord_id)
    await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.")

@bot.command(name='viewuser')
async def view_user(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to view user data.")
        return

    # Get the member's statuses using the UpdatesManager's method
    statuses = updates_manager.get_all_statuses_for_user(discord_id)

    if not statuses:
        await ctx.send(f"No status updates found for user with Discord ID {discord_id}.")
        return

    # Loop through the statuses and send individual messages
    for status in statuses:
        await ctx.send(f"### **Timestamp:** {status['timestamp']}")
        await ctx.send(f"### **Raw Status:** {status['status']}")
        await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}")

@bot.command(name='setvacationstatus')
async def set_vacation_status(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to set vacation status.")
        return

    member = team_member_manager.find_member(discord_id)

    if member:
        new_status = not member.on_vacation
        team_member_manager.set_member_vacation_status(discord_id, new_status)
        await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='weeklysummary')
async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to generate weekly summaries.")
        return

    # Find the member object using the Discord ID
    member = team_member_manager.find_member(discord_id)
    if not member:
        await ctx.send(f"No user with Discord ID {discord_id} found.")
        return

    # Convert the start_date and end_date strings to datetime objects
    # Adjusting the date format to MM-DD-YYYY and setting the time
    try:
        start_date = datetime.strptime(start_date, '%m-%d-%Y')
        end_date = datetime.strptime(end_date, '%m-%d-%Y')

        # Setting the time to ensure the whole week is captured
        start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
        end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999)
    except ValueError:
        await ctx.send("Invalid date format. Please use MM-DD-YYYY.")
        return

    # Generate the weekly summary
    weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date)

    # Send the weekly summary to the admin user
    admin_user = bot.get_user(ADMIN_DISCORD_ID)
    if admin_user:
        await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}")
    else:
        await ctx.send("Unable to find the admin user.")

@bot.event
async def on_ready():
    print("Bot is online!")  # Log that the bot is online

    streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
    team_member_db = TeamMemberDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
    weekly_posts_db = WeeklyPostsDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
updates_db = UpdatesDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
2
2023-10-12 02:01:46+00:00
24k
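The `get_pagination_link` helper in the row above walks GitHub's `Link` response header to page through repositories and commits. Below is a minimal, self-contained sketch of the same pattern; `iter_paginated` is a hypothetical name, the URL and token are placeholders, and only the `requests` library is assumed:

import requests

def iter_paginated(url: str, token: str):
    """Yield items from a paginated GitHub endpoint by following rel="next" links."""
    headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json"}
    while url:
        resp = requests.get(url, headers=headers)
        resp.raise_for_status()
        yield from resp.json()
        # Header looks like: <https://api.github.com/...&page=2>; rel="next", <...>; rel="last"
        url = None
        for part in resp.headers.get("Link", "").split(", "):
            if 'rel="next"' in part:
                url = part.split("; ")[0].strip("<>")
                break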
azuline/rose
rose/virtualfs.py
[ { "identifier": "SUPPORTED_AUDIO_EXTENSIONS", "path": "rose/audiotags.py", "snippet": "SUPPORTED_AUDIO_EXTENSIONS = [\n \".mp3\",\n \".m4a\",\n \".ogg\",\n \".opus\",\n \".flac\",\n]" }, { "identifier": "AudioTags", "path": "rose/audiotags.py", "snippet": "class AudioTags:...
import collections
import contextlib
import errno
import logging
import os
import random
import re
import stat
import subprocess
import tempfile
import time
import llfuse
from collections.abc import Iterator
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Generic, Literal, TypeVar
from rose.audiotags import SUPPORTED_AUDIO_EXTENSIONS, AudioTags
from rose.cache import (
    STORED_DATA_FILE_REGEX,
    CachedRelease,
    CachedTrack,
    artist_exists,
    calculate_release_logtext,
    calculate_track_logtext,
    genre_exists,
    get_collage,
    get_playlist,
    get_release,
    get_track,
    get_tracks_associated_with_release,
    label_exists,
    list_artists,
    list_collages,
    list_genres,
    list_labels,
    list_playlists,
    list_releases_delete_this,
    update_cache_for_releases,
)
from rose.collages import (
    add_release_to_collage,
    create_collage,
    delete_collage,
    remove_release_from_collage,
    rename_collage,
)
from rose.common import RoseError, sanitize_dirname, sanitize_filename
from rose.config import Config
from rose.playlists import (
    add_track_to_playlist,
    create_playlist,
    delete_playlist,
    delete_playlist_cover_art,
    remove_track_from_playlist,
    rename_playlist,
    set_playlist_cover_art,
)
from rose.releases import (
    delete_release,
    set_release_cover_art,
)
from rose.templates import PathTemplate, eval_release_template, eval_track_template
19,926
                return VirtualPath(view="Genres")
            if len(parts) == 2:
                return VirtualPath(view="Genres", genre=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="Genres", genre=parts[1], release=parts[2])
            if len(parts) == 4:
                return VirtualPath(view="Genres", genre=parts[1], release=parts[2], file=parts[3])
            raise llfuse.FUSEError(errno.ENOENT)
        if parts[0] == "6. Labels":
            if len(parts) == 1:
                return VirtualPath(view="Labels")
            if len(parts) == 2:
                return VirtualPath(view="Labels", label=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="Labels", label=parts[1], release=parts[2])
            if len(parts) == 4:
                return VirtualPath(view="Labels", label=parts[1], release=parts[2], file=parts[3])
            raise llfuse.FUSEError(errno.ENOENT)
        if parts[0] == "7. Collages":
            if len(parts) == 1:
                return VirtualPath(view="Collages")
            if len(parts) == 2:
                return VirtualPath(view="Collages", collage=parts[1])
            if len(parts) == 3:
                return VirtualPath(view="Collages", collage=parts[1], release=parts[2])
            if len(parts) == 4:
                return VirtualPath(
                    view="Collages", collage=parts[1], release=parts[2], file=parts[3]
                )
            raise llfuse.FUSEError(errno.ENOENT)
        if parts[0] == "8. Playlists":
            if len(parts) == 1:
                return VirtualPath(view="Playlists")
            if len(parts) == 2:
                return VirtualPath(view="Playlists", playlist=parts[1])
            if len(parts) == 3:
                return VirtualPath(
                    view="Playlists",
                    playlist=parts[1],
                    file=parts[2],
                )
            raise llfuse.FUSEError(errno.ENOENT)
        raise llfuse.FUSEError(errno.ENOENT)


class VirtualNameGenerator:
    """
    Generates virtual dirnames and filenames for releases and tracks, and maintains an inverse
    mapping for looking up release/track UUIDs from their virtual paths.

    This object's data has the following lifecycle:

    1. RoseLogicalCore calls `list_virtual_x_paths` to generate all paths in a directory.
    2. Once generated, path->ID can be looked up.

    This means that RoseLogicalCore is responsible for invoking `list_virtual_x_paths` upon cache
    misses / missing file accesses. We end up invoking `list_virtual_x_path` whenever a
    non-existent path is getattr'ed, which is somewhat excessive, however, we can decouple the
    virtual templates from the cache this way, and the lookup _miss_ case should be rather rare in
    normal operations

    The VirtualNameGenerator also remembers all previous path mappings for 15 minutes since last
    use. This allows Rose to continue to serving accesses to old paths, even after the file
    metadata changed. This is useful, for example, if a directory or file is renamed (due to a
    metadata change) while its tracks are in a mpv playlist. mpv's requests to the old paths will
    still resolve, but the old paths will not show up in a readdir call. If old paths collide with
    new paths, new paths will take precedence.
    """

    def __init__(self, config: Config):
        # fmt: off
        self._config = config
        # These are the stateful maps that we use to remember path mappings. They are maps from the
        # (parent_path, virtual path) -> entity ID.
        #
        # Entries expire after 15 minutes, which implements the "serve accesses to previous paths"
        # behavior as specified in the class docstring.
        self._release_store: TTLCache[tuple[VirtualPath, str], str] = TTLCache(ttl_seconds=60 * 15)
        self._track_store: TTLCache[tuple[VirtualPath, str], str] = TTLCache(ttl_seconds=60 * 15)
        # Cache template evaluations because they're expensive.
        self._release_template_eval_cache: dict[tuple[VirtualPath, PathTemplate, str, str | None], str] = {}
        self._track_template_eval_cache: dict[tuple[VirtualPath, PathTemplate, str, str | None], str] = {}
        # fmt: on

    def list_release_paths(
        self,
        release_parent: VirtualPath,
        releases: list[CachedRelease],
    ) -> Iterator[tuple[CachedRelease, str]]:
        """
        Given a parent directory and a list of releases, calculates the virtual directory names
        for those releases, and returns a zipped iterator of the releases and their virtual
        directory names.
        """
        # For collision number generation.
        seen: set[str] = set()
        prefix_pad_size = len(str(len(releases)))
        for idx, release in enumerate(releases):
            # Determine the proper template.
            template = None
            if release_parent.view == "Releases":
                template = self._config.path_templates.all_releases.release
            elif release_parent.view == "New":
                template = self._config.path_templates.new_releases.release
            elif release_parent.view == "Recently Added":
                template = self._config.path_templates.recently_added_releases.release
            elif release_parent.view == "Artists":
                template = self._config.path_templates.artists.release
            elif release_parent.view == "Genres":
                template = self._config.path_templates.genres.release
            elif release_parent.view == "Labels":
                template = self._config.path_templates.labels.release
            elif release_parent.view == "Collages":
                template = self._config.path_templates.collages.release
            else:
                raise RoseError(f"VNAMES: No release template found for {release_parent=}.")
""" The virtualfs module renders a virtual filesystem from the read cache. It is written in an Object-Oriented style, against my typical sensibilities, because that's how the FUSE libraries tend to be implemented. But it's OK :) Since this is a pretty hefty module, we'll cover the organization. This module contains 8 classes: 1. TTLCache: A wrapper around dict that expires key/value pairs after a given TTL. 2. VirtualPath: A semantic representation of a path in the virtual filesystem along with a parser. All virtual filesystem paths are parsed by this class into a far more ergonomic dataclass. 3. VirtualNameGenerator: A class that generates virtual directory and filenames given releases and tracks, and maintains inverse mappings for resolving release IDs from virtual paths. 4. "CanShow"er: An abstraction that encapsulates the logic of whether an artist, genre, or label should be shown in their respective virtual views, based on the whitelist/blacklist configuration parameters. 5. FileHandleGenerator: A class that keeps generates new file handles. It is a counter that wraps back to 5 when the file handles exceed ~10k, as to avoid any overflows. 6. RoseLogicalCore: A logical representation of Rose's filesystem logic, freed from the annoying implementation details that a low-level library like `llfuse` comes with. 7. INodeMapper: A class that tracks the INode <-> Path mappings. It is used to convert inodes to paths in VirtualFS. 8. VirtualFS: The main Virtual Filesystem class, which manages the annoying implementation details of a low-level virtual filesystem, and delegates logic to the above classes. It uses INodeMapper and VirtualPath to translate inodes into semantically useful dataclasses, and then passes them into RoseLogicalCore. """ from __future__ import annotations logger = logging.getLogger(__name__) K = TypeVar("K") V = TypeVar("V") T = TypeVar("T") class TTLCache(Generic[K, V]): """ TTLCache is a dictionary with a time-to-live (TTL) for each key/value pair. After the TTL passes, the key/value pair is no longer accessible. We do not currently free entries in this cache, because we expect little churn to occur in entries in normal operation. We do not have a great time to clear the cache that does not affect performance. We will probably implement freeing entries later when we give more of a shit or someone complains about the memory usage. I happen to have a lot of free RAM! """ def __init__(self, ttl_seconds: int = 5): self.ttl_seconds = ttl_seconds self.__backing: dict[K, tuple[V, float]] = {} def __contains__(self, key: K) -> bool: try: _, insert_time = self.__backing[key] except KeyError: return False return time.time() - insert_time <= self.ttl_seconds def __getitem__(self, key: K) -> V: v, insert_time = self.__backing[key] if time.time() - insert_time > self.ttl_seconds: raise KeyError(key) return v def __setitem__(self, key: K, value: V) -> None: self.__backing[key] = (value, time.time()) def __delitem__(self, key: K) -> None: del self.__backing[key] def get(self, key: K, default: T) -> V | T: try: return self[key] except KeyError: return default # In collages, playlists, and releases, we print directories with position of the release/track in # the collage. When parsing, strip it out. Otherwise we will have to handle this parsing in every # method. POSITION_REGEX = re.compile(r"^([^.]+)\. ") # In recently added, we print the date that the release was added to the library. When parsing, # strip it out. 
ADDED_AT_REGEX = re.compile(r"^\[[\d-]{10}\] ") @dataclass(frozen=True, slots=True) class VirtualPath: view: ( Literal[ "Root", "Releases", "Artists", "Genres", "Labels", "Collages", "Playlists", "New", "Recently Added", ] | None ) artist: str | None = None genre: str | None = None label: str | None = None collage: str | None = None playlist: str | None = None release: str | None = None file: str | None = None @property def release_parent(self) -> VirtualPath: """Parent path of a release: Used as an input to the VirtualNameGenerator.""" return VirtualPath( view=self.view, artist=self.artist, genre=self.genre, label=self.label, collage=self.collage, ) @property def track_parent(self) -> VirtualPath: """Parent path of a track: Used as an input to the VirtualNameGenerator.""" return VirtualPath( view=self.view, artist=self.artist, genre=self.genre, label=self.label, collage=self.collage, playlist=self.playlist, release=self.release, ) @classmethod def parse(cls, path: Path) -> VirtualPath: parts = str(path.resolve()).split("/")[1:] # First part is always empty string. if len(parts) == 1 and parts[0] == "": return VirtualPath(view="Root") # Let's abort early if we recognize a path that we _know_ is not valid. This is because # invalid file accesses trigger a recalculation of virtual file paths, which we decided to # do under the assumption that invalid file accesses would be _rare_. That's not true if we # keep getting requests for these stupid paths from shell plugins. if parts[-1] in [".git", ".DS_Store", ".Trash", ".Trash-1000", "HEAD", ".envrc"]: logger.debug( f"Raising ENOENT early in the VirtualPath parser because last path part {parts[-1]} in blacklist." ) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "1. Releases": if len(parts) == 1: return VirtualPath(view="Releases") if len(parts) == 2: return VirtualPath(view="Releases", release=parts[1]) if len(parts) == 3: return VirtualPath(view="Releases", release=parts[1], file=parts[2]) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "2. Releases - New": if len(parts) == 1: return VirtualPath(view="New") if len(parts) == 2: return VirtualPath(view="New", release=parts[1]) if len(parts) == 3: return VirtualPath(view="New", release=parts[1], file=parts[2]) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "3. Releases - Recently Added": if len(parts) == 1: return VirtualPath(view="Recently Added") if len(parts) == 2: return VirtualPath(view="Recently Added", release=parts[1]) if len(parts) == 3: return VirtualPath(view="Recently Added", release=parts[1], file=parts[2]) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "4. Artists": if len(parts) == 1: return VirtualPath(view="Artists") if len(parts) == 2: return VirtualPath(view="Artists", artist=parts[1]) if len(parts) == 3: return VirtualPath(view="Artists", artist=parts[1], release=parts[2]) if len(parts) == 4: return VirtualPath(view="Artists", artist=parts[1], release=parts[2], file=parts[3]) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "5. Genres": if len(parts) == 1: return VirtualPath(view="Genres") if len(parts) == 2: return VirtualPath(view="Genres", genre=parts[1]) if len(parts) == 3: return VirtualPath(view="Genres", genre=parts[1], release=parts[2]) if len(parts) == 4: return VirtualPath(view="Genres", genre=parts[1], release=parts[2], file=parts[3]) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "6. 
Labels": if len(parts) == 1: return VirtualPath(view="Labels") if len(parts) == 2: return VirtualPath(view="Labels", label=parts[1]) if len(parts) == 3: return VirtualPath(view="Labels", label=parts[1], release=parts[2]) if len(parts) == 4: return VirtualPath(view="Labels", label=parts[1], release=parts[2], file=parts[3]) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "7. Collages": if len(parts) == 1: return VirtualPath(view="Collages") if len(parts) == 2: return VirtualPath(view="Collages", collage=parts[1]) if len(parts) == 3: return VirtualPath(view="Collages", collage=parts[1], release=parts[2]) if len(parts) == 4: return VirtualPath( view="Collages", collage=parts[1], release=parts[2], file=parts[3] ) raise llfuse.FUSEError(errno.ENOENT) if parts[0] == "8. Playlists": if len(parts) == 1: return VirtualPath(view="Playlists") if len(parts) == 2: return VirtualPath(view="Playlists", playlist=parts[1]) if len(parts) == 3: return VirtualPath( view="Playlists", playlist=parts[1], file=parts[2], ) raise llfuse.FUSEError(errno.ENOENT) raise llfuse.FUSEError(errno.ENOENT) class VirtualNameGenerator: """ Generates virtual dirnames and filenames for releases and tracks, and maintains an inverse mapping for looking up release/track UUIDs from their virtual paths. This object's data has the following lifecycle: 1. RoseLogicalCore calls `list_virtual_x_paths` to generate all paths in a directory. 2. Once generated, path->ID can be looked up. This means that RoseLogicalCore is responsible for invoking `list_virtual_x_paths` upon cache misses / missing file accesses. We end up invoking `list_virtual_x_path` whenever a non-existent path is getattr'ed, which is somewhat excessive, however, we can decouple the virtual templates from the cache this way, and the lookup _miss_ case should be rather rare in normal operations The VirtualNameGenerator also remembers all previous path mappings for 15 minutes since last use. This allows Rose to continue to serving accesses to old paths, even after the file metadata changed. This is useful, for example, if a directory or file is renamed (due to a metadata change) while its tracks are in a mpv playlist. mpv's requests to the old paths will still resolve, but the old paths will not show up in a readdir call. If old paths collide with new paths, new paths will take precedence. """ def __init__(self, config: Config): # fmt: off self._config = config # These are the stateful maps that we use to remember path mappings. They are maps from the # (parent_path, virtual path) -> entity ID. # # Entries expire after 15 minutes, which implements the "serve accesses to previous paths" # behavior as specified in the class docstring. self._release_store: TTLCache[tuple[VirtualPath, str], str] = TTLCache(ttl_seconds=60 * 15) self._track_store: TTLCache[tuple[VirtualPath, str], str] = TTLCache(ttl_seconds=60 * 15) # Cache template evaluations because they're expensive. self._release_template_eval_cache: dict[tuple[VirtualPath, PathTemplate, str, str | None], str] = {} self._track_template_eval_cache: dict[tuple[VirtualPath, PathTemplate, str, str | None], str] = {} # fmt: on def list_release_paths( self, release_parent: VirtualPath, releases: list[CachedRelease], ) -> Iterator[tuple[CachedRelease, str]]: """ Given a parent directory and a list of releases, calculates the virtual directory names for those releases, and returns a zipped iterator of the releases and their virtual directory names. """ # For collision number generation. 
seen: set[str] = set() prefix_pad_size = len(str(len(releases))) for idx, release in enumerate(releases): # Determine the proper template. template = None if release_parent.view == "Releases": template = self._config.path_templates.all_releases.release elif release_parent.view == "New": template = self._config.path_templates.new_releases.release elif release_parent.view == "Recently Added": template = self._config.path_templates.recently_added_releases.release elif release_parent.view == "Artists": template = self._config.path_templates.artists.release elif release_parent.view == "Genres": template = self._config.path_templates.genres.release elif release_parent.view == "Labels": template = self._config.path_templates.labels.release elif release_parent.view == "Collages": template = self._config.path_templates.collages.release else: raise RoseError(f"VNAMES: No release template found for {release_parent=}.")
logtext = calculate_release_logtext(
6
2023-10-09 14:42:23+00:00
24k
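The `TTLCache` in the row above is a dict wrapper with lazy expiry: entries are never evicted, they simply stop being visible once their insertion timestamp is older than the TTL. A compressed sketch of that idea, standard library only; `LazyTTLDict` is a re-derivation for illustration, not the row's exact class:

import time

class LazyTTLDict:
    """Entries behave as missing once older than ttl_seconds; nothing is eagerly evicted."""
    def __init__(self, ttl_seconds: float):
        self.ttl = ttl_seconds
        self._backing = {}  # key -> (value, insert_time)

    def __setitem__(self, key, value):
        self._backing[key] = (value, time.time())

    def __getitem__(self, key):
        value, inserted = self._backing[key]
        if time.time() - inserted > self.ttl:
            raise KeyError(key)  # expired entries pretend they were never stored
        return value

cache = LazyTTLDict(ttl_seconds=0.1)
cache["a"] = 1
assert cache["a"] == 1
time.sleep(0.2)
try:
    cache["a"]
except KeyError:
    print("expired as expected")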
zhaoyizhou1123/mbrcsl
examples/roboverse/run_mbrcsl_mlpbeh_roboverse.py
[ { "identifier": "RcslModule", "path": "offlinerlkit/modules/rcsl_module.py", "snippet": "class RcslModule(nn.Module):\n '''\n rcsl policy network\n '''\n def __init__(\n self,\n backbone: nn.Module,\n device: str = \"cpu\"\n ) -> None:\n super().__init__()\n\n ...
import numpy as np
import torch
import roboverse
import argparse
import os
import random
import pickle
import datetime
from copy import deepcopy
from typing import Dict, Tuple
from collections import defaultdict
from offlinerlkit.modules import TransformerDynamicsModel, RcslModule
from offlinerlkit.dynamics import TransformerDynamics
from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset
from offlinerlkit.utils.logger import Logger, make_log_dirs
from offlinerlkit.policy_trainer import RcslPolicyTrainer, DiffusionPolicyTrainer
from offlinerlkit.utils.none_or_str import none_or_str
from offlinerlkit.policy import SimpleDiffusionPolicy, AutoregressivePolicy, RcslPolicy
from offlinerlkit.nets import MLP
15,345
        max_rewards = np.maximum(max_rewards, rewards.flatten())  # Update max reward
        acc_returns = acc_returns + rewards.flatten()
        observations = deepcopy(next_observations)

    for k, v in rollout_transitions.items():
        rollout_transitions[k] = np.concatenate(v, axis=0)

    traj_idxs = rollout_transitions["traj_idxs"]
    rtgs = returns[traj_idxs] - rollout_transitions["acc_rets"]
    # rtgs = returns[traj_idxs]
    rollout_transitions["rtgs"] = rtgs[..., None]  # (N,1)

    return rollout_transitions, \
        {"num_transitions": num_transitions,
         "reward_mean": rewards_arr.mean(),
         "returns": returns,
         "max_rewards": max_rewards,
         "rewards_full": rewards_full}

def train(args=get_args()):
    # seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True

    # create env and dataset
    if args.task == 'pickplace':
        env = roboverse.make('Widow250PickTray-v0')
        env = PickPlaceObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)

        prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy")
        task_data_path = os.path.join(args.data_dir, "pickplace_task.npy")

        diff_dataset, _ = get_pickplace_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_pickplace_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledraweropen':
        env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)

        prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")

        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledrawercloseopen':
        env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)

        prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")

        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledrawerpickplaceopen':
        env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)

        prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")

        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    else:
        raise NotImplementedError

    env.reset(seed=args.seed)

    timestamp = datetime.datetime.now().strftime("%y-%m%d-%H%M%S")
    exp_name = f"timestamp_{timestamp}&{args.seed}"
    log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="dynamics")
    # key: output file name, value: output handler type
    print(f"Logging dynamics to {log_dirs}")
    output_config = {
        "consoleout_backup": "stdout",
        "policy_training_progress": "csv",
        "dynamics_training_progress": "csv",
        "tb": "tensorboard"
    }
# Behavior policy ablation study

'''
Recommended hyperparameters:
pickplace, horizon=40, behavior_epoch=30
doubledraweropen, horizon=50, behavior_epoch=40
doubledrawercloseopen, horizon=80, behavior_epoch=40
'''

def get_args():
    parser = argparse.ArgumentParser()
    # general
    parser.add_argument("--algo-name", type=str, default="mbrcsl_mlpbeh")
    parser.add_argument("--task", type=str, default="pickplace", help="task name")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--last_eval", action="store_false")

    # env config
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace")

    # transformer_autoregressive dynamics
    parser.add_argument("--n_layer", type=int, default=4)
    parser.add_argument("--n_head", type=int, default=4)
    parser.add_argument("--n_embd", type=int, default=32)
    parser.add_argument("--dynamics_lr", type=float, default=1e-3)
    parser.add_argument("--dynamics_epoch", type=int, default=80)
    parser.add_argument("--load_dynamics_path", type=none_or_str, default=None)

    # Behavior policy
    parser.add_argument("--behavior_epoch", type=int, default=30)
    parser.add_argument('--behavior_batch', type=int, default=256)
    parser.add_argument('--load_diffusion_path', type=none_or_str, default=None)
    parser.add_argument('--task_weight', type=float, default=1.4, help="Weight on task data when training diffusion policy")
    parser.add_argument('--sample_ratio', type=float, default=0.8, help="Use (sample_ratio * num_total_data) data to train diffusion policy")
    parser.add_argument("--behavior_hidden_dims", type=int, nargs='*', default=[200,200,200,200])
    parser.add_argument("--behavior_lr", type=float, default=1e-3)
    parser.add_argument("--behavior_weight_decay", type=float, default=0.1)

    # Rollout
    parser.add_argument('--rollout_ckpt_path', type=none_or_str, default=None, help="file dir, used to load/store rollout trajs")
    parser.add_argument('--rollout_epoch', type=int, default=1000, help="Max number of epochs to rollout the policy")
    parser.add_argument('--num_need_traj', type=int, default=5000, help="Needed valid trajs in rollout")
    parser.add_argument("--rollout-batch", type=int, default=200, help="Number of trajs to be sampled at one time")

    # RCSL policy (mlp)
    parser.add_argument("--rcsl_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200])
    parser.add_argument("--rcsl_lr", type=float, default=1e-3)
    parser.add_argument("--rcsl_batch", type=int, default=256)
    parser.add_argument("--rcsl_epoch", type=int, default=100)
    parser.add_argument("--eval_episodes", type=int, default=100)
    parser.add_argument("--holdout_ratio", type=float, default=0.2)

    return parser.parse_args()

def rollout_simple(
    init_obss: np.ndarray,
    dynamics: TransformerDynamicsModel,
    rollout_policy: SimpleDiffusionPolicy,
    rollout_length: int
) -> Tuple[Dict[str, np.ndarray], Dict]:
    '''
    Only serves for non-terminal cases
    Sample a batch of trajectories at the same time.
    Output rollout_transitions contain keys:
    obss,
    next_obss,
    actions
    rewards, (N,1)
    rtgs, (N,1)
    traj_idxs, (N)
    '''
    num_transitions = 0
    rewards_arr = np.array([])
    rollout_transitions = defaultdict(list)
    batch_size = init_obss.shape[0]
    valid_idxs = np.arange(init_obss.shape[0])  # maintain current valid trajectory indexes
    returns = np.zeros(init_obss.shape[0])  # maintain return of each trajectory
    acc_returns = np.zeros(init_obss.shape[0])  # maintain accumulated return of each valid trajectory
    max_rewards = np.zeros(init_obss.shape[0])  # maintain max reward seen in trajectory
    rewards_full = np.zeros((init_obss.shape[0], rollout_length))  # full rewards (batch, H)

    # rollout
    observations = init_obss
    goal = np.zeros((init_obss.shape[0], 1), dtype=np.float32)
    for t in range(rollout_length):
        actions = rollout_policy.select_action(observations, goal)
        next_observations, rewards, terminals, info = dynamics.step(observations, actions)
        rollout_transitions["observations"].append(observations)
        rollout_transitions["next_observations"].append(next_observations)
        rollout_transitions["actions"].append(actions)
        rollout_transitions["rewards"].append(rewards)
        rollout_transitions["terminals"].append(terminals)
        rollout_transitions["traj_idxs"].append(valid_idxs)
        rollout_transitions["acc_rets"].append(acc_returns)

        rewards = rewards.reshape(batch_size)  # (B)
        rewards_full[:, t] = rewards

        num_transitions += len(observations)
        rewards_arr = np.append(rewards_arr, rewards.flatten())
        returns = returns + rewards.flatten()  # Update return (for valid idxs only)
        max_rewards = np.maximum(max_rewards, rewards.flatten())  # Update max reward
        acc_returns = acc_returns + rewards.flatten()
        observations = deepcopy(next_observations)

    for k, v in rollout_transitions.items():
        rollout_transitions[k] = np.concatenate(v, axis=0)

    traj_idxs = rollout_transitions["traj_idxs"]
    rtgs = returns[traj_idxs] - rollout_transitions["acc_rets"]
    # rtgs = returns[traj_idxs]
    rollout_transitions["rtgs"] = rtgs[..., None]  # (N,1)

    return rollout_transitions, \
        {"num_transitions": num_transitions,
         "reward_mean": rewards_arr.mean(),
         "returns": returns,
         "max_rewards": max_rewards,
         "rewards_full": rewards_full}

def train(args=get_args()):
    # seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True

    # create env and dataset
    if args.task == 'pickplace':
        env = roboverse.make('Widow250PickTray-v0')
        env = PickPlaceObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)

        prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy")
        task_data_path = os.path.join(args.data_dir, "pickplace_task.npy")

        diff_dataset, _ = get_pickplace_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_pickplace_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledraweropen':
        env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)

        prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")

        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledrawercloseopen':
        env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)

        prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")

        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    elif args.task == 'doubledrawerpickplaceopen':
        env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0')
        env = DoubleDrawerObsWrapper(env)
        obs_space = env.observation_space
        args.obs_shape = obs_space.shape
        obs_dim = np.prod(args.obs_shape)
        args.action_shape = env.action_space.shape
        action_dim = np.prod(args.action_shape)

        prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy")
        task_data_path = os.path.join(args.data_dir, "drawer_task.npy")

        diff_dataset, _ = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path,
            sample_ratio=args.sample_ratio,
            task_weight=args.task_weight)
        dyn_dataset, init_obss_dataset = get_doubledrawer_dataset(
            prior_data_path=prior_data_path,
            task_data_path=task_data_path)
    else:
        raise NotImplementedError

    env.reset(seed=args.seed)

    timestamp = datetime.datetime.now().strftime("%y-%m%d-%H%M%S")
    exp_name = f"timestamp_{timestamp}&{args.seed}"
    log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="dynamics")
    # key: output file name, value: output handler type
    print(f"Logging dynamics to {log_dirs}")
    output_config = {
        "consoleout_backup": "stdout",
        "policy_training_progress": "csv",
        "dynamics_training_progress": "csv",
        "tb": "tensorboard"
    }
logger = Logger(log_dirs, output_config)
7
2023-10-11 08:36:06+00:00
24k
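In `rollout_simple` above, each logged transition gets a return-to-go computed as the trajectory's total return minus the return accumulated before that step (`rtgs = returns[traj_idxs] - rollout_transitions["acc_rets"]`). A small NumPy check of that identity with made-up rewards:

import numpy as np

rewards = np.array([1.0, 0.0, 2.0, 1.0])  # one trajectory's per-step rewards
total_return = rewards.sum()
acc_before = np.concatenate(([0.0], np.cumsum(rewards)[:-1]))  # return accumulated before each step
rtgs = total_return - acc_before  # return-to-go at each step
print(rtgs)  # [4. 3. 3. 1.]
# Equivalent to a reversed cumulative sum of the rewards:
np.testing.assert_allclose(rtgs, np.cumsum(rewards[::-1])[::-1])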
lmb-freiburg/ldce
ldm/models/diffusion/dpm_solver/sampler.py
[ { "identifier": "NoiseScheduleVP", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule='discrete',\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n ...
import torch
from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
17,811
"""SAMPLING ONLY.""" class DPMSolverSampler(object): def __init__(self, model, **kwargs): super().__init__() self.model = model to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') device = self.model.betas.device if x_T is None: img = torch.randn(size, device=device) else: img = x_T ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) model_fn = model_wrapper( lambda x, t, c: self.model.apply_model(x, t, c), ns, model_type="noise", guidance_type="classifier-free", condition=conditioning, unconditional_condition=unconditional_conditioning, guidance_scale=unconditional_guidance_scale, )
"""SAMPLING ONLY.""" class DPMSolverSampler(object): def __init__(self, model, **kwargs): super().__init__() self.model = model to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') device = self.model.betas.device if x_T is None: img = torch.randn(size, device=device) else: img = x_T ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) model_fn = model_wrapper( lambda x, t, c: self.model.apply_model(x, t, c), ns, model_type="noise", guidance_type="classifier-free", condition=conditioning, unconditional_condition=unconditional_conditioning, guidance_scale=unconditional_guidance_scale, )
dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
2
2023-10-10 09:40:10+00:00
24k
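The `model_wrapper` call above configures the DPM-Solver for classifier-free guidance (`guidance_type="classifier-free"` with `guidance_scale`). The combination rule itself is a one-liner; a hedged torch sketch where the two tensors stand in for real conditional and unconditional U-Net noise predictions:

import torch

def cfg_noise(eps_uncond: torch.Tensor, eps_cond: torch.Tensor, scale: float) -> torch.Tensor:
    # eps = eps_uncond + s * (eps_cond - eps_uncond); s = 1 recovers the purely conditional model
    return eps_uncond + scale * (eps_cond - eps_uncond)

eps_u = torch.zeros(1, 4, 8, 8)  # stand-in for model(x, t, unconditional_conditioning)
eps_c = torch.ones(1, 4, 8, 8)   # stand-in for model(x, t, conditioning)
print(cfg_noise(eps_u, eps_c, 7.5).mean())  # tensor(7.5000)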
spla-tam/SplaTAM
scripts/post_splatam_opt.py
[ { "identifier": "AzureKinectDataset", "path": "datasets/gradslam_datasets/azure.py", "snippet": "class AzureKinectDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = ...
import argparse
import os
import random
import sys
import shutil
import cv2
import numpy as np
import torch
import wandb
from importlib.machinery import SourceFileLoader
from tqdm import tqdm
from datasets.gradslam_datasets import (
    load_dataset_config,
    ICLDataset,
    ReplicaDataset,
    AzureKinectDataset,
    ScannetDataset,
    Ai2thorDataset,
    Record3DDataset,
    RealsenseDataset,
    TUMDataset,
    ScannetPPDataset,
    NeRFCaptureDataset
)
from utils.common_utils import seed_everything, save_seq_params, save_params, save_params_ckpt, save_seq_params_ckpt
from utils.recon_helpers import setup_camera
from utils.gs_helpers import (
    params2rendervar,
    params2depthplussilhouette,
    transformed_params2depthplussilhouette,
    transform_to_frame,
    report_progress,
    eval,
    l1_loss_v1,
    matrix_to_quaternion
)
from utils.gs_external import (
    calc_ssim,
    build_rotation,
    densify,
    get_expon_lr_func,
    update_learning_rate
)
from diff_gaussian_rasterization import GaussianRasterizer as Renderer
17,180
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) print("System Paths:") for p in sys.path: print(p) def get_dataset(config_dict, basedir, sequence, **kwargs): if config_dict["dataset_name"].lower() in ["icl"]: return ICLDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["replica"]: return ReplicaDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["azure", "azurekinect"]: return AzureKinectDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["scannet"]: return ScannetDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["ai2thor"]: return Ai2thorDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["record3d"]: return Record3DDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["realsense"]: return RealsenseDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["tum"]: return TUMDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["scannetpp"]: return ScannetPPDataset(basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["nerfcapture"]:
return NeRFCaptureDataset(basedir, sequence, **kwargs)
10
2023-11-30 20:26:47+00:00
24k
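Aside: the if/elif chain in `get_dataset` above maps `dataset_name` strings to dataset classes. The same dispatch can be written as a lookup table; a sketch using the class names imported in the record (`ScannetPPDataset` and `NeRFCaptureDataset` are omitted because they take a different constructor signature, and `get_dataset_from_registry` is an illustrative name):

DATASET_REGISTRY = {
    "icl": ICLDataset,
    "replica": ReplicaDataset,
    "azure": AzureKinectDataset,
    "azurekinect": AzureKinectDataset,
    "scannet": ScannetDataset,
    "ai2thor": Ai2thorDataset,
    "record3d": Record3DDataset,
    "realsense": RealsenseDataset,
    "tum": TUMDataset,
}

def get_dataset_from_registry(config_dict, basedir, sequence, **kwargs):
    # Registry-style equivalent of the if/elif dispatch above.
    name = config_dict["dataset_name"].lower()
    if name not in DATASET_REGISTRY:
        raise ValueError(f"Unknown dataset name: {name}")
    return DATASET_REGISTRY[name](config_dict, basedir, sequence, **kwargs)

A registry keeps adding a dataset to a one-line change and makes the supported names enumerable at runtime.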
zhyever/PatchFusion
zoedepth/trainers/zoedepth_custom_trainer.py
[ { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forwa...
import os import torch import torch.cuda.amp as amp import torch.nn as nn import numpy as np import wandb import uuid import torch.distributed as dist import copy import torch.optim as optim import matplotlib.pyplot as plt from zoedepth.trainers.loss_sample import SILogLoss, DistributionLoss from zoedepth.trainers.loss import SILogLoss as DenseSILogLoss from zoedepth.trainers.loss import BudgetConstraint, HistogramMatchingLoss, SSIM, ConsistencyLoss from zoedepth.utils.config import DATASETS_CONFIG from zoedepth.utils.misc import compute_metrics from zoedepth.data.preprocess import get_black_border from .base_trainer import BaseTrainer, is_rank_zero, colors, flatten from torchvision import transforms from PIL import Image from tqdm import tqdm from datetime import datetime as dt from zoedepth.utils.misc import generatemask
15,627
with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] return pred_depth @torch.no_grad() def crop_aware_infer(self, x, image_raw): # if we are not avoiding the black border, we can just use the normal inference if not self.config.get("avoid_boundary", False): return self.eval_infer(x) # otherwise, we need to crop the image to avoid the black border # For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8) black_border_params = get_black_border(x_np) top, bottom, left, right = black_border_params.top, black_border_params.bottom, black_border_params.left, black_border_params.right x_np_cropped = x_np[top:bottom, left:right, :] x_cropped = transforms.ToTensor()(Image.fromarray(x_np_cropped)) # run inference on the cropped image pred_depths_cropped = self.eval_infer(x_cropped.unsqueeze(0).to(self.device)) # resize the prediction to x_np_cropped's size pred_depths_cropped = nn.functional.interpolate( pred_depths_cropped, size=(x_np_cropped.shape[0], x_np_cropped.shape[1]), mode="bilinear", align_corners=False) # pad the prediction back to the original size pred_depths = torch.zeros((1, 1, x_np.shape[0], x_np.shape[1]), device=pred_depths_cropped.device, dtype=pred_depths_cropped.dtype) pred_depths[:, :, top:bottom, left:right] = pred_depths_cropped return pred_depths def validate_on_batch(self, batch, val_step): images = batch['image'].to(self.device) depths_gt = batch['depth'].to(self.device) dataset = batch['dataset'][0] image_raw = batch['image_raw'].to(self.device) mask = batch["mask"].to(self.device) disp_gt_edges = batch['disp_gt_edges'].squeeze().numpy() bboxs = batch.get("bbox", None) if bboxs is not None: bboxs = bboxs.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) if 'has_valid_depth' in batch: if not batch['has_valid_depth']: return None, None depths_gt = depths_gt.squeeze().unsqueeze(0).unsqueeze(0) mask = mask.squeeze().unsqueeze(0).unsqueeze(0) # if dataset == 'nyu': # pred_depths = self.crop_aware_infer(images, image_raw) # else: # pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = pred_depths.squeeze().unsqueeze(0).unsqueeze(0) # print(pred_depths.shape) # torch.Size([1, 1, 2160, 3840]) # print(depths_gt.shape) # torch.Size([1, 1, 2160, 3840]) with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: l_depth = self.silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool)) else: l_depth = self.dense_silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool), interpolate=True) metrics = compute_metrics(depths_gt, pred_depths, disp_gt_edges=disp_gt_edges, **self.config) losses = {f"{self.silog_loss.name}": l_depth.item()} if self.should_log and self.config.get("debug", False): print(metrics) if val_step in [21, 27] and self.should_log: if self.config.get("debug", False): pass else: if self.sec_stage: log_rgb = image_raw else: log_rgb = images scale_pred = nn.functional.interpolate( pred_depths[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] 
depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] self.log_images(rgb={"Input": log_rgb[0]}, depth={"GT": depths_gt[0], "PredictedMono": scale_pred}, prefix="Test", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return metrics, losses def train(self): print(f"Training {self.config.name}") if self.config.uid is None: self.config.uid = str(uuid.uuid4()).split('-')[-1] run_id = f"{dt.now().strftime('%d-%h_%H-%M')}-{self.config.uid}" self.config.run_id = run_id self.config.experiment_id = f"{self.config.wandb_start}_{self.config.name}{self.config.version_name}_{run_id}" self.should_write = ((not self.config.distributed) or self.config.rank == 0) self.should_log = self.should_write # and logging if self.should_log: if self.config.get("debug", False): pass else: tags = self.config.tags.split( ',') if self.config.tags != '' else None
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li # This file is partly inspired from ZoeDepth (https://github.com/isl-org/ZoeDepth/blob/main/zoedepth/trainers/zoedepth_trainer.py); author: Shariq Farooq Bhat class Trainer(BaseTrainer): def __init__(self, config, model, train_loader, test_loader=None, device=None): self.addf = config.get("addf", False) self.lazy_epoch = -1 self.boostingdepth = config.get("boostingdepth", False) super().__init__(config, model, train_loader, test_loader=test_loader, device=device) self.device = device self.silog_loss = SILogLoss(beta=config.get("beta", 0.15)) self.dense_silog_loss = DenseSILogLoss(beta=config.get("beta", 0.15)) print("sigloss's beta is set to {}".format(config.get("beta", 0.15))) self.scaler = amp.GradScaler(enabled=self.config.use_amp) self.distribution_loss = DistributionLoss(max_depth=self.config.max_depth) self.sampled_training = config.get("sampled_training", False) self.sec_stage = config.get("sec_stage", False) self.multi_consistency = config.get("multi_consistency", False) self.use_blur = config.get("use_blur", False) self.dynamic = config.get("dynamic", False) if self.dynamic: self.dynamic_unupdate_rate = config.get("dynamic_unupdate_rate", 0.0) self.budget_loss = BudgetConstraint(loss_mu=0.0, flops_all=21552.5684, warm_up=True) self.use_scale_loss = config.get("use_scale_loss", False) if self.use_scale_loss: if config.get("scale_type", "ssim"): self.scale_loss = SSIM(window_size=config.get("window_size", int(11))) else: self.scale_loss = HistogramMatchingLoss(min_depth=self.config.min_depth, max_depth=self.config.max_depth) self.scale_target = config.get("scale_target", None) self.consistency_training = config.get("consistency_training", False) if self.consistency_training: self.consistency_target = config.get("consistency_target", None) self.consistency_loss = ConsistencyLoss(self.consistency_target, config.get("focus_flatten", False), config.get("w_p", 1.0)) print("current weight for consistency loss is {}. focus_flatten is {}. 
w_p is {}".format(self.config.w_consistency, config.get("focus_flatten", False), config.get("w_p", 1.0))) def train_on_batch(self, batch, train_step, step_rate): """ Expects a batch of images and depth as input batch["image"].shape : batch_size, c, h, w batch["depth"].shape : batch_size, 1, h, w """ images, depths_gt = batch['image'].to(self.device), batch['depth'].to(self.device) image_raw = batch.get("image_raw", None) if image_raw is not None: image_raw = image_raw.to(self.device) sample_points = None if self.sampled_training: sample_points = batch['sample_points'].to(self.device) bbox = batch.get("bbox", None) if bbox is not None: bbox = bbox.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) depth_raw = batch.get("depth_raw", None) if depth_raw is not None: depth_raw = depth_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) shift = batch.get("shift", None) if shift is not None: shift = shift.to(self.device) dataset = batch['dataset'][0] b, c, h, w = images.size() mask = batch["mask"].to(self.device).to(torch.bool) sample_mask = batch.get("sample_mask", None) if sample_mask is not None: sample_mask = sample_mask.to(self.device).to(torch.bool) mask_raw = batch.get("mask_raw", None) if mask_raw is not None: mask_raw = mask_raw.to(self.device).to(torch.bool) losses = {} with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: output = self.model(images, sample_points, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) else: output = self.model(images, None, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) if self.boostingdepth: if self.lazy_epoch < self.epoch: output.update_learning_rate() self.lazy_epoch = self.epoch input_dict = dict() input_dict['data_gtfake'] = depths_gt output.set_input_train_gt(input_dict) output.optimize_parameters() pred_depths = output.fake_B pred = output.fake_B # print(torch.min(pred), torch.max(pred)) losses = output.get_current_losses() else: pred_depths = output['metric_depth'] if self.sampled_training: sampled_depth_gt = sample_points[:, :, -1].float().unsqueeze(dim=-1) sampled_depth_gt = sampled_depth_gt.permute(0, 2, 1) if self.config.get("representation", "") == 'biLaplacian': # only for sampled training for now l_dist, l_si = self.distribution_loss(output, sampled_depth_gt, mask=sample_mask) loss = self.config.w_dist * l_dist + self.config.w_si * l_si losses['distribution_loss'] = l_dist losses['sigloss'] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.sampled_training: l_si = self.silog_loss( pred_depths, sampled_depth_gt, mask=sample_mask) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, 
interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.multi_consistency: #### here here here pred_depths, coarse, fine = output['metric_depth'], output['coarse_depth_pred'], output['fine_depth_pred'] if self.consistency_training: depths_gt = torch.split(depths_gt, 1, dim=1) depths_gt = torch.cat(depths_gt, dim=0) mask = torch.split(mask, 1, dim=-1) mask = torch.cat(mask, dim=0).permute(0, 3, 1, 2) mask_raw = torch.cat([mask_raw, mask_raw], dim=0) depth_raw = torch.cat([depth_raw, depth_raw], dim=0) temp_features = output.get('temp_features', None) l_si_1, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_f, pred_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses[self.silog_loss.name] = l_si_1 losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c # loss = l_si_1 + l_si_f + l_si_c loss = l_si_1 if self.consistency_training: try: # depths_gt? pred_f? l_consistency = self.consistency_loss(pred, shift, mask, temp_features, pred_f=depths_gt) # use the resized pred except RuntimeError as e: print(e) print("some runtime error here! Hack with 0") l_consistency = torch.Tensor([0]).squeeze() losses[self.consistency_loss.name] = l_consistency loss += l_consistency * self.config.w_consistency else: l_si, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.dynamic: if step_rate > self.dynamic_unupdate_rate: warm_up_rate = min(1.0, (step_rate - self.dynamic_unupdate_rate) / 0.02) flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=warm_up_rate) loss += self.config.w_flop * flop_cost losses['flop_loss'] = flop_cost else: flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=1) loss += 0 * flop_cost losses['flop_loss'] = flop_cost if self.use_scale_loss: if self.scale_target == 'coarse': h_loss = self.scale_loss(pred_depths, output['coarse_depth_pred_roi'], mask, interpolate=True) else: h_loss = self.scale_loss(pred_depths, depths_gt, mask, interpolate=True) loss += self.config.w_scale * h_loss losses['scale_loss'] = h_loss # self.scaler.scale(loss).backward() # if self.config.clip_grad > 0: # self.scaler.unscale_(self.optimizer) # nn.utils.clip_grad_norm_( # self.model.parameters(), self.config.clip_grad) # self.scaler.step(self.optimizer) # self.scaler.update() # self.optimizer.zero_grad() self.scaler.scale(loss).backward() if self.config.clip_grad > 0: self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_( self.model.parameters(), self.config.clip_grad) self.scaler.step(self.optimizer) self.scaler.update() self.optimizer.zero_grad() if self.should_log and (self.step % int(self.config.log_images_every * self.iters_per_epoch)) == 0: if self.config.get("debug", False): pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] plt.imshow(pred.squeeze().detach().cpu().numpy()) plt.savefig('debug.png') pass else: pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] if self.consistency_training: split_images = torch.split(images, 3, dim=1) 
images = torch.cat(split_images, dim=0) self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred}, prefix="Train", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return losses @torch.no_grad() def eval_infer(self, x, image_raw, bboxs=None, crop_area=None, dataset='u4k', bbox_raw=None): m = self.model.module if self.config.multigpu else self.model if dataset == 'u4k': base_h = 540 base_w = 960 elif dataset == 'gta': base_h = 270 base_w = 480 elif dataset == 'nyu': base_h = 120 * 2 base_w = 160 * 2 else: raise NotImplementedError if dataset == 'nyu': if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] else: if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, 
mode='eval', image_raw=image_raw)['metric_depth'] return pred_depth @torch.no_grad() def crop_aware_infer(self, x, image_raw): # if we are not avoiding the black border, we can just use the normal inference if not self.config.get("avoid_boundary", False): return self.eval_infer(x) # otherwise, we need to crop the image to avoid the black border # For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8) black_border_params = get_black_border(x_np) top, bottom, left, right = black_border_params.top, black_border_params.bottom, black_border_params.left, black_border_params.right x_np_cropped = x_np[top:bottom, left:right, :] x_cropped = transforms.ToTensor()(Image.fromarray(x_np_cropped)) # run inference on the cropped image pred_depths_cropped = self.eval_infer(x_cropped.unsqueeze(0).to(self.device)) # resize the prediction to x_np_cropped's size pred_depths_cropped = nn.functional.interpolate( pred_depths_cropped, size=(x_np_cropped.shape[0], x_np_cropped.shape[1]), mode="bilinear", align_corners=False) # pad the prediction back to the original size pred_depths = torch.zeros((1, 1, x_np.shape[0], x_np.shape[1]), device=pred_depths_cropped.device, dtype=pred_depths_cropped.dtype) pred_depths[:, :, top:bottom, left:right] = pred_depths_cropped return pred_depths def validate_on_batch(self, batch, val_step): images = batch['image'].to(self.device) depths_gt = batch['depth'].to(self.device) dataset = batch['dataset'][0] image_raw = batch['image_raw'].to(self.device) mask = batch["mask"].to(self.device) disp_gt_edges = batch['disp_gt_edges'].squeeze().numpy() bboxs = batch.get("bbox", None) if bboxs is not None: bboxs = bboxs.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) if 'has_valid_depth' in batch: if not batch['has_valid_depth']: return None, None depths_gt = depths_gt.squeeze().unsqueeze(0).unsqueeze(0) mask = mask.squeeze().unsqueeze(0).unsqueeze(0) # if dataset == 'nyu': # pred_depths = self.crop_aware_infer(images, image_raw) # else: # pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = self.eval_infer(images, image_raw, bboxs, crop_area, dataset, bbox_raw) pred_depths = pred_depths.squeeze().unsqueeze(0).unsqueeze(0) # print(pred_depths.shape) # torch.Size([1, 1, 2160, 3840]) # print(depths_gt.shape) # torch.Size([1, 1, 2160, 3840]) with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: l_depth = self.silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool)) else: l_depth = self.dense_silog_loss( pred_depths, depths_gt, mask=mask.to(torch.bool), interpolate=True) metrics = compute_metrics(depths_gt, pred_depths, disp_gt_edges=disp_gt_edges, **self.config) losses = {f"{self.silog_loss.name}": l_depth.item()} if self.should_log and self.config.get("debug", False): print(metrics) if val_step in [21, 27] and self.should_log: if self.config.get("debug", False): pass else: if self.sec_stage: log_rgb = image_raw else: log_rgb = images scale_pred = nn.functional.interpolate( pred_depths[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = 
DATASETS_CONFIG[dataset]['max_depth'] self.log_images(rgb={"Input": log_rgb[0]}, depth={"GT": depths_gt[0], "PredictedMono": scale_pred}, prefix="Test", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return metrics, losses def train(self): print(f"Training {self.config.name}") if self.config.uid is None: self.config.uid = str(uuid.uuid4()).split('-')[-1] run_id = f"{dt.now().strftime('%d-%h_%H-%M')}-{self.config.uid}" self.config.run_id = run_id self.config.experiment_id = f"{self.config.wandb_start}_{self.config.name}{self.config.version_name}_{run_id}" self.should_write = ((not self.config.distributed) or self.config.rank == 0) self.should_log = self.should_write # and logging if self.should_log: if self.config.get("debug", False): pass else: tags = self.config.tags.split( ',') if self.config.tags != '' else None
wandb.init(project=self.config.project, name=self.config.experiment_id, config=flatten(self.config), dir=self.config.root,
10
2023-12-04 08:43:15+00:00
24k
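Aside: the trainer above optimizes `SILogLoss(beta=0.15)`. A sketch of the standard scale-invariant log loss this family follows (Eigen et al.); the exact reduction and the 10x scaling in the repo's `loss.py` may differ, so treat the constants and the function name as assumptions:

import torch

def silog_loss(pred, gt, mask, beta=0.15, eps=1e-6):
    # g is the per-pixel log-depth residual over valid pixels.
    g = torch.log(pred[mask] + eps) - torch.log(gt[mask] + eps)
    # Variance term plus a beta-weighted squared-mean term; beta > 0
    # reintroduces a mild penalty on global scale error.
    d = torch.var(g) + beta * torch.mean(g) ** 2
    return 10.0 * torch.sqrt(d)

With beta = 0 the loss is fully invariant to a global scaling of the predicted depth; beta = 0.15 (the default above) keeps a small amount of absolute-scale supervision.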
baaivision/GeoDream
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.models.geometry.geodream_geometry_volume import GeodreamGeometryVolume from threestudio.utils.typing import * from pysdf import SDF
18,340
mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: 
instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not 
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF) or isinstance(other, GeodreamGeometryVolume):
3
2023-12-01 01:59:42+00:00
24k
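Aside: the `sphere` and `ellipsoid` branches of `initialize_shape` above reduce to simple analytic signed distances. A self-contained sketch of those two initializers (function names are illustrative):

import torch

def sphere_sdf(points, radius):
    # Negative inside the sphere, zero on the surface, positive outside.
    return (points ** 2).sum(dim=-1, keepdim=True).sqrt() - radius

def ellipsoid_pseudo_sdf(points, size):
    # The "pseudo signed distance of an ellipsoid" used above: exact only
    # when all three semi-axes in `size` are equal.
    return ((points / size) ** 2).sum(dim=-1, keepdim=True).sqrt() - 1.0

These are evaluated at the tetrahedral grid vertices and written into `self.sdf.data` as the initialization target before optimization begins.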
horseee/DeepCache
DeepCache/sd/pipeline_text_to_video_zero.py
[ { "identifier": "UNet2DConditionModel", "path": "DeepCache/sd/unet_2d_condition.py", "snippet": "class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n r\"\"\"\n A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a samp...
import copy import numpy as np import PIL.Image import torch import torch.nn.functional as F from dataclasses import dataclass from typing import Callable, List, Optional, Union from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL from .unet_2d_condition import UNet2DConditionModel from .pipeline_stable_diffusion import StableDiffusionPipeline, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import BaseOutput from diffusers.utils.torch_utils import randn_tensor
17,098
def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(StableDiffusionPipeline): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers,
def sample_gaussian_centered(n=1000, sample_size=100, std_dev=100): samples = [] while len(samples) < sample_size: # Sample from a Gaussian centered at n/2 sample = int(np.random.normal(loc=n/2, scale=std_dev)) # Check if the sample is in bounds if 1 <= sample < n and sample not in samples: samples.append(sample) return samples def sample_from_quad(total_numbers, n_samples, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace(0, total_numbers**(1/pow), n_samples+1) # Raise these values to the power of 1.5 to get a non-linear distribution indices = np.unique(np.int32(x_values**pow))[:-1] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def sample_from_quad_center(total_numbers, n_samples, center, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace((-center)**(1/pow), (total_numbers-center)**(1/pow), n_samples+1) indices = [0] + [x+center for x in np.unique(np.int32(x_values**pow))[1:-1]] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. 
""" images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(StableDiffusionPipeline): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
1
2023-12-01 10:54:04+00:00
24k
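Aside: both attention processors in the record above implement the same key/value rewiring: within each batch element, every frame attends to frame 0. A compact sketch of that rewiring under the shape convention of `rearrange_3`/`rearrange_4` (batch and frames flattened together, frames fastest-varying); the function name is illustrative:

import torch

def cross_frame_kv(key, value, batch_size):
    # key/value: (batch * frames, seq, dim), frames fastest-varying.
    bf, seq, dim = key.shape
    frames = bf // batch_size
    key = key.reshape(batch_size, frames, seq, dim)
    value = value.reshape(batch_size, frames, seq, dim)
    # Broadcast frame 0's keys/values to every frame, then flatten back.
    key = key[:, :1].expand(batch_size, frames, seq, dim).reshape(bf, seq, dim)
    value = value[:, :1].expand(batch_size, frames, seq, dim).reshape(bf, seq, dim)
    return key, value

Queries are left untouched, so each frame keeps its own content while its appearance is anchored to the first frame of the clip.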
alvinliu0/HumanGaussian
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15,252
"+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not 
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitVolume):
4
2023-11-27 02:39:39+00:00
24k
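The shape initializer in the record above queries a mesh SDF through pysdf and flips the sign because pysdf reports positive signed distance inside the shape. A self-contained sketch of that convention; the helper name is illustrative:

import numpy as np
import torch
from pysdf import SDF

def mesh_sdf(vertices: np.ndarray, faces: np.ndarray, points: torch.Tensor) -> torch.Tensor:
    # vertices: (V, 3), faces: (F, 3); points: (N, 3) tensor on any device
    f = SDF(vertices, faces)           # pysdf: positive signed distance inside
    d = -f(points.cpu().numpy())       # flip: negative inside, positive outside
    return torch.from_numpy(d).to(points)[..., None]  # (N, 1), matching dtype/device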
EricGuo5513/momask-codes
eval_t2m_trans_res.py
[ { "identifier": "MaskTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class MaskTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8,\n num_heads=4, dropout=0.1, clip_dim=512, cond_drop_prob=0.1,\n ...
import os import torch import utils.eval_t2m as eval_t2m import numpy as np from os.path import join as pjoin from models.mask_transformer.transformer import MaskTransformer, ResidualTransformer from models.vq.model import RVQVAE from options.eval_option import EvalT2MOptions from utils.get_opt import get_opt from motion_loaders.dataset_motion_loader import get_dataset_motion_loader from models.t2m_eval_wrapper import EvaluatorModelWrapper from utils.fixseed import fixseed
14,808
def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location=opt.device) model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Mask Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) dim_pose = 251 if opt.dataset_name == 'kit' else 263 # out_dir = pjoin(opt.check) root_dir = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) model_dir = pjoin(root_dir, 'model') out_dir = pjoin(root_dir, 'eval') os.makedirs(out_dir, exist_ok=True) out_path = pjoin(out_dir, "%s.log"%opt.ext) f = open(pjoin(out_path), 'w') model_opt_path = pjoin(root_dir, 'opt.txt') model_opt = get_opt(model_opt_path, device=opt.device) clip_version = 'ViT-B/32' vq_opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, model_opt.vq_name, 'opt.txt') vq_opt = get_opt(vq_opt_path, device=opt.device) vq_model, vq_opt = load_vq_model(vq_opt) model_opt.num_tokens = vq_opt.nb_code model_opt.num_quantizers = vq_opt.num_quantizers model_opt.code_dim = vq_opt.code_dim res_opt_path = pjoin(opt.checkpoints_dir, 
opt.dataset_name, opt.res_name, 'opt.txt') res_opt = get_opt(res_opt_path, device=opt.device) res_model = load_res_model(res_opt) assert res_opt.vq_name == model_opt.vq_name dataset_opt_path = 'checkpoints/kit/Comp_v6_KLD005/opt.txt' if opt.dataset_name == 'kit' \ else 'checkpoints/t2m/Comp_v6_KLD005/opt.txt' wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda')) eval_wrapper = EvaluatorModelWrapper(wrapper_opt) ##### ---- Dataloader ---- ##### opt.nb_joints = 21 if opt.dataset_name == 'kit' else 22
def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location=opt.device) model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Mask Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) dim_pose = 251 if opt.dataset_name == 'kit' else 263 # out_dir = pjoin(opt.check) root_dir = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) model_dir = pjoin(root_dir, 'model') out_dir = pjoin(root_dir, 'eval') os.makedirs(out_dir, exist_ok=True) out_path = pjoin(out_dir, "%s.log"%opt.ext) f = open(pjoin(out_path), 'w') model_opt_path = pjoin(root_dir, 'opt.txt') model_opt = get_opt(model_opt_path, device=opt.device) clip_version = 'ViT-B/32' vq_opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, model_opt.vq_name, 'opt.txt') vq_opt = get_opt(vq_opt_path, device=opt.device) vq_model, vq_opt = load_vq_model(vq_opt) model_opt.num_tokens = vq_opt.nb_code model_opt.num_quantizers = vq_opt.num_quantizers model_opt.code_dim = vq_opt.code_dim res_opt_path = pjoin(opt.checkpoints_dir, 
opt.dataset_name, opt.res_name, 'opt.txt') res_opt = get_opt(res_opt_path, device=opt.device) res_model = load_res_model(res_opt) assert res_opt.vq_name == model_opt.vq_name dataset_opt_path = 'checkpoints/kit/Comp_v6_KLD005/opt.txt' if opt.dataset_name == 'kit' \ else 'checkpoints/t2m/Comp_v6_KLD005/opt.txt' wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda')) eval_wrapper = EvaluatorModelWrapper(wrapper_opt) ##### ---- Dataloader ---- ##### opt.nb_joints = 21 if opt.dataset_name == 'kit' else 22
eval_val_loader, _ = get_dataset_motion_loader(dataset_opt_path, 32, 'test', device=opt.device)
5
2023-11-29 19:21:27+00:00
24k
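load_trans_model and load_res_model above share a tolerant checkpoint-loading pattern: load with strict=False, require zero unexpected keys, and allow missing keys only for the frozen CLIP branch. That pattern in isolation, with an illustrative function name:

import torch

def load_tolerant(model, ckpt_path, state_key, device="cpu"):
    ckpt = torch.load(ckpt_path, map_location=device)
    missing, unexpected = model.load_state_dict(ckpt[state_key], strict=False)
    assert len(unexpected) == 0                               # nothing extra in ckpt
    assert all(k.startswith("clip_model.") for k in missing)  # only frozen CLIP absent
    return model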
dvlab-research/LLMGA
llmga/diffusers/src/diffusers/models/autoencoder_tiny.py
[ { "identifier": "ConfigMixin", "path": "llmga/diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [...
from dataclasses import dataclass from typing import Tuple, Union from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.accelerate_utils import apply_forward_hook from .modeling_utils import ModelMixin from .vae import DecoderOutput, DecoderTiny, EncoderTiny import torch
19,857
# Copyright 2023 Ollin Boer Bohan and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @dataclass class AutoencoderTinyOutput(BaseOutput): """ Output of AutoencoderTiny encoding method. Args: latents (`torch.Tensor`): Encoded outputs of the `Encoder`. """ latents: torch.Tensor class AutoencoderTiny(ModelMixin, ConfigMixin): r""" A tiny distilled VAE model for encoding images into latents and decoding latent representations into images. [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`. This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image. out_channels (`int`, *optional*, defaults to 3): Number of channels in the output. encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`): Tuple of integers representing the number of output channels for each encoder block. The length of the tuple should be equal to the number of encoder blocks. decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`): Tuple of integers representing the number of output channels for each decoder block. The length of the tuple should be equal to the number of decoder blocks. act_fn (`str`, *optional*, defaults to `"relu"`): Activation function to be used throughout the model. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent representation. The latent space acts as a compressed representation of the input image. upsampling_scaling_factor (`int`, *optional*, defaults to 2): Scaling factor for upsampling in the decoder. It determines the size of the output image during the upsampling process. num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`): Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The length of the tuple should be equal to the number of stages in the encoder. Each stage has a different number of encoder blocks. num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`): Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The length of the tuple should be equal to the number of stages in the decoder. Each stage has a different number of decoder blocks. latent_magnitude (`float`, *optional*, defaults to 3.0): Magnitude of the latent representation. This parameter scales the latent representation values to control the extent of information preservation. latent_shift (float, *optional*, defaults to 0.5): Shift applied to the latent representation. This parameter controls the center of the latent space. scaling_factor (`float`, *optional*, defaults to 1.0): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. 
This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. For this Autoencoder, however, no such scaling factor was used, hence the value of 1.0 as the default. force_upcast (`bool`, *optional*, default to `False`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE can be fine-tuned / trained to a lower range without losing too much precision, in which case `force_upcast` can be set to `False` (see this fp16-friendly [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)). """ _supports_gradient_checkpointing = True
# Copyright 2023 Ollin Boer Bohan and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @dataclass class AutoencoderTinyOutput(BaseOutput): """ Output of AutoencoderTiny encoding method. Args: latents (`torch.Tensor`): Encoded outputs of the `Encoder`. """ latents: torch.Tensor class AutoencoderTiny(ModelMixin, ConfigMixin): r""" A tiny distilled VAE model for encoding images into latents and decoding latent representations into images. [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`. This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image. out_channels (`int`, *optional*, defaults to 3): Number of channels in the output. encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`): Tuple of integers representing the number of output channels for each encoder block. The length of the tuple should be equal to the number of encoder blocks. decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`): Tuple of integers representing the number of output channels for each decoder block. The length of the tuple should be equal to the number of decoder blocks. act_fn (`str`, *optional*, defaults to `"relu"`): Activation function to be used throughout the model. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent representation. The latent space acts as a compressed representation of the input image. upsampling_scaling_factor (`int`, *optional*, defaults to 2): Scaling factor for upsampling in the decoder. It determines the size of the output image during the upsampling process. num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`): Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The length of the tuple should be equal to the number of stages in the encoder. Each stage has a different number of encoder blocks. num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`): Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The length of the tuple should be equal to the number of stages in the decoder. Each stage has a different number of decoder blocks. latent_magnitude (`float`, *optional*, defaults to 3.0): Magnitude of the latent representation. This parameter scales the latent representation values to control the extent of information preservation. latent_shift (float, *optional*, defaults to 0.5): Shift applied to the latent representation. This parameter controls the center of the latent space. scaling_factor (`float`, *optional*, defaults to 1.0): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. 
This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. For this Autoencoder, however, no such scaling factor was used, hence the value of 1.0 as the default. force_upcast (`bool`, *optional*, default to `False`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE can be fine-tuned / trained to a lower range without losing too much precision, in which case `force_upcast` can be set to `False` (see this fp16-friendly [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)). """ _supports_gradient_checkpointing = True
@register_to_config
1
2023-11-27 18:46:55+00:00
24k
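The scaling_factor docstring above fixes a simple convention: multiply latents by the factor after encoding and divide before decoding. A two-line sketch of it (with the AutoencoderTiny default of 1.0, both are no-ops):

import torch

scaling_factor = 1.0  # AutoencoderTiny default, per the docstring above

def scale_latents(z: torch.Tensor) -> torch.Tensor:
    return z * scaling_factor   # applied before the diffusion model

def unscale_latents(z: torch.Tensor) -> torch.Tensor:
    return z / scaling_factor   # applied before decoding back to pixels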
JiahuiLei/GART
solver.py
[ { "identifier": "prepare_real_seq", "path": "lib_data/get_data.py", "snippet": "def prepare_real_seq(\n seq_name,\n dataset_mode,\n split=\"train\",\n image_zoom_ratio=0.5,\n balance=False,\n ins_avt_wild_start_end_skip=None,\n):\n logging.info(\"Prepare real seq: {}\".format(seq_na...
from matplotlib import pyplot as plt from tqdm import tqdm from transforms3d.euler import euler2mat from omegaconf import OmegaConf from lib_data.get_data import prepare_real_seq from lib_data.data_provider import DatabasePoseProvider from lib_gart.templates import get_template from lib_gart.model import GaussianTemplateModel, AdditionalBones from lib_gart.optim_utils import * from lib_render.gauspl_renderer import render_cam_pcl from lib_gart.model_utils import transform_mu_frame from utils.misc import * from utils.viz import viz_render from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle from pytorch3d.ops import knn_points from lib_guidance.camera_sampling import sample_camera, fov2K, opencv2blender from viz_utils import viz_spinning, viz_human_all, viz_dog_all from utils.ssim import ssim from datetime import datetime from test_utils import test from lib_guidance.mvdream.mvdream_guidance import MVDream from utils.lpips import LPIPS import imageio import torch import numpy as np import os, os.path as osp, shutil, sys import time import logging import argparse
21,036
setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED) template = get_template( mode=self.mode, template_model_path=self.template_model_path, init_beta=betas, cano_pose_type=getattr(self, "CANO_POSE_TYPE", "t_pose"), voxel_deformer_res=getattr(self, "VOXEL_DEFORMER_RES", 64), ) add_bones = AdditionalBones( num_bones=getattr(self, "W_REST_DIM", 0), mode=getattr(self, "W_REST_MODE", "pose-mlp"), total_t=add_bones_total_t, # pose mlp mlp_hidden_dims=getattr( self, "ADD_BONES_MLP_HIDDEN_DIMS", [256, 256, 256, 256] ), pose_dim=23 * 3 if self.mode == "human" else 34 * 3 + 7, )
# from lib_marchingcubes.gaumesh_utils import MeshExtractor try: # from lib_guidance.sd_utils import StableDiffusion except: logging.warning("No guidance module") class TGFitter: def __init__( self, log_dir, profile_fn, mode, template_model_path="data/smpl_model/SMPL_NEUTRAL.pkl", device=torch.device("cuda:0"), **kwargs, ) -> None: self.log_dir = log_dir os.makedirs(self.log_dir, exist_ok=True) self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! 
this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED) template = get_template( mode=self.mode, template_model_path=self.template_model_path, init_beta=betas, cano_pose_type=getattr(self, "CANO_POSE_TYPE", "t_pose"), voxel_deformer_res=getattr(self, "VOXEL_DEFORMER_RES", 64), ) add_bones = AdditionalBones( num_bones=getattr(self, "W_REST_DIM", 0), mode=getattr(self, "W_REST_MODE", "pose-mlp"), total_t=add_bones_total_t, # pose mlp mlp_hidden_dims=getattr( self, "ADD_BONES_MLP_HIDDEN_DIMS", [256, 256, 256, 256] ), pose_dim=23 * 3 if self.mode == "human" else 34 * 3 + 7, )
model = GaussianTemplateModel(
3
2023-11-27 17:30:04+00:00
24k
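TGFitter above configures itself by copying OmegaConf profile keys onto the instance, letting kwargs override them, and reading optional flags through getattr with defaults. A minimal sketch of that pattern; the class name is illustrative:

from omegaconf import OmegaConf

class ProfileConfig:
    def __init__(self, profile_fn: str, **overrides):
        for k, v in OmegaConf.load(profile_fn).items():
            setattr(self, k, v)          # profile keys become attributes
        for k, v in overrides.items():
            setattr(self, k, v)          # kwargs override the profile
        # optional flags fall back to defaults when the profile omits them
        self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False)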
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T...
import os import sys import numpy as np import torch import json import imageio import cv2 import random from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from smpl.smpl_numpy import SMPL from smplx.body_models import SMPLX from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
16,223
elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try: xyz, rgb, _ = read_points3D_binary(bin_path) except: xyz, rgb, _ = read_points3D_text(txt_path) storePly(ply_path, xyz, rgb) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"): cam_infos = [] with open(os.path.join(path, transformsfile)) as json_file: contents = json.load(json_file) fovx = contents["camera_angle_x"] frames = contents["frames"] for idx, frame in enumerate(frames[:20]): cam_name = os.path.join(path, frame["file_path"] + extension) # NeRF 'transform_matrix' is a camera-to-world transform c2w = np.array(frame["transform_matrix"]) # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) c2w[:3, 1:3] *= -1 # get the world-to-camera transform and set R, T w2c = np.linalg.inv(c2w) R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] image_path = os.path.join(path, cam_name) image_name = Path(cam_name).stem image = Image.open(image_path) im_data = np.array(image.convert("RGBA")) bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0]) norm_data = im_data / 255.0 arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4]) image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB") fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1]) FovY = fovy FovX = fovx cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=None, bound_mask=None, width=image.size[0], height=image.size[1])) return cam_infos def 
readNerfSyntheticInfo(path, white_background, eval, extension=".png"): print("Reading Training Transforms") train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension) print("Reading Test Transforms") test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3 shs = np.random.random((num_pts, 3)) / 255.0
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" 
image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try: xyz, rgb, _ = read_points3D_binary(bin_path) except: xyz, rgb, _ = read_points3D_text(txt_path) storePly(ply_path, xyz, rgb) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"): cam_infos = [] with open(os.path.join(path, transformsfile)) as json_file: contents = json.load(json_file) fovx = contents["camera_angle_x"] frames = contents["frames"] for idx, frame in enumerate(frames[:20]): cam_name = os.path.join(path, frame["file_path"] + extension) # NeRF 'transform_matrix' is a camera-to-world transform c2w = 
np.array(frame["transform_matrix"]) # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) c2w[:3, 1:3] *= -1 # get the world-to-camera transform and set R, T w2c = np.linalg.inv(c2w) R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] image_path = os.path.join(path, cam_name) image_name = Path(cam_name).stem image = Image.open(image_path) im_data = np.array(image.convert("RGBA")) bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0]) norm_data = im_data / 255.0 arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4]) image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB") fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1]) FovY = fovy FovX = fovx cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=None, bound_mask=None, width=image.size[0], height=image.size[1])) return cam_infos def readNerfSyntheticInfo(path, white_background, eval, extension=".png"): print("Reading Training Transforms") train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension) print("Reading Test Transforms") test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3 shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
10
2023-11-29 07:10:39+00:00
24k
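readCamerasFromTransforms above converts NeRF-style camera-to-world matrices (OpenGL/Blender axes) into the COLMAP-style R, T it stores. That conversion isolated, assuming only numpy:

import numpy as np

def blender_c2w_to_colmap_rt(c2w: np.ndarray):
    c2w = c2w.copy()
    c2w[:3, 1:3] *= -1              # Y up / Z back  ->  Y down / Z forward
    w2c = np.linalg.inv(c2w)        # world-to-camera
    R = np.transpose(w2c[:3, :3])   # stored transposed for the glm/CUDA rasterizer
    T = w2c[:3, 3]
    return R, T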
cswry/SeeSR
test_seesr.py
[ { "identifier": "StableDiffusionControlNetPipeline", "path": "pipelines/pipeline_seesr.py", "snippet": "class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoaderMixin):\n r\"\"\"\n Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.\n\n ...
import os import sys import cv2 import glob import argparse import numpy as np import torch import torch.utils.checkpoint import torch.nn as nn import torch.nn.functional as F from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import AutoencoderKL, DDPMScheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor from pipelines.pipeline_seesr import StableDiffusionControlNetPipeline from utils.misc import load_dreambooth_lora from utils.wavelet_color_fix import wavelet_color_fix, adain_color_fix from ram.models.ram_lora import ram from ram import inference_ram as inference from ram import get_transform from typing import Mapping, Any from torchvision import transforms from models.controlnet import ControlNetModel from models.unet_2d_condition import UNet2DConditionModel
15,067
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline validation_pipeline = StableDiffusionControlNetPipeline( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, feature_extractor=feature_extractor, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=None, requires_safety_checker=False, ) validation_pipeline._init_tiled_vae(encoder_tile_size=args.vae_encoder_tiled_size, decoder_tile_size=args.vae_decoder_tiled_size) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) controlnet.to(accelerator.device, dtype=weight_dtype) return validation_pipeline def load_tag_model(args, device='cuda'):
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline validation_pipeline = StableDiffusionControlNetPipeline( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, feature_extractor=feature_extractor, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=None, requires_safety_checker=False, ) validation_pipeline._init_tiled_vae(encoder_tile_size=args.vae_encoder_tiled_size, decoder_tile_size=args.vae_decoder_tiled_size) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) controlnet.to(accelerator.device, dtype=weight_dtype) return validation_pipeline def load_tag_model(args, device='cuda'):
model = ram(pretrained='preset/models/ram_swin_large_14m.pth',
4
2023-11-27 08:50:33+00:00
24k
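The SeeSR loader earlier in this record reconciles checkpoints saved with and without `nn.DataParallel` by adding or stripping the `module.` prefix on state-dict keys. A minimal standalone sketch of that normalization; the helper name and the toy model are illustrative, not part of the record:

```python
import torch.nn as nn

def normalize_module_prefix(model: nn.Module, state_dict: dict) -> dict:
    """Add or strip the 'module.' prefix so checkpoint keys match the model."""
    model_prefixed = next(iter(model.state_dict())).startswith("module.")
    ckpt_prefixed = next(iter(state_dict)).startswith("module.")
    if model_prefixed and not ckpt_prefixed:
        return {f"module.{k}": v for k, v in state_dict.items()}
    if not model_prefixed and ckpt_prefixed:
        return {k[len("module."):]: v for k, v in state_dict.items()}
    return state_dict

# Usage: load a checkpoint that was saved from a DataParallel-wrapped model.
net = nn.Linear(4, 2)
ckpt = {f"module.{k}": v for k, v in net.state_dict().items()}  # simulate a DP save
net.load_state_dict(normalize_module_prefix(net, ckpt))
```

Like the record's loader, this keys off the first entry of each state dict, which assumes the prefix is uniform across all parameters.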
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n sel...
import os import time import argparse import json import math import numpy as np import torch import nvdiffrast.torch as dr import itertools import xatlas import open3d as o3d import random import imageio import os.path as osp import pickle from dataset.dataset_mesh import DatasetMesh from dataset.dataset_mesh import get_camera_params from geometry.dmtet_x_dreamer import DMTetGeometry from geometry.dlmesh_x_dreamer import DLMesh from render import obj from render import material from render import util from render import mesh from render import texture from render import mlptexture from render import light from render import render from sd_cglora import StableDiffusion from tqdm import tqdm from render import util from render.video import Video
15,076
parser.add_argument("--sds_weight_strategy", type=int, nargs=1, default=0, choices=[0, 1, 2], help="The strategy of the sds loss's weight") parser.add_argument("--translation_y", type= float, nargs=1, default= 0 , help="translation of the initial shape on the y-axis") parser.add_argument("--coarse_iter", type= int, nargs=1, default= 1000 , help="The iteration number of the coarse stage.") parser.add_argument('--early_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in early phase") parser.add_argument('--late_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in late phase") parser.add_argument("--sdf_init_shape_rotate_x", type= int, nargs=1, default= 0 , help="rotation of the initial shape on the x-axis") parser.add_argument("--if_flip_the_normal", action='store_true', default=False , help="Flip the x-axis positive half-axis of Normal. We find this process helps to alleviate the Janus problem.") parser.add_argument("--front_threshold", type= int, nargs=1, default= 45 , help="the range of front view would be [-front_threshold, front_threshold") parser.add_argument("--if_use_bump", type=bool, default= True , help="whether to use perturbed normals during appearing modeling") parser.add_argument("--uv_padding_block", type= int, default= 4 , help="The block of uv padding.") FLAGS = parser.parse_args() FLAGS.mtl_override = None # Override material of model FLAGS.dmtet_grid = 64 # Resolution of initial tet grid. We provide 64, 128 and 256 resolution grids. Other resolutions can be generated with https://github.com/crawforddoran/quartet FLAGS.mesh_scale = 2.1 # Scale of tet grid box. Adjust to cover the model FLAGS.env_scale = 1.0 # Env map intensity multiplier FLAGS.envmap = None # HDR environment probe FLAGS.relight = None # HDR environment probe(relight) FLAGS.display = None # Conf validation window/display. E.g. [{"relight" : <path to envlight>}] FLAGS.camera_space_light = False # Fixed light in camera space. This is needed for setups like ethiopian head where the scanned object rotates on a stand. 
FLAGS.lock_light = False # Disable light optimization in the second pass FLAGS.lock_pos = False # Disable vertex position optimization in the second pass FLAGS.pre_load = True # Pre-load entire dataset into memory for faster training FLAGS.kd_min = [ 0.0, 0.0, 0.0, 0.0] # Limits for kd FLAGS.kd_max = [ 1.0, 1.0, 1.0, 1.0] FLAGS.ks_min = [ 0.0, 0.08, 0.0] # Limits for ks FLAGS.ks_max = [ 1.0, 1.0, 1.0] FLAGS.nrm_min = [-1.0, -1.0, 0.0] # Limits for normal map FLAGS.nrm_max = [ 1.0, 1.0, 1.0] FLAGS.cam_near_far = [1, 50] FLAGS.learn_light = False FLAGS.gpu_number = 1 FLAGS.sdf_init_shape_scale=[1.0, 1.0, 1.0] FLAGS.multi_gpu = "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) > 1 if FLAGS.multi_gpu: FLAGS.gpu_number = int(os.environ["WORLD_SIZE"]) FLAGS.local_rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group(backend="nccl", world_size = FLAGS.gpu_number, rank = FLAGS.local_rank) torch.cuda.set_device(FLAGS.local_rank) if FLAGS.config is not None: data = json.load(open(FLAGS.config, 'r')) for key in data: FLAGS.__dict__[key] = data[key] if FLAGS.display_res is None: FLAGS.display_res = FLAGS.train_res if FLAGS.local_rank == 0: print("Config / Flags:") print("---------") for key in FLAGS.__dict__.keys(): print(key, FLAGS.__dict__[key]) print("---------") seed_everything(FLAGS.seed, FLAGS.local_rank) os.makedirs(FLAGS.out_dir, exist_ok=True) glctx = dr.RasterizeCudaContext() # ============================================================================================== # Create data pipeline # ============================================================================================== dataset_train = DatasetMesh(glctx, FLAGS, validate=False) dataset_validate = DatasetMesh(glctx, FLAGS, validate=True) dataset_gif = DatasetMesh(glctx, FLAGS, gif=True) # ============================================================================================== # Create env light with trainable parameters # ============================================================================================== if FLAGS.mode == 'appearance_modeling' and FLAGS.base_mesh is not None: if FLAGS.learn_light: lgt = light.create_trainable_env_rnd(512, scale=0.0, bias=1) else: lgt = light.load_env(FLAGS.envmap, scale=FLAGS.env_scale) else: lgt = None if FLAGS.sdf_init_shape in ['ellipsoid', 'cylinder', 'custom_mesh'] and FLAGS.mode == 'geometry_modeling': if FLAGS.sdf_init_shape == 'ellipsoid': init_shape = o3d.geometry.TriangleMesh.create_sphere(1) elif FLAGS.sdf_init_shape == 'cylinder': init_shape = o3d.geometry.TriangleMesh.create_cylinder(radius=0.75, height=1.2, resolution=20, split=4, create_uv_map=False) elif FLAGS.sdf_init_shape == 'custom_mesh': if FLAGS.base_mesh: init_shape = get_normalize_mesh(FLAGS.base_mesh) else: assert False, "[Error] The path of custom mesh is invalid ! (geometry modeling)" else: assert False, "Invalid init type" vertices = np.asarray(init_shape.vertices) vertices[...,0]=vertices[...,0] * FLAGS.sdf_init_shape_scale[0] vertices[...,1]=vertices[...,1] * FLAGS.sdf_init_shape_scale[1] vertices[...,2]=vertices[...,2] * FLAGS.sdf_init_shape_scale[2] vertices = vertices @ util.rotate_x_2(np.deg2rad(FLAGS.sdf_init_shape_rotate_x)) vertices[...,1]=vertices[...,1] + FLAGS.translation_y init_shape.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) points_surface = np.asarray(init_shape.sample_points_poisson_disk(5000).points) init_shape = o3d.t.geometry.TriangleMesh.from_legacy(init_shape) scene = o3d.t.geometry.RaycastingScene() scene.add_triangles(init_shape) scene_and_vertices = [scene, points_surface] guidance = StableDiffusion(device = 'cuda', mode = FLAGS.mode, text = FLAGS.text, add_directional_text = FLAGS.add_directional_text, batch = FLAGS.batch, guidance_weight = FLAGS.guidance_weight, sds_weight_strategy = FLAGS.sds_weight_strategy, early_time_step_range = FLAGS.early_time_step_range, late_time_step_range= FLAGS.late_time_step_range) if FLAGS.mode == 'geometry_modeling' :
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh) mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal']) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh @torch.no_grad() def xatlas_uvmap1(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) new_mesh = mesh.Mesh( base=eval_mesh) mask, kd, ks, normal = render.render_uv1(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'], FLAGS.uv_padding_block) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh 
############################################################################### # Utility functions for material ############################################################################### def get_normalize_mesh(pro_path): mesh = o3d.io.read_triangle_mesh(pro_path) vertices = np.asarray(mesh.vertices) shift = np.mean(vertices,axis=0) scale = np.max(np.linalg.norm(vertices-shift, ord=2, axis=1)) vertices = (vertices-shift) / scale mesh.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) return mesh def initial_guness_material(geometry, mlp, FLAGS, init_mat=None): # ipdb.set_trace(()) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') if mlp: mlp_min = torch.cat((kd_min[0:3], ks_min, nrm_min), dim=0) mlp_max = torch.cat((kd_max[0:3], ks_max, nrm_max), dim=0) mlp_map_opt = mlptexture.MLPTexture3D(geometry.getAABB(), channels=9, min_max=[mlp_min, mlp_max]) mat = material.Material({'kd_ks_normal' : mlp_map_opt}) else: # Setup Kd (albedo) and Ks (x, roughness, metalness) textures if FLAGS.random_textures or init_mat is None: num_channels = 4 if FLAGS.layers > 1 else 3 kd_init = torch.rand(size=FLAGS.texture_res + [num_channels], device='cuda') * (kd_max - kd_min)[None, None, 0:num_channels] + kd_min[None, None, 0:num_channels] kd_map_opt = texture.create_trainable(kd_init , FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ksR = np.random.uniform(size=FLAGS.texture_res + [1], low=0.0, high=0.01) ksG = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[1].cpu(), high=ks_max[1].cpu()) ksB = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[2].cpu(), high=ks_max[2].cpu()) ks_map_opt = texture.create_trainable(np.concatenate((ksR, ksG, ksB), axis=2), FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) else: kd_map_opt = texture.create_trainable(init_mat['kd'], FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ks_map_opt = texture.create_trainable(init_mat['ks'], FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) # Setup normal map if FLAGS.random_textures or init_mat is None or 'normal' not in init_mat: normal_map_opt = texture.create_trainable(np.array([0, 0, 1]), FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) else: normal_map_opt = texture.create_trainable(init_mat['normal'], FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) mat = material.Material({ 'kd' : kd_map_opt, 'ks' : ks_map_opt, 'normal' : normal_map_opt }) if init_mat is not None: mat['bsdf'] = init_mat['bsdf'] else: mat['bsdf'] = 'pbr' return mat ############################################################################### # Validation & testing ############################################################################### # @torch.no_grad() def validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight = None): result_dict = {} with torch.no_grad(): if FLAGS.mode == 'appearance_modeling': with torch.no_grad(): lgt.build_mips() if FLAGS.camera_space_light: lgt.xfm(target['mv']) if relight != None: relight.build_mips() buffers = geometry.render(glctx, target, lgt, opt_material, if_use_bump = FLAGS.if_use_bump) result_dict['shaded'] = buffers['shaded'][0, ..., 
0:3] result_dict['shaded'] = util.rgb_to_srgb(result_dict['shaded']) if relight != None: result_dict['relight'] = geometry.render(glctx, target, relight, opt_material, if_use_bump = FLAGS.if_use_bump)['shaded'][0, ..., 0:3] result_dict['relight'] = util.rgb_to_srgb(result_dict['relight']) result_dict['mask'] = (buffers['shaded'][0, ..., 3:4]) result_image = result_dict['shaded'] if FLAGS.display is not None : # white_bg = torch.ones_like(target['background']) for layer in FLAGS.display: if 'latlong' in layer and layer['latlong']: if isinstance(lgt, light.EnvironmentLight): result_dict['light_image'] = util.cubemap_to_latlong(lgt.base, FLAGS.display_res) result_image = torch.cat([result_image, result_dict['light_image']], axis=1) elif 'bsdf' in layer: buffers = geometry.render(glctx, target, lgt, opt_material, bsdf=layer['bsdf'], if_use_bump = FLAGS.if_use_bump) if layer['bsdf'] == 'kd': result_dict[layer['bsdf']] = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) elif layer['bsdf'] == 'normal': result_dict[layer['bsdf']] = (buffers['shaded'][0, ..., 0:3] + 1) * 0.5 else: result_dict[layer['bsdf']] = buffers['shaded'][0, ..., 0:3] result_image = torch.cat([result_image, result_dict[layer['bsdf']]], axis=1) return result_image, result_dict def save_gif(dir,fps): imgpath = dir frames = [] for idx in sorted(os.listdir(imgpath)): img = osp.join(imgpath,idx) frames.append(imageio.imread(img)) imageio.mimsave(os.path.join(dir, 'eval.gif'),frames,'GIF',duration=1/fps,loop=0) @torch.no_grad() def validate(glctx, geometry, opt_material, lgt, dataset_validate, out_dir, FLAGS, relight= None): # ============================================================================================== # Validation loop # ============================================================================================== mse_values = [] psnr_values = [] dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_validate.collate) os.makedirs(out_dir, exist_ok=True) shaded_dir = os.path.join(out_dir, "shaded") relight_dir = os.path.join(out_dir, "relight") kd_dir = os.path.join(out_dir, "kd") ks_dir = os.path.join(out_dir, "ks") normal_dir = os.path.join(out_dir, "normal") mask_dir = os.path.join(out_dir, "mask") os.makedirs(shaded_dir, exist_ok=True) os.makedirs(relight_dir, exist_ok=True) os.makedirs(kd_dir, exist_ok=True) os.makedirs(ks_dir, exist_ok=True) os.makedirs(normal_dir, exist_ok=True) os.makedirs(mask_dir, exist_ok=True) print("Running validation") dataloader_validate = tqdm(dataloader_validate) for it, target in enumerate(dataloader_validate): # Mix validation background target = prepare_batch(target, 'white') result_image, result_dict = validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight) for k in result_dict.keys(): np_img = result_dict[k].detach().cpu().numpy() if k == 'shaded': util.save_image(shaded_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'relight': util.save_image(relight_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'kd': util.save_image(kd_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'ks': util.save_image(ks_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'normal': util.save_image(normal_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'mask': util.save_image(mask_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) if 'shaded' in result_dict.keys(): save_gif(shaded_dir,30) if 'relight' in result_dict.keys(): save_gif(relight_dir,30) if 'kd' in result_dict.keys(): 
save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0: video = Video(FLAGS.out_dir) if FLAGS.local_rank == 0: dataloader_train = tqdm(dataloader_train) for it, target in enumerate(dataloader_train): # Mix randomized background into dataset image target = prepare_batch(target, FLAGS.train_background) # Show/save image before training step (want to get correct rendering of input) if FLAGS.local_rank == 0: save_image = FLAGS.save_interval and (it % FLAGS.save_interval == 0) save_video = FLAGS.video_interval and (it % FLAGS.video_interval == 0) if save_image: result_image, result_dict = validate_itr(glctx, 
prepare_batch(next(v_it), FLAGS.train_background), geometry, opt_material, lgt, FLAGS) #prepare_batch(next(v_it), FLAGS.background) np_result_image = result_image.detach().cpu().numpy() util.save_image(FLAGS.out_dir + '/' + ('img_%s_%06d.png' % (FLAGS.mode, img_cnt)), np_result_image) util.save_image(FLAGS.out_dir + '/' + ('mask_%s_%06d.png' % (FLAGS.mode, img_cnt)), result_dict['mask'].detach().cpu().numpy()) img_cnt = img_cnt+1 if save_video: with torch.no_grad(): params = get_camera_params( resolution=512, fov=45, elev_angle=-20, azim_angle =rot_ang, ) rot_ang += 1 if FLAGS.mode =='geometry_modeling': buffers = geometry.render(glctx, params, lgt, opt_material, bsdf='normal', if_use_bump = FLAGS.if_use_bump) video_image = (buffers['shaded'][0, ..., 0:3]+1)/2 else: buffers = geometry.render(glctx, params, lgt, opt_material, bsdf='pbr', if_use_bump = FLAGS.if_use_bump) video_image = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) video_image = video.ready_image(video_image) iter_start_time = time.time() if FLAGS.mode =='geometry_modeling': if it<=400: if_pretrain = True else: if_pretrain = False if_normal =True else: if_pretrain = False if_normal = False with torch.cuda.amp.autocast(enabled= True): if if_pretrain== True: reg_loss = model(target, it, if_normal, if_pretrain= if_pretrain, scene_and_vertices = scene_and_vertices) img_loss = 0 sds_loss = 0 attention_loss = 0 if if_pretrain == False: sds_loss, img_loss, reg_loss, attention_loss = model(target, it, if_normal, if_pretrain= if_pretrain, scene_and_vertices =None) if FLAGS.mode =='geometry_modeling': if(it<1000): attention_loss = 0 else: if(it<500): attention_loss = 0 # ============================================================================================== # Final loss # ============================================================================================== total_loss = img_loss + reg_loss + sds_loss + attention_loss if if_pretrain == True: scaler.scale(total_loss).backward() if if_pretrain == False: scaler.scale(total_loss).backward() img_loss_vec.append(img_loss.item()) reg_loss_vec.append(reg_loss.item()) # ============================================================================================== # Backpropagate # ============================================================================================== if if_normal == False and if_pretrain == False: scaler.step(optimizer) optimizer.zero_grad() if if_normal == True or if_pretrain == True: if optimize_geometry: scaler.step(optimizer_mesh) optimizer_mesh.zero_grad() for param in guidance.parameters(): if param.grad is not None and torch.isnan(param.grad).any(): param.grad = torch.nan_to_num(param.grad, nan=0.0) max_norm = 5.0 torch.nn.utils.clip_grad_norm_(guidance.parameters(), max_norm) if if_pretrain == False: optimizer_lora.step() optimizer_lora.zero_grad() for param in guidance.parameters(): param.data = torch.nan_to_num(param.data, nan=0.0, posinf=None, neginf=None) scaler.update() # ============================================================================================== # Clamp trainables to reasonable range # ============================================================================================== with torch.no_grad(): if 'kd' in opt_material: opt_material['kd'].clamp_() if 'ks' in opt_material: opt_material['ks'].clamp_() if 'normal' in opt_material: opt_material['normal'].clamp_() opt_material['normal'].normalize_() if lgt is not None: lgt.clamp_(min=0.0) torch.cuda.current_stream().synchronize() iter_dur_vec.append(time.time() - 
iter_start_time) return geometry, opt_material def seed_everything(seed, local_rank): random.seed(seed + local_rank) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed + local_rank) torch.manual_seed(seed) torch.cuda.manual_seed(seed) if __name__ == "__main__": parser = argparse.ArgumentParser(description='nvdiffrec') parser.add_argument('--config', type=str, default='configs_clean3/icecream_geometry_debug.json', help='Config file') parser.add_argument('-i', '--iter', type=int, default=5000) parser.add_argument('-b', '--batch', type=int, default=1) parser.add_argument('-s', '--spp', type=int, default=1) parser.add_argument('-l', '--layers', type=int, default=1) parser.add_argument('-r', '--train-res', nargs=2, type=int, default=[512, 512]) parser.add_argument('-dr', '--display-res', type=int, default=None) parser.add_argument('-tr', '--texture-res', nargs=2, type=int, default=[1024, 1024]) parser.add_argument('-si', '--save-interval', type=int, default=1000, help="The interval of saving an image") parser.add_argument('-vi', '--video_interval', type=int, default=10, help="The interval of saving a frame of the video") parser.add_argument('-mr', '--min-roughness', type=float, default=0.08) parser.add_argument('-mip', '--custom-mip', action='store_true', default=False) parser.add_argument('-rt', '--random-textures', action='store_true', default=False) parser.add_argument('-bg', '--train_background', default='black', choices=['black', 'white', 'checker', 'reference']) parser.add_argument('-o', '--out-dir', type=str, default='results/result_debug/icecream_geometry') parser.add_argument('-rm', '--ref_mesh', type=str) parser.add_argument('-bm', '--base-mesh', type=str, default=None) parser.add_argument('--validate', type=bool, default=True) parser.add_argument("--local_rank", type=int, default=0, help="For distributed training: local_rank") parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument("--add_directional_text", action='store_true', default=False) parser.add_argument('--mode', default='geometry_modeling', choices=['geometry_modeling', 'appearance_modeling']) parser.add_argument('--text', default=None, help="text prompt") parser.add_argument('--sdf_init_shape', default='ellipsoid', choices=['ellipsoid', 'cylinder', 'custom_mesh']) parser.add_argument('--camera_random_jitter', type= float, default=0.4, help="A large value is advantageous for the extension of objects such as ears or sharp corners to grow.") parser.add_argument('--fovy_range', nargs=2, type=float, default=[25.71, 45.00]) parser.add_argument('--elevation_range', nargs=2, type=int, default=[-10, 45], help="The elevation range must be in [-90, 90].") parser.add_argument("--guidance_weight", type=int, default=100, help="The weight of classifier-free guidance") parser.add_argument("--sds_weight_strategy", type=int, nargs=1, default=0, choices=[0, 1, 2], help="The strategy of the sds loss's weight") parser.add_argument("--translation_y", type= float, nargs=1, default= 0 , help="translation of the initial shape on the y-axis") parser.add_argument("--coarse_iter", type= int, nargs=1, default= 1000 , help="The iteration number of the coarse stage.") parser.add_argument('--early_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in early phase") parser.add_argument('--late_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in late phase") parser.add_argument("--sdf_init_shape_rotate_x", type= int, 
nargs=1, default= 0 , help="rotation of the initial shape on the x-axis") parser.add_argument("--if_flip_the_normal", action='store_true', default=False , help="Flip the x-axis positive half-axis of Normal. We find this process helps to alleviate the Janus problem.") parser.add_argument("--front_threshold", type= int, nargs=1, default= 45 , help="the range of front view would be [-front_threshold, front_threshold]") parser.add_argument("--if_use_bump", type=bool, default= True , help="whether to use perturbed normals during appearance modeling") parser.add_argument("--uv_padding_block", type= int, default= 4 , help="The block of uv padding.") FLAGS = parser.parse_args() FLAGS.mtl_override = None # Override material of model FLAGS.dmtet_grid = 64 # Resolution of initial tet grid. We provide 64, 128 and 256 resolution grids. Other resolutions can be generated with https://github.com/crawforddoran/quartet FLAGS.mesh_scale = 2.1 # Scale of tet grid box. Adjust to cover the model FLAGS.env_scale = 1.0 # Env map intensity multiplier FLAGS.envmap = None # HDR environment probe FLAGS.relight = None # HDR environment probe(relight) FLAGS.display = None # Conf validation window/display. E.g. [{"relight" : <path to envlight>}] FLAGS.camera_space_light = False # Fixed light in camera space. This is needed for setups like ethiopian head where the scanned object rotates on a stand. FLAGS.lock_light = False # Disable light optimization in the second pass FLAGS.lock_pos = False # Disable vertex position optimization in the second pass FLAGS.pre_load = True # Pre-load entire dataset into memory for faster training FLAGS.kd_min = [ 0.0, 0.0, 0.0, 0.0] # Limits for kd FLAGS.kd_max = [ 1.0, 1.0, 1.0, 1.0] FLAGS.ks_min = [ 0.0, 0.08, 0.0] # Limits for ks FLAGS.ks_max = [ 1.0, 1.0, 1.0] FLAGS.nrm_min = [-1.0, -1.0, 0.0] # Limits for normal map FLAGS.nrm_max = [ 1.0, 1.0, 1.0] FLAGS.cam_near_far = [1, 50] FLAGS.learn_light = False FLAGS.gpu_number = 1 FLAGS.sdf_init_shape_scale=[1.0, 1.0, 1.0] FLAGS.multi_gpu = "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) > 1 if FLAGS.multi_gpu: FLAGS.gpu_number = int(os.environ["WORLD_SIZE"]) FLAGS.local_rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group(backend="nccl", world_size = FLAGS.gpu_number, rank = FLAGS.local_rank) torch.cuda.set_device(FLAGS.local_rank) if FLAGS.config is not None: data = json.load(open(FLAGS.config, 'r')) for key in data: FLAGS.__dict__[key] = data[key] if FLAGS.display_res is None: FLAGS.display_res = FLAGS.train_res if FLAGS.local_rank == 0: print("Config / Flags:") print("---------") for key in FLAGS.__dict__.keys(): print(key, FLAGS.__dict__[key]) print("---------") seed_everything(FLAGS.seed, FLAGS.local_rank) os.makedirs(FLAGS.out_dir, exist_ok=True) glctx = dr.RasterizeCudaContext() # ============================================================================================== # Create data pipeline # ============================================================================================== dataset_train = DatasetMesh(glctx, FLAGS, validate=False) dataset_validate = DatasetMesh(glctx, FLAGS, validate=True) dataset_gif = DatasetMesh(glctx, FLAGS, gif=True) # ============================================================================================== # Create env light with trainable parameters # ============================================================================================== if FLAGS.mode == 'appearance_modeling' and FLAGS.base_mesh is not None: if FLAGS.learn_light: lgt = 
light.create_trainable_env_rnd(512, scale=0.0, bias=1) else: lgt = light.load_env(FLAGS.envmap, scale=FLAGS.env_scale) else: lgt = None if FLAGS.sdf_init_shape in ['ellipsoid', 'cylinder', 'custom_mesh'] and FLAGS.mode == 'geometry_modeling': if FLAGS.sdf_init_shape == 'ellipsoid': init_shape = o3d.geometry.TriangleMesh.create_sphere(1) elif FLAGS.sdf_init_shape == 'cylinder': init_shape = o3d.geometry.TriangleMesh.create_cylinder(radius=0.75, height=1.2, resolution=20, split=4, create_uv_map=False) elif FLAGS.sdf_init_shape == 'custom_mesh': if FLAGS.base_mesh: init_shape = get_normalize_mesh(FLAGS.base_mesh) else: assert False, "[Error] The path of custom mesh is invalid ! (geometry modeling)" else: assert False, "Invalid init type" vertices = np.asarray(init_shape.vertices) vertices[...,0]=vertices[...,0] * FLAGS.sdf_init_shape_scale[0] vertices[...,1]=vertices[...,1] * FLAGS.sdf_init_shape_scale[1] vertices[...,2]=vertices[...,2] * FLAGS.sdf_init_shape_scale[2] vertices = vertices @ util.rotate_x_2(np.deg2rad(FLAGS.sdf_init_shape_rotate_x)) vertices[...,1]=vertices[...,1] + FLAGS.translation_y init_shape.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) points_surface = np.asarray(init_shape.sample_points_poisson_disk(5000).points) init_shape = o3d.t.geometry.TriangleMesh.from_legacy(init_shape) scene = o3d.t.geometry.RaycastingScene() scene.add_triangles(init_shape) scene_and_vertices = [scene, points_surface] guidance = StableDiffusion(device = 'cuda', mode = FLAGS.mode, text = FLAGS.text, add_directional_text = FLAGS.add_directional_text, batch = FLAGS.batch, guidance_weight = FLAGS.guidance_weight, sds_weight_strategy = FLAGS.sds_weight_strategy, early_time_step_range = FLAGS.early_time_step_range, late_time_step_range= FLAGS.late_time_step_range) if FLAGS.mode == 'geometry_modeling' :
geometry = DMTetGeometry(FLAGS.dmtet_grid, FLAGS.mesh_scale, FLAGS)
2
2023-11-27 13:44:01+00:00
24k
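The X-Dreamer loop above runs its forward pass under `torch.cuda.amp.autocast` and routes the loss through a `GradScaler` (scale, backward, step, update). A minimal sketch of that mixed-precision choreography, assuming a CUDA device and a toy model in place of the record's `Trainer`:

```python
import torch

model = torch.nn.Linear(8, 1).cuda()
opt = torch.optim.AdamW(model.parameters(), lr=1e-2)
scaler = torch.cuda.amp.GradScaler(enabled=True)

for step in range(3):
    x = torch.randn(4, 8, device="cuda")
    with torch.cuda.amp.autocast(enabled=True):
        loss = model(x).pow(2).mean()
    scaler.scale(loss).backward()  # scale the loss so fp16 grads stay representable
    scaler.step(opt)               # unscales grads; skips the step on inf/NaN
    opt.zero_grad()
    scaler.update()                # adapt the scale factor for the next iteration
```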
camenduru/magicanimate-hf
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
16,750
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel,
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel,
controlnet: ControlNetModel,
1
2023-12-04 20:47:34+00:00
24k
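Animation pipelines like the one in this record shuttle video latents between a 5-D `(batch, channel, frame, height, width)` layout and per-frame 2-D batches so that image-trained VAE/UNet modules can be reused. A sketch of the `einops` round trip, with a multiplication standing in for the pipeline's actual `vae.decode` call:

```python
import torch
from einops import rearrange

latents = torch.randn(2, 4, 16, 32, 32)  # b c f h w

# Fold frames into the batch axis for a 2D module...
frames = rearrange(latents, "b c f h w -> (b f) c h w")
decoded = frames * 2.0  # stand-in for per-frame vae.decode(...)

# ...then restore the temporal axis.
video = rearrange(decoded, "(b f) c h w -> b c f h w", f=16)
assert video.shape == latents.shape
```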
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-i...
import collections import functools import logging from ._collections import HTTPHeaderDict, RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url
14,704
""" with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contains the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. 
""" if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. 
""" if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None
kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change()
0
2023-11-27 07:01:39+00:00
24k
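The `PoolManager` above hands out one `ConnectionPool` per normalized pool key and evicts the least recently used pool once `num_pools` is exceeded. A usage sketch against urllib3's public API, assuming network access to example.com:

```python
import urllib3

http = urllib3.PoolManager(num_pools=2)  # cache at most two pools

r1 = http.request("GET", "http://example.com/")
r2 = http.request("GET", "http://example.com/other")  # same scheme/host/port: pool reused
print(r1.status, r2.status, len(http.pools))          # e.g. 200 200 1
```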
NobiDeveloper/Nobita-Filter-Bot
plugins/commands.py
[ { "identifier": "script", "path": "Script.py", "snippet": "class script(object):\n START_TXT = \"\"\"\n<b>{},\n\nɪ ᴄᴀɴ ᴘʀᴏᴠɪᴅᴇ ᴍᴏᴠɪᴇs ᴀɴᴅ sᴇʀɪᴇs,\nᴊᴜsᴛ ᴀᴅᴅ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴀɴᴅ ᴇɴᴊᴏʏ 😍\n\n💞 ᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ : <a href='https://telegram.me/MovieVillaYT'>ᴍᴏᴠɪᴇ ᴠɪʟʟᴀ</a></b>\n\"\"\"\n\n HE...
import os import logging import random import asyncio import re import json import base64 from Script import script from pyrogram import Client, filters, enums from pyrogram.errors import ChatAdminRequired, FloodWait from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup from database.ia_filterdb import Media, get_file_details, unpack_new_file_id, get_bad_files from database.users_chats_db import db from info import CHANNELS, ADMINS, AUTH_CHANNEL, LOG_CHANNEL, PICS, BATCH_FILE_CAPTION, CUSTOM_FILE_CAPTION, PROTECT_CONTENT, CHNL_LNK, GRP_LNK, REQST_CHANNEL, SUPPORT_CHAT_ID, SUPPORT_CHAT, MAX_B_TN, VERIFY, SHORTLINK_API, SHORTLINK_URL, TUTORIAL, IS_TUTORIAL, STICKERS from utils import get_settings, get_size, is_subscribed, save_group_settings, temp, verify_user, check_token, check_verification, get_token, get_shortlink, get_tutorial from database.connections_mdb import active_connection from plugins.pm_filter import ENABLE_SHORTLINK
17,791
logger = logging.getLogger(__name__) BATCH_FILES = {} @Client.on_message(filters.command("start") & filters.incoming) async def start(client, message): if message.chat.type in [enums.ChatType.GROUP, enums.ChatType.SUPERGROUP]: buttons = [ [ InlineKeyboardButton('🤖 ᴜᴘᴅᴀᴛᴇꜱ 🤖', url="https://telegram.me/Nobideveloper") ], [ InlineKeyboardButton('♻️ ᴘʟᴇᴀꜱᴇ ꜱʜᴀʀᴇ ♻️', url=f"https://telegram.me/share/url?url=telegram.me/Nobideveloper"), ] ] reply_markup = InlineKeyboardMarkup(buttons) await message.reply_sticker(sticker=random.choice(STICKERS), reply_markup=reply_markup) await asyncio.sleep(2) if not await db.get_chat(message.chat.id): total=await client.get_chat_members_count(message.chat.id) await client.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, "Unknown")) await db.add_chat(message.chat.id, message.chat.title) return if not await db.is_user_exist(message.from_user.id): await db.add_user(message.from_user.id, message.from_user.first_name) await client.send_message(LOG_CHANNEL, script.LOG_TEXT_P.format(message.from_user.id, message.from_user.mention)) if len(message.command) != 2: buttons = [[ InlineKeyboardButton('⇄ ᴀᴅᴅ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ ⇄', url=f'http://telegram.me/{temp.U_NAME}?startgroup=true') ],[ InlineKeyboardButton('👨‍💻 ᴏᴡɴᴇʀ​', callback_data='button'), InlineKeyboardButton('🌿 ꜱᴜᴘᴘᴏʀᴛ', callback_data='group_info') ],[ InlineKeyboardButton('💠 ʜᴇʟᴘ 💠', callback_data='help'), InlineKeyboardButton('♻️ ᴀʙᴏᴜᴛ ♻️', callback_data='about') ],[ InlineKeyboardButton('💰 ᴇᴀʀɴ ᴍᴏɴᴇʏ ᴡɪᴛʜ ʙᴏᴛ 💸', callback_data='shortlink_info') ]] reply_markup = InlineKeyboardMarkup(buttons) await message.reply_photo( photo=random.choice(PICS), caption=script.START_TXT.format(message.from_user.mention, temp.U_NAME, temp.B_NAME), reply_markup=reply_markup, parse_mode=enums.ParseMode.HTML ) return
logger = logging.getLogger(__name__) BATCH_FILES = {} @Client.on_message(filters.command("start") & filters.incoming) async def start(client, message): if message.chat.type in [enums.ChatType.GROUP, enums.ChatType.SUPERGROUP]: buttons = [ [ InlineKeyboardButton('🤖 ᴜᴘᴅᴀᴛᴇꜱ 🤖', url="https://telegram.me/Nobideveloper") ], [ InlineKeyboardButton('♻️ ᴘʟᴇᴀꜱᴇ ꜱʜᴀʀᴇ ♻️', url=f"https://telegram.me/share/url?url=telegram.me/Nobideveloper"), ] ] reply_markup = InlineKeyboardMarkup(buttons) await message.reply_sticker(sticker=random.choice(STICKERS), reply_markup=reply_markup) await asyncio.sleep(2) if not await db.get_chat(message.chat.id): total=await client.get_chat_members_count(message.chat.id) await client.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, "Unknown")) await db.add_chat(message.chat.id, message.chat.title) return if not await db.is_user_exist(message.from_user.id): await db.add_user(message.from_user.id, message.from_user.first_name) await client.send_message(LOG_CHANNEL, script.LOG_TEXT_P.format(message.from_user.id, message.from_user.mention)) if len(message.command) != 2: buttons = [[ InlineKeyboardButton('⇄ ᴀᴅᴅ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ ⇄', url=f'http://telegram.me/{temp.U_NAME}?startgroup=true') ],[ InlineKeyboardButton('👨‍💻 ᴏᴡɴᴇʀ​', callback_data='button'), InlineKeyboardButton('🌿 ꜱᴜᴘᴘᴏʀᴛ', callback_data='group_info') ],[ InlineKeyboardButton('💠 ʜᴇʟᴘ 💠', callback_data='help'), InlineKeyboardButton('♻️ ᴀʙᴏᴜᴛ ♻️', callback_data='about') ],[ InlineKeyboardButton('💰 ᴇᴀʀɴ ᴍᴏɴᴇʏ ᴡɪᴛʜ ʙᴏᴛ 💸', callback_data='shortlink_info') ]] reply_markup = InlineKeyboardMarkup(buttons) await message.reply_photo( photo=random.choice(PICS), caption=script.START_TXT.format(message.from_user.mention, temp.U_NAME, temp.B_NAME), reply_markup=reply_markup, parse_mode=enums.ParseMode.HTML ) return
if AUTH_CHANNEL and not await is_subscribed(client, message):
8
2023-11-28 13:36:56+00:00
24k
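The `/start` handler above only shows the `len(message.command) != 2` branch; when a second argument is present it is a deep-link payload, which bots of this kind typically round-trip through URL-safe base64 (the file imports `base64` for exactly this). A sketch of that pattern under those assumptions; `encode_payload` and `decode_payload` are illustrative helpers, not the repo's own functions.

import base64

def encode_payload(text: str) -> str:
    # URL-safe base64 with '=' padding stripped, as passed in ?start=<payload>
    return base64.urlsafe_b64encode(text.encode()).decode().rstrip("=")

def decode_payload(data: str) -> str:
    # Restore padding to a multiple of 4 before decoding.
    return base64.urlsafe_b64decode(data + "=" * (-len(data) % 4)).decode()

token = encode_payload("file_123")
assert decode_payload(token) == "file_123"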
chenxx89/BFRffusion
models/models.py
[ { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\...
import torch import os import numpy as np import math import shutil import safetensors.torch from ldm.modules.diffusionmodules.util import timestep_embedding from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.diffusionmodules.openaimodel import UNetModel from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from data.dataset_instantiate import instantiate_from_config as instantiate_dataset_from_config from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from metrics.metrics_all import calculate_psnr_ssim, calculate_lpips, calculate_NIQE, calculate_fid_folder from torch.utils.data import DataLoader from PIL import Image from torch.optim.lr_scheduler import LambdaLR from omegaconf import OmegaConf
20,632
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path) model = instantiate_from_config(config.model).cpu() print(f'Loaded model config from [{config_path}]') return model class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, **kwargs): hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x.type(self.dtype) for i, module in enumerate(self.input_blocks): h = module(h, emb, context) if ((i+1)%3 == 0) and control is not None: h = h + control.pop(0) hs.append(h) h = self.middle_block(h, emb, context) if control is not None: h += control.pop(0) for i, module in enumerate(self.output_blocks): if control is None: h = torch.cat([h, hs.pop()], dim=1) else: h = torch.cat([h, hs.pop()], dim=1) h = module(h, emb, context) if ((i+2)%3 == 0) and control is not None: h = h + control.pop(0) h = h.type(x.dtype) return self.out(h)
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path) model = instantiate_from_config(config.model).cpu() print(f'Loaded model config from [{config_path}]') return model class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, **kwargs): hs = [] t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) h = x.type(self.dtype) for i, module in enumerate(self.input_blocks): h = module(h, emb, context) if ((i+1)%3 == 0) and control is not None: h = h + control.pop(0) hs.append(h) h = self.middle_block(h, emb, context) if control is not None: h += control.pop(0) for i, module in enumerate(self.output_blocks): if control is None: h = torch.cat([h, hs.pop()], dim=1) else: h = torch.cat([h, hs.pop()], dim=1) h = module(h, emb, context) if ((i+2)%3 == 0) and control is not None: h = h + control.pop(0) h = h.type(x.dtype) return self.out(h)
class BFRffusion(LatentDiffusion):
2
2023-11-30 13:50:58+00:00
24k
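In `ControlledUnetModel.forward` above, control residuals are popped off a list and added after every third input block (`(i+1) % 3 == 0`), once at the middle block, and after every third output block (`(i+2) % 3 == 0`). A toy trace of that injection schedule with identity blocks; shapes and values here are made up purely to show the accumulation.

import torch
import torch.nn as nn

blocks = nn.ModuleList(nn.Identity() for _ in range(9))
x = torch.zeros(1, 4)
control = [torch.ones(1, 4) * s for s in (1.0, 2.0, 3.0)]

for i, block in enumerate(blocks):
    x = block(x)
    if (i + 1) % 3 == 0 and control:
        x = x + control.pop(0)  # blocks 2, 5 and 8 receive a control residual
print(x)  # tensor([[6., 6., 6., 6.]]) -- the residuals 1 + 2 + 3 accumulate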
IanYeung/MGLD-VSR
ldm/models/diffusion/ddpm_inv.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import os import numpy as np import pytorch_lightning as pl from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
15,948
img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, shape,cond,verbose=False,**kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True,**kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False, plot_diffusion_rows=False, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc 
elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., embedding_reg_weight=0., unfreeze_model=False, model_lr=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.embedding_reg_weight = embedding_reg_weight self.unfreeze_model = unfreeze_model self.model_lr = model_lr if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] 
tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) 
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: 
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, personalization_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if not self.unfreeze_model: self.cond_stage_model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False self.model.eval() self.model.train = disabled_train for param in self.model.parameters(): param.requires_grad = False self.embedding_manager = self.instantiate_embedding_manager(personalization_config, self.cond_stage_model) for param in self.embedding_manager.embedding_parameters(): param.requires_grad = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def instantiate_embedding_manager(self, config, embedder): model = instantiate_from_config(config, embedder=embedder) if config.params.get("embedding_manager_ckpt", None): # do not load if missing OR empty string model.load(config.params.embedding_manager_ckpt) return model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c, embedding_manager=self.embedding_manager) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 
1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: 
if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. 
(128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops 
together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) if self.embedding_reg_weight > 0: loss_embedding_reg = self.embedding_manager.embedding_to_coarse_loss().mean() loss_dict.update({f'{prefix}/loss_emb_reg': loss_embedding_reg}) loss += (self.embedding_reg_weight * loss_embedding_reg) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, shape,cond,verbose=False,**kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True,**kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False, plot_diffusion_rows=False, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = 
log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
elif isimage(xc):
4
2023-11-30 01:50:29+00:00
24k
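The p_sample code in the record above draws x_{t-1} from the model's Gaussian posterior and masks out the noise on the final step, so sampling at t == 0 is deterministic. A minimal standalone sketch of that update rule, assuming plain tensors for the predicted mean and log-variance (the helper name and shapes are illustrative, not the record's API):

import torch

def ancestral_step(model_mean: torch.Tensor, model_log_variance: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    # x_{t-1} = mu_theta(x_t, t) + sigma_t * z, with z ~ N(0, I) and sigma_t = exp(0.5 * log_var).
    noise = torch.randn_like(model_mean)
    # Zero the noise where t == 0, mirroring nonzero_mask in p_sample above.
    nonzero_mask = (1 - (t == 0).float()).reshape(-1, *((1,) * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise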
Czm369/MixPL
mmdet/configs/rtmdet/rtmdet_ins_s_8xb32_300e_coco.py
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of t...
from mmengine.config import read_base from .rtmdet_ins_l_8xb32_300e_coco import * from mmcv.transforms.loading import LoadImageFromFile from mmcv.transforms.processing import RandomResize from mmengine.hooks.ema_hook import EMAHook from mmdet.datasets.transforms.formatting import PackDetInputs from mmdet.datasets.transforms.loading import (FilterAnnotations, LoadAnnotations) from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug) from mmdet.engine.hooks.pipeline_switch_hook import PipelineSwitchHook from mmdet.models.layers.ema import ExpMomentumEMA
18,108
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False),
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False),
dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0),
4
2023-11-30 08:58:00+00:00
24k
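The MixPL record above is an mmengine pure-Python config: inside with read_base() the starred import pulls the L-variant's variables (model, train_pipeline, backend_args, ...) into the module, and the child file then mutates them in place via model.update(...). A hedged sketch of loading such a config from a checkout of the repo (the printed value 0.33 comes from the record's own override; the path assumes the mmdetection layout):

from mmengine.config import Config

# Pure-Python configs load the same way as dict-style ones (mmengine >= 0.8.0).
cfg = Config.fromfile('mmdet/configs/rtmdet/rtmdet_ins_s_8xb32_300e_coco.py')
# The S-variant's override of the inherited L-variant backbone is visible after loading.
print(cfg.model.backbone.deepen_factor)  # expected: 0.33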
SEU-ProactiveSecurity-Group/MalPurifier
core/defense/amd_dnn_plus.py
[ { "identifier": "Max", "path": "core/attack/max.py", "snippet": "class Max(BaseAttack):\n \"\"\"\n Max攻击:迭代地从多个攻击方法中选择结果。\n\n 参数\n --------\n @param attack_list: List, 已实例化的攻击对象的列表。\n @param varepsilon: Float, 用于判断收敛性的标量。\n \"\"\"\n\n def __init__(self, attack_list, varepsilon=1e...
import time import os.path as path import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np from core.attack.max import Max from core.attack.stepwise_max import StepwiseMax from core.defense.md_dnn import MalwareDetectionDNN from core.defense.amd_template import DetectorTemplate from config import config, logging, ErrorHandler from tools import utils from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score
20,001
""" @article{grosse2017statistical, title={On the (statistical) detection of adversarial examples}, author={Grosse, Kathrin and Manoharan, Praveen and Papernot, Nicolas and Backes, Michael and McDaniel, Patrick}, journal={arXiv preprint arXiv:1702.06280}, year={2017} } @inproceedings{carlini2017adversarial, title={Adversarial examples are not easily detected: Bypassing ten detection methods}, author={Carlini, Nicholas and Wagner, David}, booktitle={Proceedings of the 10th ACM workshop on artificial intelligence and security}, pages={3--14}, year={2017} } This implementation is not an official version, but adapted from: https://github.com/carlini/nn_breaking_detection """ from __future__ import absolute_import from __future__ import division from __future__ import print_function logger = logging.getLogger('core.defense.amd_dnn_plus') logger.addHandler(ErrorHandler) class AMalwareDetectionDNNPlus(nn.Module, DetectorTemplate): def __init__(self, md_nn_model, input_size, n_classes, ratio=0.95, device='cpu', name='', **kwargs): # 调用父类构造函数 nn.Module.__init__(self) DetectorTemplate.__init__(self) # 初始化输入参数 self.input_size = input_size self.n_classes = n_classes self.ratio = ratio self.device = device self.name = name self.parse_args(**kwargs) # 恶意软件检测模型 if md_nn_model is not None and isinstance(md_nn_model, nn.Module): # 如果提供了已经训练好的模型,就使用这个模型 self.md_nn_model = md_nn_model self.is_fitting_md_model = False # 默认情况下,模型已经被训练 else: # 否则,创建一个新的恶意软件检测模型
""" @article{grosse2017statistical, title={On the (statistical) detection of adversarial examples}, author={Grosse, Kathrin and Manoharan, Praveen and Papernot, Nicolas and Backes, Michael and McDaniel, Patrick}, journal={arXiv preprint arXiv:1702.06280}, year={2017} } @inproceedings{carlini2017adversarial, title={Adversarial examples are not easily detected: Bypassing ten detection methods}, author={Carlini, Nicholas and Wagner, David}, booktitle={Proceedings of the 10th ACM workshop on artificial intelligence and security}, pages={3--14}, year={2017} } This implementation is not an official version, but adapted from: https://github.com/carlini/nn_breaking_detection """ from __future__ import absolute_import from __future__ import division from __future__ import print_function logger = logging.getLogger('core.defense.amd_dnn_plus') logger.addHandler(ErrorHandler) class AMalwareDetectionDNNPlus(nn.Module, DetectorTemplate): def __init__(self, md_nn_model, input_size, n_classes, ratio=0.95, device='cpu', name='', **kwargs): # 调用父类构造函数 nn.Module.__init__(self) DetectorTemplate.__init__(self) # 初始化输入参数 self.input_size = input_size self.n_classes = n_classes self.ratio = ratio self.device = device self.name = name self.parse_args(**kwargs) # 恶意软件检测模型 if md_nn_model is not None and isinstance(md_nn_model, nn.Module): # 如果提供了已经训练好的模型,就使用这个模型 self.md_nn_model = md_nn_model self.is_fitting_md_model = False # 默认情况下,模型已经被训练 else: # 否则,创建一个新的恶意软件检测模型
self.md_nn_model = MalwareDetectionDNN(self.input_size,
2
2023-11-27 02:00:23+00:00
24k
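AMalwareDetectionDNNPlus in the record above keeps ratio=0.95 alongside a base MalwareDetectionDNN; in the Grosse et al. / Carlini-style rejection detectors its docstring cites, such a ratio typically fixes an indicator threshold at a percentile of validation scores, above which inputs are rejected as adversarial. The record's excerpt ends before the thresholding code, so the sketch below is an assumption about that step, not the repository's implementation (the helper name and score source are hypothetical):

import numpy as np

def percentile_threshold(validation_scores: np.ndarray, ratio: float = 0.95) -> float:
    # Hypothetical helper: pick tau so that `ratio` of in-distribution validation
    # samples score below it; samples above tau would be flagged as adversarial.
    return float(np.quantile(validation_scores, ratio))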
iann838/pulsefire
tests/test_taskgroups.py
[ { "identifier": "RiotAPIClient", "path": "pulsefire/clients.py", "snippet": "class RiotAPIClient(BaseClient):\n \"\"\"Riot API Client.\n\n | Resources | Support |\n | -------------------- | -------------------------- |\n | League of Legends | ✅ ...
import asyncio import os from pulsefire.clients import RiotAPIClient from pulsefire.functools import async_to_sync from pulsefire.schemas import RiotAPISchema from pulsefire.taskgroups import TaskGroup
16,648
@async_to_sync() async def test_taskgroup(): async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client: plat_league = await client.get_lol_league_v4_entries_by_division(region="na1", queue="RANKED_SOLO_5x5", tier="PLATINUM", division="IV") summoner = await client.get_lol_summoner_v4_by_id(region="na1", id=plat_league[0]["summonerId"]) match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"]) async with TaskGroup() as tg: for match_id in match_ids[:20]: await tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id))
@async_to_sync() async def test_taskgroup(): async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client: plat_league = await client.get_lol_league_v4_entries_by_division(region="na1", queue="RANKED_SOLO_5x5", tier="PLATINUM", division="IV") summoner = await client.get_lol_summoner_v4_by_id(region="na1", id=plat_league[0]["summonerId"]) match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"]) async with TaskGroup() as tg: for match_id in match_ids[:20]: await tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id))
matches: list[RiotAPISchema.LolMatchV5Match] = tg.results()
2
2023-11-27 13:37:24+00:00
24k
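The pulsefire test above awaits tg.create_task(...), suggesting pulsefire's TaskGroup gates task creation (e.g., for rate limiting), and reads tg.results() once the async with block exits. The standard library's asyncio.TaskGroup differs on both points: create_task is synchronous and no result list is kept, so a stdlib version of the same fan-out has to hold the task handles itself. A minimal sketch under those assumptions (fetch is a stand-in for a client call such as get_lol_match_v5_match; requires Python 3.11+):

import asyncio

async def fetch(i: int) -> int:
    await asyncio.sleep(0)  # stand-in for an awaited HTTP call
    return i

async def main() -> list[int]:
    async with asyncio.TaskGroup() as tg:
        tasks = [tg.create_task(fetch(i)) for i in range(20)]
    # The context manager waits for all tasks, so every handle is done here.
    return [task.result() for task in tasks]

asyncio.run(main())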
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
TestCaseDistributionSystems/uc_mmgs_tess_pv_stochastic.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Opti...
from TestCaseDistributionSystems.test_cases import case33 from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION from numpy import zeros, shape, ones, diag, concatenate, eye from scipy.sparse import csr_matrix as sparse from scipy.sparse import hstack, vstack, lil_matrix from numpy import flatnonzero as find from numpy import array, tile, arange, random from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A from pypower.idx_bus import PD, VMAX, VMIN, QD from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN from pypower.ext2int import ext2int from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp from Solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as milp from copy import deepcopy from TestCaseDistributionSystems.data_format.idx_MG_PV import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \ PMESS, EESS, NX_MG, QBIC, QUG, QG, PPV from TestCaseDistributionSystems.database_management_pv import DataBaseManagement from StochasticOptimization.scenario_reduction import ScenarioReduction
15,466
for t in range(T): Aeq[t, n_stops * 2 + t] = 1 Aeq[t, n_stops + nb_tra_ele * t:n_stops + nb_tra_ele * (t + 1)] = -mess["EFF_CH"] Aeq[t, nb_tra_ele * t:nb_tra_ele * (t + 1)] = 1 / mess["EFF_DC"] if t == 0: beq[t] = mess["E0"] else: Aeq[t, n_stops * 2 + t - 1] = -1 c = concatenate((ones(n_stops * 2) * mess["COST_OP"], zeros(T))) # sol = milp(c, Aeq=Aeq, beq=beq, A=None, b=None, xmin=lx, xmax=ux) model_tess = {"c": c, "q": zeros(nv), "lb": lb, "ub": ub, "vtypes": vtypes, "A": None, "b": None, "Aeq": Aeq, "beq": beq, "NX": nv, } return model_tess def scenario_generation_reduction(self, micro_grids, profile, pns, pv_profile, update=0, ns=2, ns_reduced=2, std=0.03, interval=0.05, std_pv=0.05): """ Scenario generation function for the second-stage scheduling Stochastic variables include 1) loads in distribution networks, active loads for 2) AC bus and 3)DC bus. The assumption is that, the 1) loads in distribution networks follow normal distribution nb*T 2) loads for AC bus and DC bus follow uniform distribution nmg*T*4 :return: """ T = self.T nmg = self.nmg nb = self.nb db_management = DataBaseManagement() if update > 0: # 1) scenario generation bus_load = zeros((ns, nb * T)) mg_load = zeros((ns, nmg * T * 2)) mg_pv = zeros((ns, nmg * T)) weight = ones(ns) / ns for i in range(ns): for t in range(T): for j in range(nb): bus_load[i, t * nb + j] = pns["bus"][j, PD] * (1 + random.normal(0, std)) * profile[t] pv_rand = random.normal(0, std_pv) # all PV are correlated! for j in range(nmg): mg_load[i, t * nmg + j] = micro_grids[j]["PD"]["AC"][t] * \ (1 + random.uniform(-interval, interval)) mg_load[i, nmg * T + t * nmg + j] = micro_grids[j]["PD"]["DC"][t] * \ (1 + random.uniform(-interval, interval)) mg_pv[i, t * nmg + j] = micro_grids[j]["PV"]["PMAX"] * pv_profile[t] * \ (1 + pv_rand) # 2) scenario reduction scenario_reduction = ScenarioReduction() (scenario_reduced, weight_reduced) = \ scenario_reduction.run(scenario=concatenate([bus_load, mg_load, mg_pv], axis=1), weight=weight, n_reduced=ns_reduced, power=2) # 3) Store the data into database db_management.create_table("scenarios", nb=nb, nmg=nmg) for i in range(ns - ns_reduced): for t in range(T): # print(scenario_reduced[i, nb * T + nmg * T + t * nmg: nb * T + nmg * T + (t + 1) * nmg].tolist()) db_management.insert_data_scenario("scenarios", scenario=i, weight=weight_reduced[i], time=t, nb=nb, pd=scenario_reduced[i, t * nb:(t + 1) * nb].tolist(), nmg=nmg, pd_ac=scenario_reduced[i, nb * T + t * nmg: nb * T + (t + 1) * nmg].tolist(), pd_dc=scenario_reduced[i, nb * T + nmg * T + t * nmg: nb * T + nmg * T + (t + 1) * nmg].tolist(), ppv=scenario_reduced[i, nb * T + nmg * T * 2 + t * nmg: nb * T + nmg * T * 2 + (t + 1) * nmg].tolist()) # print(t) else: # 4) if not updated, inquery the database scenario_reduced = zeros((ns - ns_reduced, nb * T + nmg * T * 3)) weight_reduced = zeros(ns - ns_reduced) for i in range(ns - ns_reduced): for t in range(T): data = db_management.inquery_data_scenario(table_name="scenarios", scenario=i, time=t) weight_reduced[i] = data[1] scenario_reduced[i, nb * t:nb * (t + 1)] = array(data[3:nb + 3]) scenario_reduced[i, nb * T + nmg * t:nb * T + nmg * (t + 1)] = array(data[nb + 3:nb + 3 + nmg]) scenario_reduced[i, nb * T + nmg * T + nmg * t:nb * T + nmg * T + nmg * (t + 1)] = \ array(data[nb + 3 + nmg:nb + 3 + nmg * 2]) scenario_reduced[i, nb * T + nmg * T * 2 + nmg * t:nb * T + nmg * T * 2 + nmg * (t + 1)] = \ array(data[nb + 3 + nmg * 2:nb + 3 + nmg * 3]) # assert sum(weight_reduced) == 1, "The weight factor is not right!" 
# 4) return value ds_load_profile = scenario_reduced[:, 0:nb * T] mgs_load_profile = scenario_reduced[:, nb * T:nb * T + nmg * T * 2] pv_load_profile = scenario_reduced[:, nb * T + nmg * T * 2:] # profile_second_stage = zeros((ns, T)) microgrids_second_stage = [0] * (ns - ns_reduced) # for i in range(ns): # for j in range(T): # profile_second_stage[i, j] = profile[j] * (1 + 0.5 * random.random()) # for i in range(ns - ns_reduced): microgrids_second_stage[i] = deepcopy(micro_grids) for j in range(nmg): microgrids_second_stage[i][j]["PV"]["PROFILE"] = zeros(T) for t in range(T): microgrids_second_stage[i][j]["PD"]["AC"][t] = mgs_load_profile[i, t * nmg + j] microgrids_second_stage[i][j]["QD"]["AC"][t] = mgs_load_profile[i, t * nmg + j] * 0.2 microgrids_second_stage[i][j]["PD"]["DC"][t] = mgs_load_profile[i, T * nmg + t * nmg + j] microgrids_second_stage[i][j]["PV"]["PROFILE"][t] = pv_load_profile[i, t * nmg + j] return ds_load_profile, microgrids_second_stage, weight_reduced if __name__ == "__main__":
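scenario_generation_reduction in the cropped code above builds each scenario by scaling nominal quantities with random factors: Gaussian noise (std) on distribution-network bus loads, uniform noise (±interval) on microgrid AC/DC loads, and one shared Gaussian draw per hour applied to every PV unit, which is what makes the PV outputs fully correlated. A toy sketch of just the bus-load generation step under those same distributional assumptions (array names and shapes are illustrative):

import numpy as np

def generate_bus_load_scenarios(pd_bus: np.ndarray, profile: np.ndarray, ns: int = 100, std: float = 0.03) -> np.ndarray:
    # pd_bus: nominal bus loads (nb,); profile: per-period load multipliers (T,)
    nb, T = pd_bus.size, profile.size
    scenarios = np.zeros((ns, T * nb))
    for i in range(ns):
        for t in range(T):
            noise = np.random.normal(0.0, std, nb)  # independent per-bus perturbation
            scenarios[i, t * nb:(t + 1) * nb] = pd_bus * (1.0 + noise) * profile[t]
    return scenarios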
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: zhaoty@ntu.edu.sg @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formulation(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = 
nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {} db_management = DataBaseManagement() db_management.create_table(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng) db_management.create_table(table_name="micro_grids", nmg=self.nmg) db_management.create_table(table_name="mobile_energy_storage_systems", nmg=self.nmg) db_management.create_table(table_name="first_stage_solutions", nmg=self.nmg, ng=self.ng) db_management.create_table(table_name="fisrt_stage_mess", nmg=self.nmg) for t in range(T): db_management.insert_data_first_stage(table_name="first_stage_solutions", time=t, ng=self.ng, nmg=self.nmg, pg=sol_first_stage["pg"][:, t].tolist(), rg=sol_first_stage["rg"][:, t].tolist(), pg_mg=sol_first_stage["pg_mg"][:, t].tolist(), rg_mg=sol_first_stage["rg_mg"][:, t].tolist(), pess_ch=sol_first_stage["pess_ch"][:, t].tolist(), pess_dc=sol_first_stage["pess_dc"][:, t].tolist(), ress=sol_first_stage["ress"][:, t].tolist(), ess=sol_first_stage["eess"][:, t].tolist(), iess=sol_first_stage["iess"][:, t].tolist()) for i in range(nmes): for t in range(T): db_management.insert_data_first_stage_mess(table_name="fisrt_stage_mess", nmg=self.nmg, time=t, mess=i, imess=sol_first_stage["MESS"][i]["idc"][:, t].tolist(), rmess=sol_first_stage["MESS"][i]["rmess"][:, t].tolist(), pmess_ch= sol_first_stage["MESS"][i]["pmess_ch"][:, t].tolist(), pmess_dc= sol_first_stage["MESS"][i]["pmess_dc"][:, t].tolist(), mess_f_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][0], mess_t_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][1]) for i in range(ns): sol_second_stage_checked[i] = self.second_stage_solution_validation(sol_second_stage[i]) for i in range(ns): for t in range(T): 
db_management.insert_data_ds(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng, scenario=i, time=t, pij=sol_second_stage_checked[i]["DS"]["pij"][:, t].tolist(), qij=sol_second_stage_checked[i]["DS"]["qij"][:, t].tolist(), lij=sol_second_stage_checked[i]["DS"]["lij"][:, t].tolist(), vi=sol_second_stage_checked[i]["DS"]["vi"][:, t].tolist(), pg=sol_second_stage_checked[i]["DS"]["pg"][:, t].tolist(), qg=sol_second_stage_checked[i]["DS"]["qg"][:, t].tolist(), ) for i in range(ns): for j in range(nmg): for t in range(T): db_management.insert_data_mg(table_name="micro_grids", scenario=i, time=t, mg=j, pg=sol_second_stage_checked[i]["MG"]["pg"][j, t], qg=sol_second_stage_checked[i]["MG"]["qg"][j, t], pug=sol_second_stage_checked[i]["MG"]["pug"][j, t], qug=sol_second_stage_checked[i]["MG"]["qug"][j, t], pbic_ac2dc=sol_second_stage_checked[i]["MG"]["pbic_ac2dc"][j, t], pbic_dc2ac=sol_second_stage_checked[i]["MG"]["pbic_dc2ac"][j, t], qbic=sol_second_stage_checked[i]["MG"]["qbic"][j, t], pess_ch=sol_second_stage_checked[i]["MG"]["pess_ch"][j, t], pess_dc=sol_second_stage_checked[i]["MG"]["pess_dc"][j, t], eess=sol_second_stage_checked[i]["MG"]["eess"][j, t], pmess=sol_second_stage_checked[i]["MG"]["pmess"][j, t], ppv=sol_second_stage_checked[i]["MG"]["ppv"][j, t]) for i in range(ns): for j in range(nmes): for t in range(T): db_management.insert_data_mess(table_name="mobile_energy_storage_systems", scenario=i, time=t, mess=j, nmg=self.nmg, pmess_dc= sol_second_stage_checked[i]["MESS"][j]["pmess_dc"][:, t].tolist(), pmess_ch= sol_second_stage_checked[i]["MESS"][j]["pmess_ch"][:, t].tolist(), emess=sol_second_stage_checked[i]["MESS"][j]["emess"][0, t]) # 4.3) Cross validation of the first-stage and second-stage decision variables tess_check = {} for i in range(ns): tess_temp = {} for j in range(nmes): tess_temp[j] = sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["pmess_dc"] + \ sol_first_stage["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["rmess"] tess_temp[j + nmes] = sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["pmess_ch"] + \ sol_first_stage["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["rmess"] tess_check[i] = tess_temp # return sol_distribution_network, sol_microgrids, sol_tess return sol_first_stage, sol_second_stage_checked def first_stage_problem_formulation(self, pns, mgs, mess, tns): """ Problem formulation for the first stage optimization, Decision variables include, DGs within power networks, DGs within MGs, EESs within MGs and TESSs :param power_networks: Parameters for the power networks :param micro_grids: Parameters for the microgrids :param tess: Parameters for the mobile energy storage systems :param traffic_networks: Parameters for the transportation networks :return: Formulated first-stage problem """ T = self.T # Time slots nmg = self.nmg # Number of mgs nmes = self.nmes # Number of tess mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] ng = shape(mpc['gen'])[0] ## number of dispatchable injections nb = shape(mpc["bus"])[0] self.nb = nb self.ng = ng # Obtain the initial status, start-up and shut down of generators Ig0 = gen[:, -1].astype(int) MIN_DOWN = gen[:, -2].astype(int) MIN_UP = gen[:, -3].astype(int) alpha_l = zeros(ng) beta_l = zeros(ng) Ig_l = zeros(ng) pg_l = zeros(ng) # Boundary 
for DGs within distribution networks rg_l = zeros(ng) alpha_u = ones(ng) beta_u = ones(ng) Ig_u = ones(ng) pg_u = gen[:, PMAX] / baseMVA rg_u = gen[:, PMAX] / baseMVA c_alpha = gencost[:, 0] c_beta = gencost[:, 1] c_ig = gencost[:, 6] cg = gencost[:, 5] * baseMVA cr = zeros(ng) pg_mg_l = zeros(nmg) # Boundary for DGs within MGs rg_mg_l = zeros(nmg) pg_mg_u = zeros(nmg) rg_mg_u = zeros(nmg) cg_mg = zeros(nmg) cr_mg = zeros(nmg) for i in range(nmg): pg_mg_l[i] = mgs[i]["DG"]["PMIN"] pg_mg_u[i] = mgs[i]["DG"]["PMAX"] rg_mg_u[i] = mgs[i]["DG"]["PMAX"] cg_mg[i] = mgs[i]["DG"]["COST_B"] pes_ch_l = zeros(nmg) # Lower boundary for ESSs within MGs pes_dc_l = zeros(nmg) ees_l = zeros(nmg) res_l = zeros(nmg) ies_l = zeros(nmg) pes_ch_u = zeros(nmg) # Upper boundary for ESSs within MGs pes_dc_u = zeros(nmg) ees_u = zeros(nmg) res_u = zeros(nmg) ies_u = ones(nmg) ces_ch = zeros(nmg) # Cost boundary for ESSs within MGs ces_dc = zeros(nmg) ces_r = zeros(nmg) ces = zeros(nmg) ces_i = zeros(nmg) for i in range(nmg): pes_ch_u[i] = mgs[i]["ESS"]["PCH_MAX"] pes_dc_u[i] = mgs[i]["ESS"]["PDC_MAX"] + mgs[i]["ESS"]["PCH_MAX"] res_u[i] = mgs[i]["ESS"]["PCH_MAX"] ees_l[i] = mgs[i]["ESS"]["EMIN"] ees_u[i] = mgs[i]["ESS"]["EMAX"] _nv_first_stage = ng * 5 + nmg * 2 + nmg * 5 nv_first_stage = _nv_first_stage * T # Formulate the boundaries lb = concatenate( [tile(concatenate( [alpha_l, beta_l, Ig_l, pg_l, rg_l, pg_mg_l, rg_mg_l, pes_ch_l, pes_dc_l, res_l, ees_l, ies_l]), T)]) ub = concatenate( [tile(concatenate( [alpha_u, beta_u, Ig_u, pg_u, rg_u, pg_mg_u, rg_mg_u, pes_ch_u, pes_dc_u, res_u, ees_u, ies_u]), T)]) # Objective value c = concatenate( [tile(concatenate([c_alpha, c_beta, c_ig, cg, cr, cg_mg, cr_mg, ces_ch, ces_dc, ces, ces_r, ces_i]), T)]) # Variable types vtypes = (["b"] * ng * 3 + ["c"] * (ng * 2 + nmg * 2 + nmg * 4) + ["b"] * nmg) * T ## Constraint sets # 1) Pg+Rg<=PguIg A = lil_matrix((ng * T, nv_first_stage)) b = zeros(ng * T) for t in range(T): for j in range(ng): A[t * ng + j, t * _nv_first_stage + ng * 3 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 2 + j] = -pg_u[j] # 2) Pg-Rg>=IgPgl A_temp = lil_matrix((ng * T, nv_first_stage)) b_temp = zeros(ng * T) for t in range(T): for j in range(ng): A_temp[t * ng + j, t * _nv_first_stage + ng * 3 + j] = -1 A_temp[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A_temp[t * ng + j, t * _nv_first_stage + j] = pg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 3) Start-up and shut-down constraints of DGs UP_LIMIT = zeros(ng).astype(int) DOWN_LIMIT = zeros(ng).astype(int) for i in range(ng): UP_LIMIT[i] = T - MIN_UP[i] DOWN_LIMIT[i] = T - MIN_DOWN[i] # 3.1) Up limit A_temp = lil_matrix((sum(UP_LIMIT), nv_first_stage)) b_temp = zeros(sum(UP_LIMIT)) for i in range(ng): for t in range(MIN_UP[i], T): for k in range(t - MIN_UP[i], t): A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], k * _nv_first_stage + i] = 1 A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], t * _nv_first_stage + ng * 2 + i] = -1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # # 3.2) Down limit A_temp = lil_matrix((sum(DOWN_LIMIT), nv_first_stage)) b_temp = ones(sum(DOWN_LIMIT)) for i in range(ng): for t in range(MIN_DOWN[i], T): for k in range(t - MIN_DOWN[i], t): A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], k * _nv_first_stage + ng + i] = 1 A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], t * _nv_first_stage + ng * 2 + i] = 1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Status transformation of each unit Aeq = lil_matrix((T 
* ng, nv_first_stage)) beq = zeros(T * ng) for i in range(ng): for t in range(T): Aeq[i * T + t, t * _nv_first_stage + i] = 1 Aeq[i * T + t, t * _nv_first_stage + ng + i] = -1 Aeq[i * T + t, t * _nv_first_stage + ng * 2 + i] = -1 if t != 0: Aeq[i * T + t, (t - 1) * _nv_first_stage + ng * 2 + i] = 1 else: beq[i * T + t] = -Ig0[i] # 3) Pg_mg+Rg_mg<=Pg_mg_u A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Pg_mg-Rg_mg<=Pg_mg_l A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 5) Pess_dc-Pess_ch+Ress<=Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 6) Pess_ch-Pess_dc+Ress<=Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 7) Energy storage balance equation Aeq_temp = lil_matrix((T * nmg, nv_first_stage)) beq_temp = zeros(T * nmg) for t in range(T): for j in range(nmg): Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = 1 Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -mgs[j]["ESS"]["EFF_CH"] Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 / mgs[j]["ESS"]["EFF_DC"] if t == 0: beq_temp[t * nmg + j] = mgs[j]["ESS"]["E0"] else: Aeq_temp[t * nmg + j, (t - 1) * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = -1 Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 8) Pess_ch<=I*Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = -pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 9) Pess_dc<=(1-I)*Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = pes_dc_u[j] b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 2) Transportation energy storage systems problem model_mess = {} for i in range(nmes): model_mess[i] = self.problem_formulation_tess(mess=mess[i], tns=tns) # 3) Merge the DGs, ESSs and TESSs neq = Aeq.shape[0] nineq = A.shape[0] 
nV_index = zeros(nmes + 1).astype(int) neq_index = zeros(nmes + 1).astype(int) nineq_index = zeros(nmes + 1).astype(int) nV_index[0] = nv_first_stage neq_index[0] = neq nineq_index[0] = nineq for i in range(nmes): nV_index[i + 1] = nV_index[i] + len(model_mess[i]["c"]) neq_index[i + 1] = neq_index[i] + model_mess[i]["Aeq"].shape[0] nineq_index[i + 1] = nineq_index[i] + model_mess[i]["A"].shape[0] neq += model_mess[i]["Aeq"].shape[0] nineq += model_mess[i]["A"].shape[0] # Merge the objective function, boundaries, types and rhs c = concatenate([c, model_mess[i]["c"]]) lb = concatenate([lb, model_mess[i]["lb"]]) ub = concatenate([ub, model_mess[i]["ub"]]) vtypes += model_mess[i]["vtypes"] beq = concatenate([beq, model_mess[i]["beq"]]) b = concatenate([b, model_mess[i]["b"]]) A_full = lil_matrix((nineq_index[-1], nV_index[-1])) Aeq_full = lil_matrix((neq_index[-1], nV_index[-1])) if Aeq is not None: Aeq_full[0:int(neq_index[0]), 0:int(nV_index[0])] = Aeq if A is not None: A_full[0:int(nineq_index[0]), 0:int(nV_index[0])] = A for i in range(nmes): Aeq_full[neq_index[i]:neq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["Aeq"] A_full[nineq_index[i]:nineq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["A"] self.nv_first_stage = nV_index[-1] # The number of first stage decision variables self._nv_first_stage = _nv_first_stage model_first_stage = {"c": c, "lb": lb, "ub": ub, "vtypes": vtypes, "A": A_full, "b": b, "Aeq": Aeq_full, "beq": beq, } return model_first_stage def first_stage_solution_validation(self, sol): """ Validation of the first-stage solution :param sol: The first stage solution :return: the first stage solution """ T = self.T ng = self.ng nmg = self.nmg nmes = self.nmes # Set-points of DGs within DSs, MGs and ESSs _nv_first_stage = self._nv_first_stage alpha = zeros((ng, T)) beta = zeros((ng, T)) Ig = zeros((ng, T)) Pg = zeros((ng, T)) Rg = zeros((ng, T)) Pg_mg = zeros((nmg, T)) Rg_mg = zeros((nmg, T)) Pess_dc = zeros((nmg, T)) Pess_ch = zeros((nmg, T)) Ress = zeros((nmg, T)) Eess = zeros((nmg, T)) Iess = zeros((nmg, T)) for i in range(T): alpha[:, i] = sol[_nv_first_stage * i:_nv_first_stage * i + ng] beta[:, i] = sol[_nv_first_stage * i + ng:_nv_first_stage * i + ng * 2] Ig[:, i] = sol[_nv_first_stage * i + ng * 2:_nv_first_stage * i + ng * 3] Pg[:, i] = sol[_nv_first_stage * i + ng * 3:_nv_first_stage * i + ng * 4] Rg[:, i] = sol[_nv_first_stage * i + ng * 4:_nv_first_stage * i + ng * 5] Pg_mg[:, i] = sol[_nv_first_stage * i + ng * 5:_nv_first_stage * i + ng * 5 + nmg] Rg_mg[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg:_nv_first_stage * i + ng * 5 + nmg * 2] Pess_ch[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 2:_nv_first_stage * i + ng * 5 + nmg * 3] Pess_dc[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 3:_nv_first_stage * i + ng * 5 + nmg * 4] Ress[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 4:_nv_first_stage * i + ng * 5 + nmg * 5] Eess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 5:_nv_first_stage * i + ng * 5 + nmg * 6] Iess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 6:_nv_first_stage * i + ng * 5 + nmg * 7] # Set-points and scheduling of mobile energy storage systems nv_tra = self.nv_tra nl_traffic = self.nl_tra n_stops = self.n_stops nb_tra_ele = self.nb_tra_ele sol_ev = {} for i in range(nmes): ev_temp = {} ev_temp["VRP"] = [] for t in range(nl_traffic): if sol[_nv_first_stage * T + nv_tra * i + t] > 0: # obtain the solution for vrp if self.connection_matrix[t, TIME] > 0: for j in range(int(self.connection_matrix[t, TIME])): 
ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) else: ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) ev_temp["idc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_dc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_ch"] = zeros((nb_tra_ele, T)) ev_temp["rmess"] = zeros((nb_tra_ele, T)) for t in range(T): for k in range(nb_tra_ele): ev_temp["idc"][k, t] = sol[_nv_first_stage * T + nv_tra * i + nl_traffic + nb_tra_ele * t + k] ev_temp["pmess_dc"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops + nb_tra_ele * t + k] ev_temp["pmess_ch"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 2 + nb_tra_ele * t + k] ev_temp["rmess"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 3 + nb_tra_ele * t + k] sol_ev[i] = ev_temp sol_first_stage = {"alpha": alpha, "beta": beta, "ig": Ig, "rg": Rg, "pg": Pg, "pg_mg": Pg_mg, "rg_mg": Rg_mg, "pess_ch": Pess_ch, "pess_dc": Pess_dc, "ress": Ress, "eess": Eess, "iess": Iess, "MESS": sol_ev, } return sol_first_stage def second_stage_problem_formualtion(self, pns, mgs, mess, tns, profile, index=0, weight=1): """ Second-stage problem formulation, the decision variables includes DGs within power networks, DGs within MGs, EESs within MGs and TESSs and other systems' information :param power_networks: :param micro_grids: :param tess: :param traffic_networks: :return: The second stage problems as list, including coupling constraints, and other constraint set """ # I) Formulate the problem for distribution systems operator T = self.T mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] nb = shape(mpc['bus'])[0] ## number of buses nl = shape(mpc['branch'])[0] ## number of branches ng = shape(mpc['gen'])[0] ## number of dispatchable injections nmg = self.nmg nmes = self.nmes self.nl = nl self.nb = nb self.ng = ng m = zeros(nmg) ## list of integration index pmg_l = zeros(nmg) ## list of lower boundary pmg_u = zeros(nmg) ## list of upper boundary qmg_l = zeros(nmg) ## list of lower boundary qmg_u = zeros(nmg) ## list of upper boundary for i in range(nmg): m[i] = mgs[i]["BUS"] pmg_l[i] = mgs[i]["UG"]["PMIN"] / 1000 / baseMVA pmg_u[i] = mgs[i]["UG"]["PMAX"] / 1000 / baseMVA qmg_l[i] = mgs[i]["UG"]["QMIN"] / 1000 / baseMVA qmg_u[i] = mgs[i]["UG"]["QMAX"] / 1000 / baseMVA f = branch[:, F_BUS] ## list of "from" buses t = branch[:, T_BUS] ## list of "to" buses i = range(nl) ## double set of row indices self.f = f ## record from bus for each branch # Connection matrix Cf = sparse((ones(nl), (i, f)), (nl, nb)) Ct = sparse((ones(nl), (i, t)), (nl, nb)) Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng)) Cmg = sparse((ones(nmg), (m, range(nmg))), (nb, nmg)) Branch_R = branch[:, BR_R] Branch_X = branch[:, BR_X] Cf = Cf.T Ct = Ct.T # Obtain the boundary information slmax = branch[:, RATE_A] / baseMVA pij_l = -slmax qij_l = -slmax lij_l = zeros(nl) vm_l = bus[:, VMIN] ** 2 pg_l = gen[:, PMIN] / baseMVA qg_l = gen[:, QMIN] / baseMVA pij_u = slmax qij_u = slmax lij_u = slmax vm_u = bus[:, VMAX] ** 2 pg_u = 2 * gen[:, PMAX] / baseMVA qg_u = 2 * gen[:, QMAX] / baseMVA _nv_second_stage = int(3 * nl + nb + 2 * ng + 2 * nmg) self._nv_second_stage = _nv_second_stage # Number of decision variable within each time slot lb = concatenate([tile(concatenate([pij_l, qij_l, lij_l, vm_l, pg_l, qg_l, pmg_l, qmg_l]), T)]) ub = 
concatenate([tile(concatenate([pij_u, qij_u, lij_u, vm_u, pg_u, qg_u, pmg_u, qmg_u]), T)]) vtypes = ["c"] * _nv_second_stage * T nv_ds = _nv_second_stage * T # Number of total decision variables # Add system level constraints # 1) Active power balance Aeq_p = lil_matrix((nb * T, nv_ds)) beq_p = zeros(nb * T) for i in range(T): Aeq_p[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng)), -Cmg, zeros((nb, nmg))]) beq_p[i * nb:(i + 1) * nb] = profile[i * nb:(i + 1) * nb] / baseMVA # 2) Reactive power balance Aeq_q = lil_matrix((nb * T, nv_ds)) beq_q = zeros(nb * T) for i in range(T): Aeq_q[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg, zeros((nb, nmg)), -Cmg]) for j in range(nb): if bus[j, PD] > 0: beq_q[i * nb:(i + 1) * nb] = profile[i * nb + j] / bus[j, PD] * bus[j, QD] / baseMVA # 3) KVL equation Aeq_kvl = lil_matrix((nl * T, nv_ds)) beq_kvl = zeros(nl * T) for i in range(T): Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage: i * _nv_second_stage + nl] = -2 * diag(Branch_R) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + nl: i * _nv_second_stage + 2 * nl] = -2 * diag(Branch_X) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 2 * nl: i * _nv_second_stage + 3 * nl] = diag( Branch_R ** 2) + diag(Branch_X ** 2) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 3 * nl:i * _nv_second_stage + 3 * nl + nb] = ( Cf.T - Ct.T).toarray() Aeq = vstack([Aeq_p, Aeq_q, Aeq_kvl]) beq = concatenate([beq_p, beq_q, beq_kvl]) c = zeros(nv_ds) q = zeros(nv_ds) c0 = 0 for t in range(T): for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] 
# Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1 Ay2x[nmg * T + i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + QUG] = -1 Aeq_temp = hstack([Ax2y, Ay2x]) beq_temp = zeros(2 * nmg * T) Aeq_full = vstack([Aeq_full, Aeq_temp]) beq = concatenate([beq, beq_temp]) # III) Formulate the optimization problem for tess in the second stage optimization model_tess = {} for i in range(nmes): model_tess[i] = self.problem_formulation_tess_second_stage(mess=mess[i]) # III.1) Merge the models of mirogrids and distribution # Formulate the index nv_index_ev = zeros(1 + nmes).astype(int) neq_index_temp = zeros(1 + nmes).astype(int) nv_index_ev[0] = int(Aeq_full.shape[1]) neq_index_temp[0] = int(Aeq_full.shape[0]) for i in range(nmes): nv_index_ev[i + 1] = nv_index_ev[i] + len(model_tess[i]["c"]) neq_index_temp[i + 1] = neq_index_temp[i] + model_tess[i]["Aeq"].shape[0] Aeq = lil_matrix((int(neq_index_temp[-1]), int(nv_index_ev[-1]))) Aeq[0:int(neq_index_temp[0]), 0:int(nv_index_ev[0])] = Aeq_full for i in range(nmes): lb = concatenate([lb, model_tess[i]["lb"]]) ub = concatenate([ub, model_tess[i]["ub"]]) c = concatenate([c, model_tess[i]["c"]]) q = concatenate([q, model_tess[i]["q"]]) vtypes += model_tess[i]["vtypes"] beq = concatenate([beq, model_tess[i]["beq"]]) Aeq[neq_index_temp[i]:neq_index_temp[i + 1], nv_index_ev[i]:nv_index_ev[i + 1]] = model_tess[i]["Aeq"] # III.2) Coupling constraints between the microgrids and mobile energy storage systems # Additional equal constraints, nmg*T Aeq_temp = lil_matrix((nmg * T, nv_index_ev[-1])) beq_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Aeq_temp[i * T + t, nv_index[i] + t * NX_MG + PMESS] = 1 # TESSs injections to the MGs for j in range(nmes): Aeq_temp[i * T + t, nv_index_ev[j] + t * self.nb_tra_ele + i] = -1 # Discharging Aeq_temp[i * T + t, nv_index_ev[j] + self.nb_tra_ele * T + t * self.nb_tra_ele + i] = 1 # Sort by order Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate((beq, beq_temp)) nv_second_stage = nv_index_ev[-1] nv_first_stage = self.nv_first_stage self.nv_second_stage = nv_second_stage Qc = dict() # 4) Pij**2+Qij**2<=Vi*Iij for t in range(T): for i in range(nl): Qc[(T * nl + T * nmg) * index + t * nl + i] = [ [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl)], [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl)], [1, 1, -1 / 2, -1 / 2]] Rc = zeros(nl * T) # 5) (Pbic_ac2dc+Pbic_dc2ac)**2+Qbic**2<=Sbic**2 Rc_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Qc[(T * nl + T * nmg) * index + T * nl + T * i + t] = [ [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + 
PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + QBIC)], [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC), int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + QBIC)], [1, 1, 1, 1, 1]] Rc_temp[i * T + t] = mgs[i]["BIC"]["SMAX"] ** 2 Rc = concatenate([Rc, Rc_temp]) ## IV. Coupling constraints between the first stage and second stage decision variables # pg, pg_mg, pess_mg, pess_tess # Ts*x+Ws*ys<=hs ## IV) Formulate the coupling constraints between the first-stage and second-stage problems # 1) -Pg -Rg + pg <= 0 _nv_first_stage = self._nv_first_stage Ts = lil_matrix((ng * T, nv_first_stage)) Ws = lil_matrix((ng * T, nv_second_stage)) hs = zeros(ng * T) for i in range(T): for j in range(ng): Ts[i * ng + j, i * _nv_first_stage + ng * 3 + j] = -1 Ts[i * ng + j, i * _nv_first_stage + ng * 4 + j] = -1 Ws[i * ng + j, i * _nv_second_stage + 3 * nl + nb + j] = 1 # 2) Pg-Rg - pg <= 0 Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 3 + j] = 1 Ts_temp[i * ng + j, i * _nv_first_stage + ng * 4 + j] = -1 Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + j] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 3) Qg <= IgQg_max Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 2 + j] = -qg_u[j] Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + ng + j] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 4) Qg >= IgQg_min Ts_temp = lil_matrix((ng * T, nv_first_stage)) Ws_temp = lil_matrix((ng * T, nv_second_stage)) hs_temp = zeros(ng * T) for i in range(T): for j in range(ng): Ts_temp[i * ng + j, i * _nv_first_stage + ng * 2 + j] = qg_l[j] Ws_temp[i * ng + j, i * _nv_second_stage + 3 * nl + nb + ng + j] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 3) -Pg_mg - Rg_mg + pg_mg <= 0 Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + j] = -1 Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg + j] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PG] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 4) Pg_mg - Rg_mg - pg_mg <= 0 Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + j] = 1 Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg + j] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PG] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 5) pess_dc - 
pess_ch <= Pess_dc - Pess_ch + Ress Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 # Charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 3 + j] = -1 # Dis-charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 4 + j] = -1 # Reserve Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_CH] = -1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_DC] = 1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 6) pess_ch - pess_dc <= Pess_ch - Pess_dc + Ress Ts_temp = lil_matrix((nmg * T, nv_first_stage)) Ws_temp = lil_matrix((nmg * T, nv_second_stage)) hs_temp = zeros(nmg * T) for i in range(T): for j in range(nmg): Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 2 + j] = -1 # Charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 3 + j] = 1 # Dis-charging Ts_temp[i * nmg + j, i * _nv_first_stage + ng * 5 + nmg * 4 + j] = -1 # Reserve Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_CH] = 1 Ws_temp[i * nmg + j, nv_index[j] + i * NX_MG + PESS_DC] = -1 Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 7) ptss_ch - ptss_dc <= Ptss_ch - Ptss_dc + Rtss nv_tra = self.nv_tra nl_tra = self.nl_tra Ts_temp = lil_matrix((nmg * T * nmes, nv_first_stage)) Ws_temp = lil_matrix((nmg * T * nmes, nv_second_stage)) hs_temp = zeros(nmg * T * nmes) for i in range(nmes): Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T:_nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 2] = eye( nmg * T) Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 2: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 3] = -eye(nmg * T) Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 3: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 4] = -eye(nmg * T) Ws_temp[i * nmg * T:(i + 1) * nmg * T, nv_index_ev[i] + nmg * T * 0:nv_index_ev[i] + nmg * T * 1] = \ -eye(nmg * T) Ws_temp[i * nmg * T:(i + 1) * nmg * T, nv_index_ev[i] + nmg * T * 1:nv_index_ev[i] + nmg * T * 2] = \ eye(nmg * T) Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # 8) ptss_dc - ptss_ch <= Ptss_dc - Ptss_ch + Rtss Ts_temp = lil_matrix((nmg * T * nmes, nv_first_stage)) Ws_temp = lil_matrix((nmg * T * nmes, nv_second_stage)) hs_temp = zeros(nmg * T * nmes) for i in range(nmes): Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 2] = \ -eye(nmg * T) Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 2: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 3] = \ eye(nmg * T) Ts_temp[i * nmg * T:(i + 1) * nmg * T, _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 3: _nv_first_stage * T + nv_tra * i + nl_tra + nmg * T * 4] = \ -eye(nmg * T) Ws_temp[i * nmg * T:(i + 1) * nmg * T, int(nv_index_ev[i]) + nmg * T * 0: int(nv_index_ev[i]) + nmg * T * 1] = eye(nmg * T) Ws_temp[i * nmg * T:(i + 1) * nmg * T, int(nv_index_ev[i]) + nmg * T * 1: int(nv_index_ev[i]) + nmg * T * 2] = -eye(nmg * T) Ts = vstack((Ts, Ts_temp)) Ws = vstack((Ws, Ws_temp)) hs = concatenate((hs, hs_temp)) # sol = miqcp(c, q, Aeq=Aeq, beq=beq, A=None, b=None, Qc=Qc, xmin=lx, xmax=ux) model_second_stage = 
{"c": c * weight, "q": q * weight, "lb": lb, "ub": ub, "vtypes": vtypes, "A": None, "b": None, "Aeq": Aeq, "beq": beq, "Qc": Qc, "rc": Rc, "c0": c0, "Ts": Ts, "Ws": Ws, "hs": hs} return model_second_stage def second_stage_solution_validation(self, sol): """ :param sol: The second stage solution under specific scenario :return: for each value """ T = self.T nb = self.nb ng = self.ng nl = self.nl nmg = self.nmg nmes = self.nmes f = self.f # Solutions for distribution networks ds_sol = {} _nv_second_stage = self._nv_second_stage ds_sol["pij"] = zeros((nl, T)) ds_sol["qij"] = zeros((nl, T)) ds_sol["lij"] = zeros((nl, T)) ds_sol["vi"] = zeros((nb, T)) ds_sol["pg"] = zeros((ng, T)) ds_sol["qg"] = zeros((ng, T)) ds_sol["pmg"] = zeros((nmg, T)) ds_sol["qmg"] = zeros((nmg, T)) ds_sol["gap"] = zeros((nl, T)) for i in range(T): ds_sol["pij"][:, i] = sol[_nv_second_stage * i:_nv_second_stage * i + nl] ds_sol["qij"][:, i] = sol[_nv_second_stage * i + nl:_nv_second_stage * i + nl * 2] ds_sol["lij"][:, i] = sol[_nv_second_stage * i + nl * 2:_nv_second_stage * i + nl * 3] ds_sol["vi"][:, i] = sol[_nv_second_stage * i + nl * 3:_nv_second_stage * i + nl * 3 + nb] ds_sol["pg"][:, i] = sol[_nv_second_stage * i + nl * 3 + nb:_nv_second_stage * i + nl * 3 + nb + ng] ds_sol["qg"][:, i] = sol[_nv_second_stage * i + nl * 3 + nb + ng: _nv_second_stage * i + nl * 3 + nb + ng * 2] ds_sol["pmg"][:, i] = sol[_nv_second_stage * i + nl * 3 + nb + ng * 2: _nv_second_stage * i + nl * 3 + nb + ng * 2 + nmg] ds_sol["qmg"][:, i] = sol[_nv_second_stage * i + nl * 3 + nb + ng * 2 + nmg: _nv_second_stage * i + nl * 3 + nb + ng * 2 + nmg * 2] for j in range(nl): ds_sol["gap"][j, i] = ds_sol["pij"][j, i] ** 2 + ds_sol["qij"][j, i] ** 2 - \ ds_sol["lij"][j, i] * ds_sol["vi"][int(f[j]), i] # Solutions for the microgrids mg_sol = {} mg_sol["pg"] = zeros((nmg, T)) mg_sol["qg"] = zeros((nmg, T)) mg_sol["pug"] = zeros((nmg, T)) mg_sol["qug"] = zeros((nmg, T)) mg_sol["pbic_ac2dc"] = zeros((nmg, T)) mg_sol["pbic_dc2ac"] = zeros((nmg, T)) mg_sol["qbic"] = zeros((nmg, T)) mg_sol["pess_ch"] = zeros((nmg, T)) mg_sol["pess_dc"] = zeros((nmg, T)) mg_sol["eess"] = zeros((nmg, T)) mg_sol["pmess"] = zeros((nmg, T)) mg_sol["ppv"] = zeros((nmg, T)) for i in range(nmg): for t in range(T): mg_sol["pg"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PG] mg_sol["qg"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + QG] mg_sol["pug"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PUG] mg_sol["qug"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + QUG] mg_sol["pbic_ac2dc"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PBIC_AC2DC] mg_sol["pbic_dc2ac"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PBIC_DC2AC] mg_sol["qbic"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + QBIC] mg_sol["pess_ch"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PESS_CH] mg_sol["pess_dc"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PESS_DC] mg_sol["eess"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + EESS] mg_sol["ppv"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PPV] mg_sol["pmess"][i, t] = sol[_nv_second_stage * T + NX_MG * T * i + NX_MG * t + PMESS] mg_sol["gap"] = mg_sol["pbic_ac2dc"].__mul__(mg_sol["pbic_dc2ac"]) # Solutions for the mess n_stops = self.n_stops mess_sol = {} for i in range(nmes): mess_temp = {} mess_temp["pmess_dc"] = zeros((nmg, T)) mess_temp["pmess_ch"] = zeros((nmg, T)) 
mess_temp["emess"] = zeros((1, T)) for t in range(T): mess_temp["pmess_dc"][:, t] = \ sol[_nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + nmg * t: _nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + nmg * (t + 1)] mess_temp["pmess_ch"][:, t] = \ sol[_nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + n_stops + nmg * t: _nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + n_stops + nmg * (t + 1)] mess_temp["emess"][:, t] = \ sol[_nv_second_stage * T + NX_MG * T * nmg + (2 * n_stops + T) * i + n_stops * 2 + t] mess_sol[i] = mess_temp second_stage_solution = {} second_stage_solution["DS"] = ds_sol second_stage_solution["MG"] = mg_sol second_stage_solution["MESS"] = mess_sol return second_stage_solution def problem_formulation_microgrid(self, mg, mess): """ Unit commitment problem formulation of single micro_grid :param micro_grid: :return: """ try: T = self.T except: T = 24 nmes = self.nmes pmess_l = 0 pmess_u = 0 for i in range(nmes): pmess_l -= mess[i]["PCMAX"] pmess_u += mess[i]["PDMAX"] ## 1) boundary information and objective function nv = NX_MG * T lb = zeros(nv) ub = zeros(nv) c = zeros(nv) q = zeros(nv) vtypes = ["c"] * nv for t in range(T): ## 1.1) lower boundary lb[t * NX_MG + PG] = 0 lb[t * NX_MG + QG] = mg["DG"]["QMIN"] lb[t * NX_MG + PUG] = 0 lb[t * NX_MG + QUG] = mg["UG"]["QMIN"] lb[t * NX_MG + PBIC_DC2AC] = 0 lb[t * NX_MG + PBIC_AC2DC] = 0 lb[t * NX_MG + QBIC] = -mg["BIC"]["SMAX"] lb[t * NX_MG + PESS_CH] = 0 lb[t * NX_MG + PESS_DC] = 0 lb[t * NX_MG + EESS] = mg["ESS"]["EMIN"] lb[t * NX_MG + PPV] = 0 lb[t * NX_MG + PMESS] = pmess_l ## 1.2) upper boundary ub[t * NX_MG + PG] = mg["DG"]["PMAX"] ub[t * NX_MG + QG] = mg["DG"]["QMAX"] ub[t * NX_MG + PUG] = mg["UG"]["PMAX"] ub[t * NX_MG + QUG] = mg["UG"]["QMAX"] ub[t * NX_MG + PBIC_DC2AC] = mg["BIC"]["PMAX"] ub[t * NX_MG + PBIC_AC2DC] = mg["BIC"]["PMAX"] ub[t * NX_MG + QBIC] = mg["BIC"]["SMAX"] ub[t * NX_MG + PESS_CH] = mg["ESS"]["PCH_MAX"] ub[t * NX_MG + PESS_DC] = mg["ESS"]["PDC_MAX"] ub[t * NX_MG + EESS] = mg["ESS"]["EMAX"] ub[t * NX_MG + PPV] = mg["PV"]["PROFILE"][t] ub[t * NX_MG + PMESS] = pmess_u ## 1.3) Objective functions c[t * NX_MG + PG] = mg["DG"]["COST_A"] c[t * NX_MG + PESS_CH] = mg["ESS"]["COST_OP"] c[t * NX_MG + PESS_DC] = mg["ESS"]["COST_OP"] c[t * NX_MG + PPV] = mg["PV"]["COST"] # c[t * NX_MG + PBIC_AC2DC] = mg["ESS"]["COST_OP"] # c[t * NX_MG + PBIC_DC2AC] = mg["ESS"]["COST_OP"] # c[t * NX_MG + PUG] = mg["DG"]["COST_A"] # c[t * NX_MG + PMESS] = 0.001 ## 1.4) Upper and lower boundary information if t == T - 1: lb[t * NX_MG + EESS] = mg["ESS"]["E0"] ub[t * NX_MG + EESS] = mg["ESS"]["E0"] # 2) Formulate the equal constraints # 2.1) Power balance equation # a) AC bus equation Aeq = lil_matrix((T, nv)) beq = zeros(T) for t in range(T): Aeq[t, t * NX_MG + PG] = 1 Aeq[t, t * NX_MG + PUG] = 1 Aeq[t, t * NX_MG + PBIC_AC2DC] = -1 Aeq[t, t * NX_MG + PBIC_DC2AC] = mg["BIC"]["EFF_DC2AC"] beq[t] = mg["PD"]["AC"][t] # b) DC bus equation Aeq_temp = lil_matrix((T, nv)) beq_temp = zeros(T) for t in range(T): Aeq_temp[t, t * NX_MG + PBIC_AC2DC] = mg["BIC"]["EFF_AC2DC"] Aeq_temp[t, t * NX_MG + PBIC_DC2AC] = -1 Aeq_temp[t, t * NX_MG + PESS_CH] = -1 Aeq_temp[t, t * NX_MG + PESS_DC] = 1 Aeq_temp[t, t * NX_MG + PPV] = 1 Aeq_temp[t, t * NX_MG + PMESS] = 1 # The power injection from mobile energy storage systems beq_temp[t] = mg["PD"]["DC"][t] Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # c) AC reactive power balance equation Aeq_temp = lil_matrix((T, nv)) beq_temp = 
zeros(T) for t in range(T): Aeq_temp[t, t * NX_MG + QUG] = 1 Aeq_temp[t, t * NX_MG + QBIC] = 1 Aeq_temp[t, t * NX_MG + QG] = 1 beq_temp[t] = mg["QD"]["AC"][t] Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 2.2) Energy storage balance equation Aeq_temp = lil_matrix((T, nv)) beq_temp = zeros(T) for t in range(T): Aeq_temp[t, t * NX_MG + EESS] = 1 Aeq_temp[t, t * NX_MG + PESS_CH] = -mg["ESS"]["EFF_CH"] Aeq_temp[t, t * NX_MG + PESS_DC] = 1 / mg["ESS"]["EFF_DC"] if t == 0: beq_temp[t] = mg["ESS"]["E0"] else: Aeq_temp[t, (t - 1) * NX_MG + EESS] = -1 Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 3) Formualte inequality constraints # There is no inequality constraint. # sol = milp(c, Aeq=Aeq, beq=beq, A=None, b=None, xmin=lb, xmax=ub) model_micro_grid = {"c": c, "q": q, "lb": lb, "ub": ub, "vtypes": vtypes, "A": None, "b": None, "Aeq": Aeq, "beq": beq } return model_micro_grid def problem_formulation_tess(self, mess, tns): """ Problem formulation for transportation energy storage scheduling, including vehicle routine problem and etc. :param tess: specific tess information :param traffic_network: transportation network information :return: """ nb_tra = self.nb_tra T = self.T nb = self.nb nl_tra = tns["branch"].shape[0] # Formulate the connection matrix between the transportation networks and power networks connection_matrix = zeros(((2 * nl_tra + nb_tra) * T, 4)) weight = zeros((2 * nl_tra + nb_tra) * T) for i in range(T): for j in range(nl_tra): # Add from matrix connection_matrix[i * (2 * nl_tra + nb_tra) + j, F_BUS] = tns["branch"][j, F_BUS] + i * nb_tra connection_matrix[i * (2 * nl_tra + nb_tra) + j, T_BUS] = tns["branch"][j, T_BUS] + \ tns["branch"][j, TIME] * nb_tra + i * nb_tra weight[i * (2 * nl_tra + nb_tra) + j] = 1 connection_matrix[i * (2 * nl_tra + nb_tra) + j, TIME] = tns["branch"][j, TIME] for j in range(nl_tra): # Add to matrix connection_matrix[i * (2 * nl_tra + nb_tra) + j + nl_tra, F_BUS] = tns["branch"][j, T_BUS] + i * nb_tra connection_matrix[i * (2 * nl_tra + nb_tra) + j + nl_tra, T_BUS] = tns["branch"][j, F_BUS] + \ tns["branch"][j, TIME] * nb_tra + \ i * nb_tra weight[i * (2 * nl_tra + nb_tra) + j + nl_tra] = 1 connection_matrix[i * (2 * nl_tra + nb_tra) + j + nl_tra, TIME] = tns["branch"][j, TIME] for j in range(nb_tra): connection_matrix[i * (2 * nl_tra + nb_tra) + 2 * nl_tra + j, F_BUS] = j + i * nb_tra connection_matrix[i * (2 * nl_tra + nb_tra) + 2 * nl_tra + j, T_BUS] = j + (i + 1) * nb_tra if tns["bus"][j, LOCATION] >= 0: connection_matrix[i * (2 * nl_tra + nb_tra) + 2 * nl_tra + j, 3] = tns["bus"][j, LOCATION] + i * nb # Delete the out of range lines index = find(connection_matrix[:, T_BUS] < T * nb_tra) connection_matrix = connection_matrix[index, :] weight = weight[index] # add two virtual nodes to represent the initial and end status of vehicles # special attention should be paid here, as the original index has been modified! 
connection_matrix[:, F_BUS] += 1 connection_matrix[:, T_BUS] += 1 # From matrix temp = zeros((nb_tra, 4)) weight_temp = zeros(nb_tra) for i in range(nb_tra): temp[i, 1] = i + 1 connection_matrix = concatenate([temp, connection_matrix]) weight = concatenate([weight_temp, weight]) # To matrix for i in range(nb_tra): temp = zeros((1, 4)) temp[0, 0] = nb_tra * (T - 1) + i + 1 temp[0, 1] = nb_tra * T + 1 if tns["bus"][i, LOCATION] >= 0: temp[0, 3] = tns["bus"][i, LOCATION] + (T - 1) * nb connection_matrix = concatenate([connection_matrix, temp]) weight_temp = zeros(1) weight = concatenate([weight, weight_temp]) # Status transition matrix nl_tra = connection_matrix.shape[0] # 0 represents that, the bus is not within the power networks nb_tra_ele = sum((tns["bus"][:, 2]) >= 0) status_matrix = zeros((T, nl_tra)) for i in range(T): for j in range(nl_tra): if connection_matrix[j, F_BUS] >= i * nb_tra + 1 and connection_matrix[j, F_BUS] < (i + 1) * nb_tra + 1: status_matrix[i, j] = 1 if connection_matrix[j, F_BUS] <= i * nb_tra + 1 and connection_matrix[j, T_BUS] > (i + 1) * nb_tra + 1: status_matrix[i, j] = 1 # Update connection matrix connection_matrix_f = zeros((T * nb_tra + 2, nl_tra)) connection_matrix_t = zeros((T * nb_tra + 2, nl_tra)) for i in range(T * nb_tra + 2): connection_matrix_f[i, find(connection_matrix[:, F_BUS] == i)] = 1 connection_matrix_t[i, find(connection_matrix[:, T_BUS] == i)] = 1 n_stops = find(connection_matrix[:, 3]).__len__() assert n_stops == nb_tra_ele * T, "The number of bus stop is not right!" nv_tra = nl_tra + 4 * n_stops # Status transition, discharging status, charging rate, discharging rate, spinning reserve lx = zeros(nv_tra) ux = ones(nv_tra) self.nv_tra = nv_tra self.nl_tra = nl_tra self.n_stops = n_stops self.nb_tra_ele = nb_tra_ele self.connection_matrix = connection_matrix ux[nl_tra + 0 * n_stops:nl_tra + 1 * n_stops] = 1 # The locations ux[nl_tra + 1 * n_stops:nl_tra + 2 * n_stops] = mess["PDMAX"] ux[nl_tra + 2 * n_stops:nl_tra + 3 * n_stops] = mess["PCMAX"] ux[nl_tra + 3 * n_stops:nl_tra + 4 * n_stops] = mess["PCMAX"] + mess["PDMAX"] # The initial location and stop location lx[find(connection_matrix[:, F_BUS] == 0)] = mess["initial"] ux[find(connection_matrix[:, F_BUS] == 0)] = mess["initial"] lx[find(connection_matrix[:, T_BUS] == T * nb_tra + 1)] = mess["end"] ux[find(connection_matrix[:, T_BUS] == T * nb_tra + 1)] = mess["end"] vtypes = ["b"] * nl_tra + ["b"] * n_stops + ["c"] * 3 * n_stops Aeq = connection_matrix_f - connection_matrix_t beq = zeros(T * nb_tra + 2) beq[0] = 1 beq[-1] = -1 # statue constraints Aeq_temp = status_matrix beq_temp = ones(T) Aeq = concatenate([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) neq_traffic = Aeq.shape[0] # Fulfill the missing zeros Aeq = concatenate([Aeq, zeros((neq_traffic, 4 * n_stops))], axis=1) ## Inequality constraints index_stops = find(connection_matrix[:, 3]) index_operation = arange(n_stops) power_limit = sparse((ones(n_stops), (index_operation, index_stops)), (n_stops, nl_tra)) # This mapping matrix plays an important role in the connection between the power network and traffic network ## 1) Stopping status A = zeros((3 * n_stops, nv_tra)) # Charging, discharging status, RBS # Discharging A[0:n_stops, 0: nl_tra] = -power_limit.toarray() * mess["PDMAX"] A[0:n_stops, nl_tra + n_stops: nl_tra + 2 * n_stops] = eye(n_stops) # Charging A[n_stops:n_stops * 2, 0: nl_tra] = -power_limit.toarray() * mess["PCMAX"] A[n_stops:n_stops * 2, nl_tra + 2 * n_stops:nl_tra + 3 * n_stops] = eye(n_stops) # spinning 
reserve A[n_stops * 2: n_stops * 3, 0: nl_tra] = -power_limit.toarray() * (mess["PCMAX"] + mess["PDMAX"]) A[n_stops * 2:n_stops * 3, nl_tra + 3 * n_stops:nl_tra + 4 * n_stops] = eye(n_stops) b = zeros(3 * n_stops) ## 2) Operating status Arange = zeros((2 * n_stops, nv_tra)) brange = zeros(2 * n_stops) # 1) Pdc<(1-Ic)*Pdc_max Arange[0: n_stops, nl_tra:nl_tra + n_stops] = eye(n_stops) * mess["PDMAX"] Arange[0: n_stops, nl_tra + n_stops: nl_tra + n_stops * 2] = eye(n_stops) brange[0: n_stops] = ones(n_stops) * mess["PDMAX"] # 2) Pc<Ic*Pch_max Arange[n_stops:n_stops * 2, nl_tra: nl_tra + n_stops] = -eye(n_stops) * mess["PCMAX"] Arange[n_stops:n_stops * 2, nl_tra + n_stops * 2: nl_tra + n_stops * 3] = eye(n_stops) A = concatenate([A, Arange]) b = concatenate([b, brange]) ## 2) Power limitation Areserve = zeros((2 * n_stops, nv_tra)) breserve = zeros(2 * n_stops) # 1) Pdc-Pc+Rbs<=Pdc_max Areserve[0: n_stops, nl_tra + n_stops: nl_tra + n_stops * 2] = eye(n_stops) Areserve[0: n_stops, nl_tra + n_stops * 2:nl_tra + n_stops * 3] = -eye(n_stops) Areserve[0: n_stops, nl_tra + n_stops * 3:nl_tra + n_stops * 4] = eye(n_stops) breserve[0: n_stops] = ones(n_stops) * mess["PDMAX"] # 2) Pc-Pdc+Rbs<=Pc_max Areserve[n_stops:n_stops * 2, nl_tra + n_stops: nl_tra + n_stops * 2] = - eye(n_stops) Areserve[n_stops:n_stops * 2, nl_tra + n_stops * 2:nl_tra + n_stops * 3] = eye(n_stops) Areserve[n_stops:n_stops * 2, nl_tra + n_stops * 3:nl_tra + n_stops * 4] = eye(n_stops) breserve[n_stops:n_stops * 2] = ones(n_stops) * mess["PCMAX"] A = concatenate([A, Areserve]) b = concatenate([b, breserve]) # Add constraints on the energy status Aenergy = zeros((2 * T, nv_tra)) benergy = zeros(2 * T) for j in range(T): # minimal energy Aenergy[j, nl_tra + n_stops:nl_tra + n_stops + (j + 1) * nb_tra_ele] = 1 / mess["EFF_DC"] Aenergy[j, nl_tra + 2 * n_stops:nl_tra + 2 * n_stops + (j + 1) * nb_tra_ele] = -mess["EFF_CH"] # Aenergy[j, NX_status + 3 * n_stops + (j + 1) * nb_traffic_electric - 1] = 0.5 if j != (T - 1): benergy[j] = mess["E0"] - mess["EMIN"] else: benergy[j] = 0 # maximal energy Aenergy[T + j, nl_tra + n_stops: nl_tra + n_stops + (j + 1) * nb_tra_ele] = -1 / mess["EFF_DC"] Aenergy[T + j, nl_tra + 2 * n_stops:nl_tra + 2 * n_stops + (j + 1) * nb_tra_ele] = mess["EFF_CH"] if j != (T - 1): benergy[T + j] = mess["EMAX"] - mess["E0"] else: benergy[T + j] = 0 A = concatenate([A, Aenergy]) b = concatenate([b, benergy]) c = concatenate([connection_matrix[:, TIME], zeros(n_stops * 4)]) # sol = milp(zeros(NX_traffic), q=zeros(NX_traffic), Aeq=Aeq, beq=beq, A=A, b=b, xmin=lx, xmax=ux) model_tess = {"c": c, "q": zeros(nv_tra), "lb": lx, "ub": ux, "vtypes": vtypes, "A": A, "b": b, "Aeq": Aeq, "beq": beq, "NV": nv_tra, } return model_tess def problem_formulation_tess_second_stage(self, mess): """ Problem formulation for transportation energy storage scheduling, including vehicle routine problem and etc. 
:param tess: specific tess information :param traffic_network: transportation network information :return: """ T = self.T n_stops = self.n_stops # Number of stops in nb_tra_ele = self.nb_tra_ele nv = 2 * n_stops + T # Status transition, charging status, charging rate, discharging rate, spinning reserve lb = zeros(nv) ub = zeros(nv) lb[n_stops * 2:nv] = mess["EMIN"] ub[n_stops * 0:n_stops * 1] = mess["PDMAX"] ub[n_stops * 1:n_stops * 2] = mess["PCMAX"] ub[n_stops * 2:nv] = mess["EMAX"] lb[-1] = mess["E0"] # energy storage systems end status ub[-1] = mess["E0"] # energy storage systems end status vtypes = ["c"] * nv # The energy status dynamics Aeq = zeros((T, nv)) beq = zeros(T) for t in range(T): Aeq[t, n_stops * 2 + t] = 1 Aeq[t, n_stops + nb_tra_ele * t:n_stops + nb_tra_ele * (t + 1)] = -mess["EFF_CH"] Aeq[t, nb_tra_ele * t:nb_tra_ele * (t + 1)] = 1 / mess["EFF_DC"] if t == 0: beq[t] = mess["E0"] else: Aeq[t, n_stops * 2 + t - 1] = -1 c = concatenate((ones(n_stops * 2) * mess["COST_OP"], zeros(T))) # sol = milp(c, Aeq=Aeq, beq=beq, A=None, b=None, xmin=lx, xmax=ux) model_tess = {"c": c, "q": zeros(nv), "lb": lb, "ub": ub, "vtypes": vtypes, "A": None, "b": None, "Aeq": Aeq, "beq": beq, "NX": nv, } return model_tess def scenario_generation_reduction(self, micro_grids, profile, pns, pv_profile, update=0, ns=2, ns_reduced=2, std=0.03, interval=0.05, std_pv=0.05): """ Scenario generation function for the second-stage scheduling Stochastic variables include 1) loads in distribution networks, active loads for 2) AC bus and 3)DC bus. The assumption is that, the 1) loads in distribution networks follow normal distribution nb*T 2) loads for AC bus and DC bus follow uniform distribution nmg*T*4 :return: """ T = self.T nmg = self.nmg nb = self.nb db_management = DataBaseManagement() if update > 0: # 1) scenario generation bus_load = zeros((ns, nb * T)) mg_load = zeros((ns, nmg * T * 2)) mg_pv = zeros((ns, nmg * T)) weight = ones(ns) / ns for i in range(ns): for t in range(T): for j in range(nb): bus_load[i, t * nb + j] = pns["bus"][j, PD] * (1 + random.normal(0, std)) * profile[t] pv_rand = random.normal(0, std_pv) # all PV are correlated! 
for j in range(nmg): mg_load[i, t * nmg + j] = micro_grids[j]["PD"]["AC"][t] * \ (1 + random.uniform(-interval, interval)) mg_load[i, nmg * T + t * nmg + j] = micro_grids[j]["PD"]["DC"][t] * \ (1 + random.uniform(-interval, interval)) mg_pv[i, t * nmg + j] = micro_grids[j]["PV"]["PMAX"] * pv_profile[t] * \ (1 + pv_rand) # 2) scenario reduction scenario_reduction = ScenarioReduction() (scenario_reduced, weight_reduced) = \ scenario_reduction.run(scenario=concatenate([bus_load, mg_load, mg_pv], axis=1), weight=weight, n_reduced=ns_reduced, power=2) # 3) Store the data into database db_management.create_table("scenarios", nb=nb, nmg=nmg) for i in range(ns - ns_reduced): for t in range(T): # print(scenario_reduced[i, nb * T + nmg * T + t * nmg: nb * T + nmg * T + (t + 1) * nmg].tolist()) db_management.insert_data_scenario("scenarios", scenario=i, weight=weight_reduced[i], time=t, nb=nb, pd=scenario_reduced[i, t * nb:(t + 1) * nb].tolist(), nmg=nmg, pd_ac=scenario_reduced[i, nb * T + t * nmg: nb * T + (t + 1) * nmg].tolist(), pd_dc=scenario_reduced[i, nb * T + nmg * T + t * nmg: nb * T + nmg * T + (t + 1) * nmg].tolist(), ppv=scenario_reduced[i, nb * T + nmg * T * 2 + t * nmg: nb * T + nmg * T * 2 + (t + 1) * nmg].tolist()) # print(t) else: # 4) if not updated, inquery the database scenario_reduced = zeros((ns - ns_reduced, nb * T + nmg * T * 3)) weight_reduced = zeros(ns - ns_reduced) for i in range(ns - ns_reduced): for t in range(T): data = db_management.inquery_data_scenario(table_name="scenarios", scenario=i, time=t) weight_reduced[i] = data[1] scenario_reduced[i, nb * t:nb * (t + 1)] = array(data[3:nb + 3]) scenario_reduced[i, nb * T + nmg * t:nb * T + nmg * (t + 1)] = array(data[nb + 3:nb + 3 + nmg]) scenario_reduced[i, nb * T + nmg * T + nmg * t:nb * T + nmg * T + nmg * (t + 1)] = \ array(data[nb + 3 + nmg:nb + 3 + nmg * 2]) scenario_reduced[i, nb * T + nmg * T * 2 + nmg * t:nb * T + nmg * T * 2 + nmg * (t + 1)] = \ array(data[nb + 3 + nmg * 2:nb + 3 + nmg * 3]) # assert sum(weight_reduced) == 1, "The weight factor is not right!" # 4) return value ds_load_profile = scenario_reduced[:, 0:nb * T] mgs_load_profile = scenario_reduced[:, nb * T:nb * T + nmg * T * 2] pv_load_profile = scenario_reduced[:, nb * T + nmg * T * 2:] # profile_second_stage = zeros((ns, T)) microgrids_second_stage = [0] * (ns - ns_reduced) # for i in range(ns): # for j in range(T): # profile_second_stage[i, j] = profile[j] * (1 + 0.5 * random.random()) # for i in range(ns - ns_reduced): microgrids_second_stage[i] = deepcopy(micro_grids) for j in range(nmg): microgrids_second_stage[i][j]["PV"]["PROFILE"] = zeros(T) for t in range(T): microgrids_second_stage[i][j]["PD"]["AC"][t] = mgs_load_profile[i, t * nmg + j] microgrids_second_stage[i][j]["QD"]["AC"][t] = mgs_load_profile[i, t * nmg + j] * 0.2 microgrids_second_stage[i][j]["PD"]["DC"][t] = mgs_load_profile[i, T * nmg + t * nmg + j] microgrids_second_stage[i][j]["PV"]["PROFILE"][t] = pv_load_profile[i, t * nmg + j] return ds_load_profile, microgrids_second_stage, weight_reduced if __name__ == "__main__":
mpc = case33.case33() # Default test case
0
2023-11-27 15:57:53+00:00
24k
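The coupling constraints built in the row above all follow one template: a second-stage reserve-feasibility requirement such as pess_dc - pess_ch <= Pess_dc - Pess_ch + Ress is rearranged into Ts @ x + Ws @ y <= hs, where Ts picks out the first-stage charging/discharging/reserve schedule and Ws picks out their second-stage realizations. Below is a minimal, self-contained sketch of that template; the toy dimensions and the contiguous (Pess_ch, Pess_dc, Ress) variable layout are illustrative assumptions, not the real model's offsets (ng * 5 + nmg * k inside _nv_first_stage).

from numpy import zeros
from scipy.sparse import lil_matrix

T, nmg = 2, 2                  # toy horizon and microgrid count
nv_first = T * nmg * 3         # per (period, MG): Pess_ch, Pess_dc, Ress
nv_second = T * nmg * 2        # per (period, MG): pess_ch, pess_dc

Ts = lil_matrix((nmg * T, nv_first))
Ws = lil_matrix((nmg * T, nv_second))
hs = zeros(nmg * T)
for i in range(T):
    for j in range(nmg):
        row = i * nmg + j
        Ts[row, row * 3 + 0] = 1    # +Pess_ch (first-stage charging schedule)
        Ts[row, row * 3 + 1] = -1   # -Pess_dc (first-stage discharging schedule)
        Ts[row, row * 3 + 2] = -1   # -Ress    (first-stage reserve)
        Ws[row, row * 2 + 0] = -1   # -pess_ch (second-stage realization)
        Ws[row, row * 2 + 1] = 1    # +pess_dc (second-stage realization)

# Further constraint families would be appended the same way the model does it,
# via scipy.sparse vstack on (Ts, Ws) and numpy concatenate on hs.
print(Ts.shape, Ws.shape, hs.shape)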
girgle/DouZero_For_New_HLDDZ
main.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"Unity...
import GameHelper as gh
import os
import sys
import time
import threading
import pyautogui
import win32gui
import multiprocessing as mp
import DetermineColor as DC
import cv2
import numpy as np
import traceback
import BidModel
import LandlordModel
import FarmerModel
from GameHelper import GameHelper
from PIL import Image
from skimage.metrics import structural_similarity as ssim
from collections import defaultdict
from douzero.env.move_detector import get_move_type
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtCore import QTime, QEventLoop, Qt
from MainWindow import Ui_Form
from douzero.env.game import GameEnv
from douzero.evaluation.deep_agent import DeepAgent
15,028
f.write(str(int(time.time())) + " " + cards_str + " " + str(round(win_rate, 2)) + "\n") print("叫牌预估胜率:", win_rate) self.BidWinrate.setText("叫牌胜率:" + str(round(win_rate, 2)) + "%") if jiaodizhu_btn is not None: print("找到《叫地主》按钮", jiaodizhu_btn) HaveBid = True print(win_rate, self.BidThreshold1) if win_rate > self.BidThreshold1: helper.ClickOnImage("jiaodizhu_btn", region=self.GeneralBtnPos) else: helper.ClickOnImage("bujiao_btn", region=self.GeneralBtnPos) self.sleep(500) if qiangdizhu_btn is not None: print("找到《抢地主》按钮", qiangdizhu_btn) HaveBid = True if win_rate > self.BidThreshold2: is_stolen = 1 helper.ClickOnImage("qiangdizhu_btn", region=self.GeneralBtnPos) else: print("点《不抢》") helper.ClickOnImage("buqiang_btn", region=self.GeneralBtnPos) self.sleep(500) if jiabei_btn is not None: self.sleep(500) break self.label.setText("游戏开始") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') laotou = helper.LocateOnScreen("laotou", region=(761, 45, 255, 100)) while laotou is not None: self.detect_start_btn() if not self.RunGame: break self.sleep(200) print("在游戏里,还在抢地主。。。。") self.label.setText("在抢地主") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') print("底牌现身。。。") self.label.setText("抢完地主") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') self.sleep(200) llcards = self.find_landlord_cards() while len(llcards) != 3: self.detect_start_btn() if not self.RunGame: break if len(llcards) > 3: self.ThreeLandlordCardsConfidence += 0.05 time.sleep(200) elif len(llcards) < 3: self.ThreeLandlordCardsConfidence -= 0.05 time.sleep(200) llcards = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + llcards) print("地主牌:", llcards) cards = self.find_my_cards() while len(cards) != 17 and len(cards) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) cards = self.find_my_cards() cards_str = "".join([card[0] for card in cards]) self.UserHandCards.setText("手牌:" + cards_str) print("手牌:" + cards_str) if len(cards_str) == 20: win_rate = LandlordModel.predict(cards_str) self.PreWinrate.setText("局前胜率:" + str(round(win_rate, 2)) + "%") print("预估地主胜率:", win_rate) else: user_position_code = self.find_landlord(self.LandlordFlagPos) print(user_position_code) while user_position_code is None: self.detect_start_btn() if not self.RunGame: break user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) user_position = ['up', 'landlord', 'down'][user_position_code] win_rate = FarmerModel.predict(cards_str, llcards, user_position) - 5 print("预估农民胜率:", win_rate) self.PreWinrate.setText("局前胜率:" + str(round(win_rate, 2)) + "%") self.sleep(500) if win_rate > self.JiabeiThreshold[is_stolen][0]: chaojijiabei_btn = helper.LocateOnScreen("chaojijiabei_btn", region=self.GeneralBtnPos) if chaojijiabei_btn is not None: helper.ClickOnImage("chaojijiabei_btn", region=self.GeneralBtnPos) else: helper.ClickOnImage("jiabei_btn", region=self.GeneralBtnPos) self.sleep(500) elif win_rate > self.JiabeiThreshold[is_stolen][1]: helper.ClickOnImage("jiabei_btn", region=self.GeneralBtnPos) self.sleep(500) else: helper.ClickOnImage("bujiabei_btn", region=self.GeneralBtnPos) self.sleep(500) if win_rate > self.MingpaiThreshold: self.sleep(1000) mingpai_btn = helper.LocateOnScreen("mingpai_btn", region=self.GeneralBtnPos) while mingpai_btn is None: print('没找到《明牌》按钮') self.sleep(200) mingpai_btn = helper.LocateOnScreen("mingpai_btn", region=self.GeneralBtnPos) helper.ClickOnImage("mingpai_btn", region=self.GeneralBtnPos) self.sleep(500) print("加倍环节已结束") 
def animation(self, cards):
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper() class MyPyQT_Form(QtWidgets.QWidget, Ui_Form): def __init__(self): super(MyPyQT_Form, self).__init__() self.other_hands_cards_str = None self.stop_sign = None self.loop_sign = None self.env = None self.three_landlord_cards_env = None self.three_landlord_cards_real = None self.user_hand_cards_env = None self.user_hand_cards_real = None self.play_order = None self.card_play_data_list = None self.other_hand_cards = None self.other_played_cards_env = None self.other_played_cards_real = None self.user_position = None self.user_position_code = None self.setupUi(self) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | # 使能最小化按钮 QtCore.Qt.WindowStaysOnTopHint | # 窗体总在最前端 QtCore.Qt.WindowCloseButtonHint) self.setWindowIcon(QIcon(':/pics/favicon.ico')) self.setWindowTitle("DouZero欢乐斗地主v2.0") self.setFixedSize(self.width(), self.height()) # 固定窗体大小 self.move(50, 50) # self.setWindowIcon(QIcon('pics/favicon.ico')) window_pale = QtGui.QPalette() # window_pale.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap("pics/bg.png"))) self.setPalette(window_pale) self.SingleButton.clicked.connect(self.game_single) self.LoopButton.clicked.connect(self.game_loop) self.StopButton.clicked.connect(self.stop) # self.Players = [self.RPlayer, self.Player, self.LPlayer] self.Players = [self.RPlayedCard, self.PredictedCard, self.LPlayedCard] self.counter = QTime() # 参数 self.MyConfidence = 0.8 # 我的牌的置信度 self.OtherConfidence = 0.8 # 别人的牌的置信度 self.WhiteConfidence = 0.85 # 检测白块的置信度 self.LandlordFlagConfidence = 0.8 # 检测地主标志的置信度 self.ThreeLandlordCardsConfidence = 0.8 # 检测地主底牌的置信度 self.PassConfidence = 0.7 self.PassConfidence = 0.8 self.WaitTime = 1 # 等待状态稳定延时 self.MyFilter = 40 # 我的牌检测结果过滤参数 self.OtherFilter = 25 # 别人的牌检测结果过滤参数 self.SleepTime = 0.1 # 循环中睡眠时间 self.RunGame = False self.AutoPlay = False self.BidThreshold1 = 65 # 叫地主阈值 self.BidThreshold2 = 72 # 抢地主阈值 self.JiabeiThreshold = ( (85, 72), # 叫地主 超级加倍 加倍 阈值 (85, 75) # 叫地主 超级加倍 加倍 阈值 (在地主是抢来的情况下) ) self.MingpaiThreshold = 92 # 坐标 self.MyHandCardsPos = (180, 560, 1050, 90) # 我的截图区域 self.LPlayedCardsPos = (320, 280, 500, 120) # 左边出牌截图区域 self.RPlayedCardsPos = (600, 280, 500, 120) # 右边出牌截图区域 self.LandlordCardsPos = (600, 33, 220, 103) # 地主底牌截图区域 self.LPassPos = (360, 360, 120, 80) # 左边不出截图区域 self.RPassPos = (940, 360, 120, 80) # 右边不出截图区域 self.PassBtnPos = (200, 450, 1000, 120) # 要不起截图区域 self.GeneralBtnPos = (200, 450, 1000, 120) # 叫地主、抢地主、加倍按钮截图区域 self.LandlordFlagPos = [(1247, 245, 48, 52), (12, 661, 51, 53), (123, 243, 52, 54)] # 地主标志截图区域(右-我-左) self.card_play_model_path_dict = { 'landlord': "baselines/resnet/resnet_landlord.ckpt", 'landlord_up': "baselines/resnet/resnet_landlord_up.ckpt", 'landlord_down': "baselines/resnet/resnet_landlord_down.ckpt" } def game_single(self): self.loop_sign = 0 self.stop_sign = 0 self.detect_start_btn() self.before_start() self.init_cards() def game_loop(self): 
self.loop_sign = 1 self.stop_sign = 0 while True: if self.stop_sign == 1: break self.detect_start_btn() self.before_start() self.init_cards() self.sleep(5000) def stop(self): self.stop_sign = 1 print("按下停止键") try: self.RunGame = False self.loop_sign = 0 self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") except AttributeError as e: traceback.print_exc() def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # 其他玩家出牌 self.other_played_cards_real = "" self.other_played_cards_env = [] # 其他玩家手牌(整副牌减去玩家手牌,后续再减掉历史出牌) self.other_hand_cards = [] # 三张底牌 self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家 self.user_position_code = None self.user_position = "" # 开局时三个玩家的手牌 self.card_play_data_list = {} # 识别玩家手牌 self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # 识别三张底牌 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # 识别玩家的角色 self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str 
= str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # 生成手牌结束,校验手牌数量 if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # 出牌顺序:0-玩家出牌, 1-玩家下家出牌, 2-玩家上家出牌 self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # 创建一个代表玩家的AI ai_players = [0, 0] ai_players[0] = self.user_position ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position]) self.env = GameEnv(ai_players) try: self.start() except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() print(e) traceback.print_tb(exc_tb) # self.stop() def sleep(self, ms): self.counter.restart() while self.counter.elapsed() < ms: QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 50) def start(self): # print("现在的出牌顺序是谁:0是我;1是下家;2是上家:", self.play_order) self.env.card_play_init(self.card_play_data_list) print("开始出牌\n") while not self.env.game_over: self.detect_start_btn() if not self.RunGame: break if self.play_order == 0: self.PredictedCard.setText("...") action_message = self.env.step(self.user_position) score = float(action_message['win_rate']) if "resnet" in self.card_play_model_path_dict[self.user_position]: score *= 8 self.UserHandCards.setText("手牌:" + str(''.join( [EnvCard2RealCard[c] for c in self.env.info_sets[self.user_position].player_hand_cards]))[::-1]) self.PredictedCard.setText(action_message["action"] if action_message["action"] else "不出") self.PredictedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') self.WinRate.setText("评分:" + action_message["win_rate"]) print("出牌:", action_message["action"] if action_message["action"] else "不出", ",得分:", action_message["win_rate"]) print("\n手牌:", str(''.join( [EnvCard2RealCard[c] for c in self.env.info_sets[self.user_position].player_hand_cards]))) if action_message["action"] == "": result = helper.LocateOnScreen("pass_btn", region=self.PassBtnPos, confidence=0.7) passSign = helper.LocateOnScreen("yaobuqi", region=self.GeneralBtnPos, confidence=0.7) while result is None and passSign is None: self.detect_start_btn() if not self.RunGame: break result = helper.LocateOnScreen("pass_btn", region=self.PassBtnPos, confidence=0.7) passSign = helper.LocateOnScreen("yaobuqi", region=self.GeneralBtnPos, confidence=0.7) if result is not None: helper.ClickOnImage("pass_btn", region=self.PassBtnPos, confidence=0.7) self.sleep(100) if passSign is not None: self.sleep(100) 
helper.ClickOnImage("yaobuqi", region=self.GeneralBtnPos, confidence=0.7) # helper.LeftClick((940, 640)) self.sleep(200) else: hand_cards_str = ''.join( [EnvCard2RealCard[c] for c in self.env.info_sets[self.user_position].player_hand_cards]) result = helper.LocateOnScreen("play_card", region=self.PassBtnPos, confidence=0.7) while result is None: self.detect_start_btn() if not self.RunGame: break print("等待出牌按钮") self.sleep(200) result = helper.LocateOnScreen("play_card", region=self.PassBtnPos, confidence=0.7) self.click_cards(action_message["action"]) self.sleep(200) helper.ClickOnImage("play_card", region=self.PassBtnPos, confidence=0.7) self.sleep(200) result = helper.LocateOnScreen("play_card", region=self.PassBtnPos, confidence=0.7) if result is not None: self.click_cards(action_message["action"]) ani = self.animation(action_message["action"]) if ani: self.sleep(800) if len(hand_cards_str) == 0: self.RunGame = False try: if self.env is not None: self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") print("程序走到这里") except AttributeError as e: traceback.print_exc() break self.PredictedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.sleep(200) self.play_order = 1 elif self.play_order == 1: self.RPlayedCard.setText("等待下家出牌") self.RPlayedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') pass_flag = helper.LocateOnScreen('buchu', region=self.RPassPos) rightCards = self.find_other_cards(self.RPlayedCardsPos) while self.RunGame and len(rightCards) == 0 and pass_flag is None: self.detect_start_btn() print("等待下家出牌") self.sleep(100) pass_flag = helper.LocateOnScreen('buchu', region=self.RPassPos) rightCards = self.find_other_cards(self.RPlayedCardsPos) self.sleep(10) # 未找到"不出" if pass_flag is None: # 识别下家出牌 while True: self.detect_start_btn() if not self.RunGame: break rightOne = self.find_other_cards(self.RPlayedCardsPos) self.sleep(50) rightTwo = self.find_other_cards(self.RPlayedCardsPos) if rightOne == rightTwo: self.other_played_cards_real = rightOne if "X" in rightOne or "D" in rightOne: self.sleep(500) self.other_played_cards_real = self.find_other_cards(self.RPlayedCardsPos) ani = self.animation(rightOne) if ani: self.RPlayedCard.setText("等待动画") self.sleep(500) break # self.RBrowser.append(self.other_played_cards_real) # 找到"不出" else: self.other_played_cards_real = "" print("\n下家出牌:", self.other_played_cards_real) self.other_played_cards_env = [RealCard2EnvCard[c] for c in list(self.other_played_cards_real)] self.other_played_cards_env.sort() self.env.step(self.user_position, self.other_played_cards_env) # 更新界面 self.RPlayedCard.setText(self.other_played_cards_real if self.other_played_cards_real else "不出") self.RPlayedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0);') # self.other_hands_cards_str = self.other_hands_cards_str.replace(self.other_played_cards_real, "", 1) self.other_hands_cards_str = self.subtract_strings(self.other_hands_cards_str, self.other_played_cards_real) # print("记牌器:", self.other_hands_cards_str) self.cards_recorder(self.other_hands_cards_str) self.sleep(200) self.play_order = 2 elif self.play_order == 2: self.LPlayedCard.setText("等待上家出牌") self.LPlayedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') pass_flag = helper.LocateOnScreen('buchu', region=self.LPassPos) leftCards = self.find_other_cards(self.LPlayedCardsPos) while len(leftCards) == 0 and pass_flag is None: self.detect_start_btn() if not self.RunGame: break print("等待上家出牌") 
self.sleep(100) pass_flag = helper.LocateOnScreen('buchu', region=self.LPassPos) leftCards = self.find_other_cards(self.LPlayedCardsPos) self.sleep(10) if pass_flag is None: # 识别上家出牌 while True: self.detect_start_btn() if not self.RunGame: break leftOne = self.find_other_cards(self.LPlayedCardsPos) self.sleep(50) leftTwo = self.find_other_cards(self.LPlayedCardsPos) if leftOne == leftTwo: self.other_played_cards_real = leftOne if "X" in leftOne or "D" in leftOne: self.sleep(500) self.other_played_cards_real = self.find_other_cards(self.LPlayedCardsPos) ani = self.animation(leftOne) if ani: self.LPlayedCard.setText("等待动画") self.sleep(500) break # self.LBrowser.append(self.other_played_cards_real) # 找到"不出" else: self.other_played_cards_real = "" print("\n上家出牌:", self.other_played_cards_real) self.other_played_cards_env = [RealCard2EnvCard[c] for c in list(self.other_played_cards_real)] self.other_played_cards_env.sort() self.env.step(self.user_position, self.other_played_cards_env) # 更新界面 self.LPlayedCard.setText(self.other_played_cards_real if self.other_played_cards_real else "不出") self.LPlayedCard.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.other_hands_cards_str = self.subtract_strings(self.other_hands_cards_str, self.other_played_cards_real) # print("记牌器:", self.other_hands_cards_str) self.cards_recorder(self.other_hands_cards_str) self.sleep(300) self.play_order = 0 if self.loop_sign == 0: self.stop() print("这里有问题") self.label.setText("游戏结束") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.init_display() def detect_start_btn(self): beans = [(308, 204, 254, 60), (295, 474, 264, 60), (882, 203, 230, 60)] for i in beans: result = helper.LocateOnScreen("over", region=i, confidence=0.9) if result is not None: print("找到游戏结束的豆子,游戏已结束") self.RunGame = False self.init_display() try: if self.env is not None: self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") print("程序走到这里") except AttributeError as e: traceback.print_exc() break result = helper.LocateOnScreen("continue", region=(1100, 617, 200, 74)) if result is not None: if self.loop_sign == 0: print("检测到本局游戏已结束") self.label.setText("游戏已结束") self.stop() else: self.RunGame = True helper.ClickOnImage("continue", region=(1100, 617, 200, 74)) self.sleep(100) try: if self.env is not None: self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") except AttributeError as e: traceback.print_exc() result = helper.LocateOnScreen("start_game", region=(720, 466, 261, 117)) if result is not None: helper.ClickOnImage("start_game", region=(720, 466, 261, 117)) self.sleep(1000) result = helper.LocateOnScreen("sure", region=(657, 500, 216, 72)) if result is not None: helper.ClickOnImage("sure", region=(657, 500, 216, 72)) self.sleep(1000) result = helper.LocateOnScreen("good", region=(434, 599, 219, 88)) if result is not None: helper.ClickOnImage("good", region=(434, 599, 219, 88)) self.sleep(1000) result = helper.LocateOnScreen("zhidao", region=(593, 543, 224, 94)) if result is not None: helper.ClickOnImage("zhidao", region=(593, 543, 224, 94)) self.sleep(1000) result = helper.LocateOnScreen("chacha", region=(930, 150, 454, 504), confidence=0.7) if result is not None: helper.ClickOnImage("chacha", region=(930, 150, 454, 504), confidence=0.7) self.sleep(1000) def cards_filter(self, location, distance): # 牌检测结果滤波 if len(location) == 0: return 0 locList = [location[0][0]] 
poslist = [location[0]] count = 1 for e in location: flag = 1 # “是新的”标志 for have in locList: # print(abs(e[0] - have)) if abs(e[0] - have) <= distance: flag = 0 break if flag: count += 1 locList.append(e[0]) poslist.append(e) # print(poslist) return count, poslist def find_cards(self, img, pos, mark="", confidence=0.8): cards_real = "" D_king = 0 X_king = 0 for card in AllCards: result = gh.LocateAllOnImage(img, helper.PicsCV[mark + card], region=pos, confidence=confidence) if len(result) > 0: count, s = self.cards_filter(list(result), 30) if card == "X" or card == "D": for a in s: classifier = DC.ColorClassify(debug=True) img1 = img[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]] img2 = img1[a[1]:a[1] + a[3] - 50, a[0]:a[0] + 20] # 注意连着裁切时img不能重名 # gh.ShowImg(img2) result = classifier.classify(img2) # print(result) for b in result: if b[0] == "Red": if b[1] > 0.54: D_king = 1 else: X_king = 1 else: cards_real += card[0] * count if X_king: cards_real += "X" cards_real = cards_real[-1] + cards_real[:-1] if D_king: cards_real += "D" cards_real = cards_real[-1] + cards_real[:-1] return cards_real def find_my_cards(self): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) my_cards_real = self.find_cards(img, self.MyHandCardsPos, mark="m") return my_cards_real def find_other_cards(self, pos): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) other_cards_real = self.find_cards(img, pos, mark="o") return other_cards_real def find_landlord_cards(self): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) landlord__cards_real = self.find_cards(img, self.LandlordCardsPos, mark="z", confidence=self.ThreeLandlordCardsConfidence) return landlord__cards_real def click_cards(self, out_cards): cards = self.find_my_cards() num = len(cards) space = 45.6 res1 = helper.LocateOnScreen("up_left", region=self.MyHandCardsPos, confidence=0.65) while res1 is None: self.detect_start_btn() if not self.RunGame: break print("未找到手牌区域") self.sleep(500) res1 = helper.LocateOnScreen("up_left", region=self.MyHandCardsPos, confidence=0.65) pos = res1[0] + 6, res1[1] + 7 res2 = helper.LocateOnScreen("up_left", region=(180, 580, 1050, 90), confidence=0.65) if res2 is not None: pos = res1[0] + 6, res1[1] + 7 pos_list = [(int(pos[0] + i * space), pos[1]) for i in range(num)] # 将两个列表合并转为字典 cards_dict = defaultdict(list) for key, value in zip(cards, pos_list): cards_dict[key].append(value) # 转换为普通的字典 cards_dict = dict(cards_dict) remove_dict = {key: [] for key in cards_dict.keys()} # print(cards_dict) if out_cards == "DX": helper.LeftClick2((cards_dict["X"][0][0] + 20, 650)) self.sleep(500) else: for i in out_cards: cars_pos = cards_dict[i][-1][0:2] # print("准备点击的牌:", cards_dict[i]) point = cars_pos[0] + 20, cars_pos[1] + 100 img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2RGB) check_one = self.find_cards(img=img, pos=(cars_pos[0] - 2, 565, 60, 60), mark="m", confidence=0.8) # print("系统帮你点的牌:", check_one, "你要出的牌:", i) if check_one == i and check_one != "D" and check_one != "X": # print("腾讯自动帮你选牌:", check_one) img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) cv2.imwrite("debug.png", img) else: helper.LeftClick2(point) # print(point) self.sleep(100) remove_dict[i].append(cards_dict[i][-1]) cards_dict[i].remove(cards_dict[i][-1]) # print("remove_dict", remove_dict) img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) check_cards = 
self.find_cards(img, (180, 590, 1050, 90), mark="m") for i in out_cards: cards = cards.replace(i, "", 1) print("检查剩的牌: ", check_cards, "应该剩的牌: ", cards) if len(check_cards) < len(cards): for m in check_cards: cards = cards.replace(m, "", 1) print("系统多点的牌: ", cards) for n in cards: # print("字典里还剩的牌: ", cards_dict) cars_pos2 = cards_dict[n][-1][0:2] # print("准备点回来的牌:", cars_pos2) point2 = cars_pos2[0] + 20, cars_pos2[1] + 100 helper.LeftClick2(point2) self.sleep(100) remove_dict[n].append(cards_dict[n][-1]) cards_dict[n].remove(cards_dict[n][-1]) elif len(check_cards) > len(cards): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) cv2.imwrite("debug2.png", img) for m in cards: check_cards = check_cards.replace(m, "", 1) print("系统少点的牌: ", check_cards) for n in check_cards: # print("删除的字典: ", remove_dict) cars_pos3 = remove_dict[n][0][0:2] # print("准备再点出去的牌:", cars_pos3) point3 = cars_pos3[0] + 20, cars_pos3[1] + 100 helper.LeftClick2(point3) self.sleep(300) remove_dict[n].remove(remove_dict[n][0]) # print(remove_dict) self.sleep(200) '''def find_landlord(self, landlord_flag_pos): for pos in landlord_flag_pos: result = helper.LocateOnScreen("landlord", region=pos, confidence=0.6) if result is not None: return landlord_flag_pos.index(pos) self.sleep(200) print("没找到地主的位置")''' def find_landlord(self, landlord_flag_pos): img, _ = helper.Screenshot() img = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2HSV) for pos in landlord_flag_pos: classifier = DC.ColorClassify(debug=True) imgCut = img[pos[1]:pos[1] + pos[3], pos[0]:pos[0] + pos[2]] result = classifier.classify(imgCut) for b in result: if b[0] == "Orange": if b[1] > 0.7: return landlord_flag_pos.index(pos) self.sleep(100) print("未找到地主位置") print("=============================") def before_start(self): global win_rate self.RunGame = True GameHelper.Interrupt = True HaveBid = False is_stolen = 0 in_game = helper.LocateOnScreen("chat", region=(1302, 744, 117, 56)) while self.RunGame and in_game is None: self.sleep(1000) print("还没进入到游戏中。。。") self.label.setText("未进入游戏") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') if self.loop_sign == 1: self.detect_start_btn() in_game = helper.LocateOnScreen("chat", region=(1302, 744, 117, 56)) self.detect_start_btn() self.sleep(300) print(in_game, "进入到游戏中") self.label.setText("游戏开始") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') while True: self.detect_start_btn() if not self.RunGame: break jiaodizhu_btn = helper.LocateOnScreen("jiaodizhu_btn", region=self.GeneralBtnPos) qiangdizhu_btn = helper.LocateOnScreen("qiangdizhu_btn", region=self.GeneralBtnPos) jiabei_btn = helper.LocateOnScreen("jiabei_btn", region=self.GeneralBtnPos) while jiaodizhu_btn is None and qiangdizhu_btn is None and jiabei_btn is None: self.detect_start_btn() if not self.RunGame: break print("等待叫地主、抢地主或加倍") self.sleep(200) jiaodizhu_btn = helper.LocateOnScreen("jiaodizhu_btn", region=self.GeneralBtnPos) qiangdizhu_btn = helper.LocateOnScreen("qiangdizhu_btn", region=self.GeneralBtnPos) jiabei_btn = helper.LocateOnScreen("jiabei_btn", region=self.GeneralBtnPos) print("《叫地主》, 《抢地主》, 《加倍》", jiaodizhu_btn, qiangdizhu_btn, jiabei_btn) cards = self.find_my_cards() while len(cards) != 17 and len(cards) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) cards = self.find_my_cards() cards_str = "".join([card[0] for card in cards]) self.UserHandCards.setText("手牌:" + cards_str) print("手牌:" + cards_str) win_rate = BidModel.predict(cards_str) if not HaveBid: 
with open("cardslog.txt", "a") as f: f.write(str(int(time.time())) + " " + cards_str + " " + str(round(win_rate, 2)) + "\n") print("叫牌预估胜率:", win_rate) self.BidWinrate.setText("叫牌胜率:" + str(round(win_rate, 2)) + "%") if jiaodizhu_btn is not None: print("找到《叫地主》按钮", jiaodizhu_btn) HaveBid = True print(win_rate, self.BidThreshold1) if win_rate > self.BidThreshold1: helper.ClickOnImage("jiaodizhu_btn", region=self.GeneralBtnPos) else: helper.ClickOnImage("bujiao_btn", region=self.GeneralBtnPos) self.sleep(500) if qiangdizhu_btn is not None: print("找到《抢地主》按钮", qiangdizhu_btn) HaveBid = True if win_rate > self.BidThreshold2: is_stolen = 1 helper.ClickOnImage("qiangdizhu_btn", region=self.GeneralBtnPos) else: print("点《不抢》") helper.ClickOnImage("buqiang_btn", region=self.GeneralBtnPos) self.sleep(500) if jiabei_btn is not None: self.sleep(500) break self.label.setText("游戏开始") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') laotou = helper.LocateOnScreen("laotou", region=(761, 45, 255, 100)) while laotou is not None: self.detect_start_btn() if not self.RunGame: break self.sleep(200) print("在游戏里,还在抢地主。。。。") self.label.setText("在抢地主") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') print("底牌现身。。。") self.label.setText("抢完地主") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0.5);') self.sleep(200) llcards = self.find_landlord_cards() while len(llcards) != 3: self.detect_start_btn() if not self.RunGame: break if len(llcards) > 3: self.ThreeLandlordCardsConfidence += 0.05 time.sleep(200) elif len(llcards) < 3: self.ThreeLandlordCardsConfidence -= 0.05 time.sleep(200) llcards = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + llcards) print("地主牌:", llcards) cards = self.find_my_cards() while len(cards) != 17 and len(cards) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) cards = self.find_my_cards() cards_str = "".join([card[0] for card in cards]) self.UserHandCards.setText("手牌:" + cards_str) print("手牌:" + cards_str) if len(cards_str) == 20: win_rate = LandlordModel.predict(cards_str) self.PreWinrate.setText("局前胜率:" + str(round(win_rate, 2)) + "%") print("预估地主胜率:", win_rate) else: user_position_code = self.find_landlord(self.LandlordFlagPos) print(user_position_code) while user_position_code is None: self.detect_start_btn() if not self.RunGame: break user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) user_position = ['up', 'landlord', 'down'][user_position_code] win_rate = FarmerModel.predict(cards_str, llcards, user_position) - 5 print("预估农民胜率:", win_rate) self.PreWinrate.setText("局前胜率:" + str(round(win_rate, 2)) + "%") self.sleep(500) if win_rate > self.JiabeiThreshold[is_stolen][0]: chaojijiabei_btn = helper.LocateOnScreen("chaojijiabei_btn", region=self.GeneralBtnPos) if chaojijiabei_btn is not None: helper.ClickOnImage("chaojijiabei_btn", region=self.GeneralBtnPos) else: helper.ClickOnImage("jiabei_btn", region=self.GeneralBtnPos) self.sleep(500) elif win_rate > self.JiabeiThreshold[is_stolen][1]: helper.ClickOnImage("jiabei_btn", region=self.GeneralBtnPos) self.sleep(500) else: helper.ClickOnImage("bujiabei_btn", region=self.GeneralBtnPos) self.sleep(500) if win_rate > self.MingpaiThreshold: self.sleep(1000) mingpai_btn = helper.LocateOnScreen("mingpai_btn", region=self.GeneralBtnPos) while mingpai_btn is None: print('没找到《明牌》按钮') self.sleep(200) mingpai_btn = helper.LocateOnScreen("mingpai_btn", region=self.GeneralBtnPos) helper.ClickOnImage("mingpai_btn", 
region=self.GeneralBtnPos) self.sleep(500) print("加倍环节已结束") def animation(self, cards):
move_type = get_move_type(self.real_to_env(cards))
1
2023-12-01 04:04:30+00:00
24k
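The game loop in the row above updates its card recorder with self.subtract_strings(self.other_hands_cards_str, self.other_played_cards_real), but that helper's body lies outside the crop. A plausible minimal version, assuming it computes a multiset difference and re-emits the remaining cards in the recorder's fixed D X 2 A K Q J T 9 8 7 6 5 4 3 display order:

from collections import Counter

CARD_ORDER = "DX2AKQJT9876543"  # same ordering as the AllCards list in the file

def subtract_strings(remaining: str, played: str) -> str:
    """Hypothetical reimplementation: remove each played card from `remaining` once."""
    left = Counter(remaining) - Counter(played)   # Counter subtraction drops non-positive counts
    return "".join(c * left[c] for c in CARD_ORDER)

assert subtract_strings("DX222AAA", "2A") == "DX22AA"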
super1207/satoricq
satori.py
[ { "identifier": "AdapterKook", "path": "kook_adapter.py", "snippet": "class AdapterKook:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._access_token = config[\"access_token\"]\n self._http_url = \"https://ww...
import asyncio
import aiohttp
import json
import uuid
from kook_adapter import AdapterKook
from mihoyo_adapter import AdapterMihoyo
from onebot_adapter import AdapterOnebot
from config import Config
from aiohttp import web
from qq_adapter import AdapterQQ
from tool import remove_json_null
16,628
class Satori:
    def __init__(self) -> None:
        self._config: Config = Config()
        self.adapterlist = []
        self.wsmap = {}
        self._evt_id = 100

    async def _get_adapter(self, platform, self_id):
        '''Look up the adapter that owns the bot matching this platform and self_id.'''
        for adapter in self.adapterlist:
            info = adapter["info"]
            for bot in info:
                if self_id == bot["self_id"] and bot["platform"] == platform:
                    return adapter["adapter"]
        return None

async def ws_send_json(ws, js) -> None:
js = remove_json_null(js)
5
2023-12-03 13:53:47+00:00
24k
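The next_line in the row above pipes the outgoing payload through remove_json_null (imported from tool) before it is sent over the websocket. That helper's body is not shown in this excerpt; a plausible minimal version, assuming it recursively strips null entries so the socket never emits JSON nulls:

def remove_json_null(js):
    """Hypothetical reimplementation: drop None values from dicts and lists, recursively."""
    if isinstance(js, dict):
        return {k: remove_json_null(v) for k, v in js.items() if v is not None}
    if isinstance(js, list):
        return [remove_json_null(v) for v in js if v is not None]
    return js

assert remove_json_null({"id": 1, "user": None, "tags": [None, "a"]}) == {"id": 1, "tags": ["a"]}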
aliyun/pai-python-sdk
pai/model.py
[ { "identifier": "git_utils", "path": "pai/common/git_utils.py", "snippet": "def git_clone_repo(git_config: Dict[str, str], source_dir: Optional[str] = None):\ndef _validate_git_config(git_config):\ndef _build_and_run_clone_command(git_config, dest_dir):\ndef _clone_command_for_codeup(git_config, dest_di...
import copy
import distutils.dir_util
import json
import logging
import os.path
import posixpath
import shlex
import shutil
import tempfile
import textwrap
import time
import requests
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from addict import Dict as AttrDict
from oss2 import ObjectIterator
from .common import git_utils
from .common.consts import INSTANCE_TYPE_LOCAL_GPU, ModelFormat
from .common.docker_utils import ContainerRun, run_container
from .common.oss_utils import OssUriObj, download, is_oss_uri, upload
from .common.utils import (
    generate_repr,
    is_local_run_instance_type,
    random_str,
    to_plain_text,
)
from .exception import DuplicatedMountException, MountPathIsOccupiedException
from .image import ImageInfo
from .predictor import AsyncPredictor, LocalPredictor, Predictor, ServiceType
from .serializers import SerializerBase
from .session import Session, get_default_session
from .estimator import AlgorithmEstimator
17,036
raise TypeError() cfg_dict.update(args[0]) super(InferenceSpec, self).__setattr__( "_cfg_dict", self._transform_value(cfg_dict) ) super(InferenceSpec, self).__setattr__("__properties", properties) def __repr__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) def _transform_value(self, value): if isinstance(value, (List, Tuple)): return [self._transform_value(item) for item in value] elif isinstance(value, (Dict, AttrDict)): return AttrDict( {key: self._transform_value(value) for key, value in value.items()} ) return value def __missing__(self, name): return self._cfg_dict.__missing__(name) def __setitem__(self, name, value): return self._cfg_dict.__setitem__(name, self._transform_value(value)) def __setattr__(self, name, value): if name in getattr(self, "__properties"): super(InferenceSpec, self).__setattr__(name, self._transform_value(value)) else: self._cfg_dict.__setattr__(name, self._transform_value(value)) def __getattr__(self, item): if item.startswith("_"): return getattr(self, item) return self._cfg_dict.__getitem__(item) def __contains__(self, item): return item in self._cfg_dict def to_dict(self) -> Dict: """Return a dictionary that represent the InferenceSpec.""" return self._cfg_dict.to_dict() def add_option(self, name: str, value): """Add an option to the inference_spec instance. Args: name (str): Name of the option to set, represented as the JSON path of the parameter for the InferenceSpec. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. value: Value for the option. Examples: >>> infer_spec = InferenceSpec(processor="tensorflow_gpu_1.12") >>> infer_spec.add_option("metadata.rpc.keepalive", 10000) >>> infer_spec.metadata.rpc.keepalive 10000 >>> infer_spec.to_dict() {'processor': 'tensorflow_gpu_1.12', 'metadata': {'rpc': {'keepalive': 10000}}} """ src = self._transform_value(value) for k in reversed(name.split(".")): src = {k: src} self._cfg_dict.update(AttrDict(src)) def merge_options(self, options: Dict[str, Any]): """Merge options from a dictionary.""" for key, value in options.items(): self.add_option(key, value) @classmethod def from_dict(cls, config: Dict[str, Any]) -> "InferenceSpec": """Initialize a InferenceSpec from a dictionary. You can use this method to initialize a InferenceSpec instance from a dictionary. Returns: :class:`pai.model.InferenceSpec`: A InferenceSpec instance. """ config = config or dict() return cls(**config) def is_container_serving(self): return "containers" in self._cfg_dict @classmethod def _upload_source_dir(cls, source_dir, session): """Upload source files to OSS bucket.""" if not os.path.exists(source_dir): raise ValueError(f"Input source code path does not exist: {source_dir}.") if not os.path.isdir(source_dir): raise ValueError( f"Input source code path should be a directory: {source_dir}." ) target_dir = session.get_storage_path_by_category(category="inference_src") # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, target_dir, session.oss_bucket, ) logger.debug("Uploaded source code to OSS: %s", uploaded_source_code) return uploaded_source_code def mount( self, source: str, mount_path: str,
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) # Reserved ports for internal use, do not use them for service _RESERVED_PORTS = [8080, 9090] class DefaultServiceConfig(object): """Default configuration used in creating prediction service.""" # Listen Port listen_port = 8000 # Default model path in container model_path = "/eas/workspace/model/" # Default user code path in container code_path = "/ml/usercode/" class ResourceConfig(object): """A class that represents the resource used by a PAI prediction service instance.""" def __init__(self, cpu: int, memory: int, gpu: int = None, gpu_memory: int = None): """ResourceConfig initializer. The public resource group does not support requesting GPU resources with `ResourceConfig`. Use the 'gpu' and 'gpu_memory' parameters only for services deployed to dedicated resource groups that provide GPU machine instances. Args: cpu (int): The number of CPUs that each instance requires. memory (int): The amount of memory that each instance requires, must be an integer, Unit: MB. gpu (int): The number of GPUs that each instance requires. gpu_memory (int): The amount of GPU memory that each instance requires. The value must be an integer, Unit: GB. PAI allows memory resources of a GPU to be allocated to multiple instances. If you want multiple instances to share the memory resources of a GPU, set the gpu parameter to 0. If you set the ``gpu`` parameter to 1, each instance occupies a GPU and the gpu_memory parameter does not take effect. .. note:: **Important** PAI does not enable the strict isolation of GPU memory. To prevent out of memory (OOM) errors, make sure that the GPU memory used by each instance does not exceed the requested amount. """ self.cpu = cpu self.memory = memory self.gpu = gpu self.gpu_memory = gpu_memory def __repr__(self): return ( f"ResourceConfig(cpu={self.cpu}, memory={self.memory}MB, gpu={self.gpu or 0}," f" gpu_memory={self.gpu_memory or 0}GB)" ) def __str__(self): return self.__repr__() def to_dict(self): """Transform the ResourceConfig instance to a dictionary. Returns: dict: """ res = { "cpu": self.cpu, "gpu": self.gpu, "gpu_memory": self.gpu_memory, "memory": self.memory, } return {k: v for k, v in res.items() if v is not None} class InferenceSpec(object): """A class used to describe how to create a prediction service. InferenceSpec is used to describe how the model is served in PAI. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. Example of how to configure an InferenceSpec:: >>> # build an inference_spec that uses the XGBoost processor.
>>> infer_spec = InferenceSpec(processor="xgboost") >>> infer_spec.metadata.rpc.keepalive = 1000 >>> infer_spec.warm_up_data_path = "oss://bucket-name/path/to/warmup-data" >>> infer_spec.add_option("metadata.rpc.max_batch_size", 8) >>> print(infer_spec.processor) xgboost >>> print(infer_spec.metadata.rpc.keepalive) 1000 >>> print(infer_spec.metadata.rpc.max_batch_size) 8 >>> print(infer_spec.to_dict()) {'processor': 'xgboost', 'metadata': {'rpc': {'keepalive': 1000, 'max_batch_size': 8}}, 'warm_up_data_path': 'oss://bucket-name/path/to/warmup-data'} """ def __init__(self, *args, **kwargs): """InferenceSpec initializer. Args: **kwargs: Parameters of the inference spec. """ properties = kwargs.pop("__properties", []) cfg_dict = copy.deepcopy(kwargs) cfg_dict = {k: v for k, v in cfg_dict.items() if not k.startswith("_")} if args: if len(args) > 1: raise TypeError() cfg_dict.update(args[0]) super(InferenceSpec, self).__setattr__( "_cfg_dict", self._transform_value(cfg_dict) ) super(InferenceSpec, self).__setattr__("__properties", properties) def __repr__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) def _transform_value(self, value): if isinstance(value, (List, Tuple)): return [self._transform_value(item) for item in value] elif isinstance(value, (Dict, AttrDict)): return AttrDict( {key: self._transform_value(value) for key, value in value.items()} ) return value def __missing__(self, name): return self._cfg_dict.__missing__(name) def __setitem__(self, name, value): return self._cfg_dict.__setitem__(name, self._transform_value(value)) def __setattr__(self, name, value): if name in getattr(self, "__properties"): super(InferenceSpec, self).__setattr__(name, self._transform_value(value)) else: self._cfg_dict.__setattr__(name, self._transform_value(value)) def __getattr__(self, item): if item.startswith("_"): return getattr(self, item) return self._cfg_dict.__getitem__(item) def __contains__(self, item): return item in self._cfg_dict def to_dict(self) -> Dict: """Return a dictionary that represents the InferenceSpec.""" return self._cfg_dict.to_dict() def add_option(self, name: str, value): """Add an option to the inference_spec instance. Args: name (str): Name of the option to set, represented as the JSON path of the parameter for the InferenceSpec. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. value: Value for the option. Examples: >>> infer_spec = InferenceSpec(processor="tensorflow_gpu_1.12") >>> infer_spec.add_option("metadata.rpc.keepalive", 10000) >>> infer_spec.metadata.rpc.keepalive 10000 >>> infer_spec.to_dict() {'processor': 'tensorflow_gpu_1.12', 'metadata': {'rpc': {'keepalive': 10000}}} """ src = self._transform_value(value) for k in reversed(name.split(".")): src = {k: src} self._cfg_dict.update(AttrDict(src)) def merge_options(self, options: Dict[str, Any]): """Merge options from a dictionary.""" for key, value in options.items(): self.add_option(key, value) @classmethod def from_dict(cls, config: Dict[str, Any]) -> "InferenceSpec": """Initialize an InferenceSpec from a dictionary. You can use this method to initialize an InferenceSpec instance from a dictionary. Returns: :class:`pai.model.InferenceSpec`: An InferenceSpec instance.
""" config = config or dict() return cls(**config) def is_container_serving(self): return "containers" in self._cfg_dict @classmethod def _upload_source_dir(cls, source_dir, session): """Upload source files to OSS bucket.""" if not os.path.exists(source_dir): raise ValueError(f"Input source code path does not exist: {source_dir}.") if not os.path.isdir(source_dir): raise ValueError( f"Input source code path should be a directory: {source_dir}." ) target_dir = session.get_storage_path_by_category(category="inference_src") # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, target_dir, session.oss_bucket, ) logger.debug("Uploaded source code to OSS: %s", uploaded_source_code) return uploaded_source_code def mount( self, source: str, mount_path: str,
session: Session = None,
21
2023-12-01 01:40:12+00:00
24k
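In the add_option implementation above, the loop over reversed(name.split(".")) expands a dotted option path into a nested dictionary before it is merged into the config. A minimal standalone sketch of the same idea, using plain dicts instead of AttrDict purely for brevity:

def option_to_nested_dict(name: str, value):
    # "metadata.rpc.keepalive" + 10000 -> {"metadata": {"rpc": {"keepalive": 10000}}}
    src = value
    for key in reversed(name.split(".")):
        src = {key: src}
    return src

print(option_to_nested_dict("metadata.rpc.keepalive", 10000))
# {'metadata': {'rpc': {'keepalive': 10000}}}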
zerolink-io/zerolink-python
zerolink/req.py
[ { "identifier": "settings", "path": "zerolink/settings.py", "snippet": " CONFIG_FILE = os.path.join(os.environ[\"APPDATA\"], \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(os.environ[\"HOME\"], \".config\", \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(\n os.environ[\"HO...
from typing import Any, Optional, cast from zerolink import settings from zerolink.exc import APIError, AuthenticationError from zerolink_client import Client from zerolink_client.api.default import finetune, get_models_models_get from zerolink_client.api.entity import ( desc_entity_id, desc_entity_ontology, lookup_entity, lookup_relation, search_entity, ) from zerolink_client.api.extract import extract_text from zerolink_client.api.fact import ( create_userattribute, create_userentity, create_userrule, create_usertriple, ) from zerolink_client.api.kg import get_triple from zerolink_client.api.question import post_question from zerolink_client.api.session import ( create_session, get_session_entities, get_session_facts, get_user_session, ) from zerolink_client.api.user import create_user from zerolink_client.models import ( ChatSession, CreateAttribute, CreateEntity, CreateRule, CreateRuleResponse, CreateTriple, CreateTuneJobResponse, Entity, HTTPValidationError, Question, QuestionResponse, TextExtract, ) from zerolink_client.types import File, UNSET
15,942
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError):
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError):
raise APIError(str(rep))
1
2023-12-03 07:50:04+00:00
24k
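The get_user_id flow above assigns rep.user_id before the HTTPValidationError check runs. A defensive variant, sketched only from names already visible in this file (create_user.sync, HTTPValidationError, APIError, settings), would validate the response type first:

def get_user_id_checked() -> str:
    # Sketch: check the response type before dereferencing user_id.
    client._headers["Authorization"] = settings.api_key
    rep = create_user.sync(client=client)
    if rep is None:
        raise Exception("Failed to authenticate.")
    if isinstance(rep, HTTPValidationError):
        raise APIError(str(rep))
    settings.api_key = rep.user_id
    return rep.user_id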
JunMa11/UHNSeg-Quiz
nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
[ { "identifier": "DC_and_BCE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_BCE_loss(nn.Module):\n def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,\n dice_class=MemoryEfficientSoftDiceLoss...
import torch from torch import autocast from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss from nnunetv2.training.loss.dice import get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer from nnunetv2.utilities.helpers import dummy_context from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels from torch.nn.parallel import DistributedDataParallel as DDP
17,667
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None, dice_class=MemoryEfficientSoftDiceLoss) else: loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss) return loss def _get_deep_supervision_scales(self): return None def initialize(self): if not self.was_initialized: self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager, self.dataset_json) self.network = self.build_network_architecture(self.plans_manager, self.dataset_json, self.configuration_manager, self.num_input_channels, enable_deep_supervision=False).to(self.device) self.optimizer, self.lr_scheduler = self.configure_optimizers() # if ddp, wrap in DDP wrapper if self.is_ddp: self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network) self.network = DDP(self.network, device_ids=[self.local_rank]) self.loss = self._build_loss() self.was_initialized = True else: raise RuntimeError("You have called self.initialize even though the trainer was already initialized. " "That should not happen.") def set_deep_supervision_enabled(self, enabled: bool): pass def validation_step(self, batch: dict) -> dict: data = batch['data'] target = batch['target'] data = data.to(self.device, non_blocking=True) if isinstance(target, list): target = [i.to(self.device, non_blocking=True) for i in target] else: target = target.to(self.device, non_blocking=True) self.optimizer.zero_grad(set_to_none=True) # Autocast behaves inconsistently across device types. # If the device_type is 'cpu' it is very slow and needs to be disabled. # If the device_type is 'mps' it will complain that mps is not implemented, even if enabled=False is set (this is why we don't make use of enabled=False). # So autocast will only be active if we have a cuda device.
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None, dice_class=MemoryEfficientSoftDiceLoss) else: loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss) return loss def _get_deep_supervision_scales(self): return None def initialize(self): if not self.was_initialized: self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager, self.dataset_json) self.network = self.build_network_architecture(self.plans_manager, self.dataset_json, self.configuration_manager, self.num_input_channels, enable_deep_supervision=False).to(self.device) self.optimizer, self.lr_scheduler = self.configure_optimizers() # if ddp, wrap in DDP wrapper if self.is_ddp: self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network) self.network = DDP(self.network, device_ids=[self.local_rank]) self.loss = self._build_loss() self.was_initialized = True else: raise RuntimeError("You have called self.initialize even though the trainer was already initialized. " "That should not happen.") def set_deep_supervision_enabled(self, enabled: bool): pass def validation_step(self, batch: dict) -> dict: data = batch['data'] target = batch['target'] data = data.to(self.device, non_blocking=True) if isinstance(target, list): target = [i.to(self.device, non_blocking=True) for i in target] else: target = target.to(self.device, non_blocking=True) self.optimizer.zero_grad(set_to_none=True) # Autocast behaves inconsistently across device types. # If the device_type is 'cpu' it is very slow and needs to be disabled. # If the device_type is 'mps' it will complain that mps is not implemented, even if enabled=False is set (this is why we don't make use of enabled=False). # So autocast will only be active if we have a cuda device.
with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():
5
2023-12-04 19:43:14+00:00
24k
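The device-conditional autocast in validation_step above can be distilled into a small sketch; contextlib.nullcontext stands in for nnunetv2's dummy_context here (an assumed equivalence, both being no-op context managers):

import torch
from contextlib import nullcontext
from torch import autocast

def forward_maybe_autocast(network, data, device: torch.device):
    # Mixed precision only pays off on CUDA: autocast is slow on CPU and
    # unsupported on MPS, so use a no-op context on non-CUDA devices.
    ctx = autocast(device.type, enabled=True) if device.type == 'cuda' else nullcontext()
    with ctx:
        return network(data)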
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/neuralhmm_tts.py
[ { "identifier": "Encoder", "path": "TTS/tts/layers/overflow/common_layers.py", "snippet": "class Encoder(nn.Module):\n r\"\"\"Neural HMM Encoder\n\n Same as Tacotron 2 encoder but increases the input length by states per phone\n\n Args:\n num_chars (int): Number of characters in the inpu...
import os import torch from typing import Dict, List, Union from coqpit import Coqpit from torch import nn from trainer.logging.tensorboard_logger import TensorboardLogger from TTS.tts.layers.overflow.common_layers import Encoder, OverflowUtils from TTS.tts.layers.overflow.neural_hmm import NeuralHMM from TTS.tts.layers.overflow.plotting_utils import ( get_spec_from_most_probable_state, plot_transition_probabilities_to_numpy, ) from TTS.tts.models.base_tts import BaseTTS from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.text.tokenizer import TTSTokenizer from TTS.tts.utils.visual import plot_alignment, plot_spectrogram from TTS.utils.generic_utils import format_aux_input from TTS.utils.io import load_fsspec from TTS.utils.audio import AudioProcessor
18,230
""" text, text_len, mels, mel_len = self.preprocess_batch(text, text_len, mels, mel_len) encoder_outputs, encoder_output_len = self.encoder(text, text_len) log_probs, fwd_alignments, transition_vectors, means = self.neural_hmm( encoder_outputs, encoder_output_len, mels.transpose(1, 2), mel_len ) outputs = { "log_probs": log_probs, "alignments": fwd_alignments, "transition_vectors": transition_vectors, "means": means, } return outputs @staticmethod def _training_stats(batch): stats = {} stats["avg_text_length"] = batch["text_lengths"].float().mean() stats["avg_spec_length"] = batch["mel_lengths"].float().mean() stats["avg_text_batch_occupancy"] = (batch["text_lengths"].float() / batch["text_lengths"].float().max()).mean() stats["avg_spec_batch_occupancy"] = (batch["mel_lengths"].float() / batch["mel_lengths"].float().max()).mean() return stats def train_step(self, batch: dict, criterion: nn.Module): text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] outputs = self.forward( text=text_input, text_len=text_lengths, mels=mel_input, mel_len=mel_lengths, ) loss_dict = criterion(outputs["log_probs"] / (mel_lengths.sum() + text_lengths.sum())) # for printing useful statistics on terminal loss_dict.update(self._training_stats(batch)) return outputs, loss_dict def eval_step(self, batch: Dict, criterion: nn.Module): return self.train_step(batch, criterion) def _format_aux_input(self, aux_input: Dict, default_input_dict): """Set missing fields to their default value. Args: aux_inputs (Dict): Dictionary containing the auxiliary inputs. """ default_input_dict = default_input_dict.copy() default_input_dict.update( { "sampling_temp": self.sampling_temp, "max_sampling_time": self.max_sampling_time, "duration_threshold": self.duration_threshold, } ) if aux_input: return format_aux_input(default_input_dict, aux_input) return default_input_dict @torch.no_grad() def inference( self, text: torch.Tensor, aux_input={"x_lengths": None, "sampling_temp": None, "max_sampling_time": None, "duration_threshold": None}, ): # pylint: disable=dangerous-default-value """Sampling from the model Args: text (torch.Tensor): :math:`[B, T_in]` aux_inputs (_type_, optional): _description_. Defaults to None. Returns: outputs: Dictionary containing the following - mel (torch.Tensor): :math:`[B, T_out, C]` - hmm_outputs_len (torch.Tensor): :math:`[B]` - state_travelled (List[List[int]]): List of lists containing the state travelled for each sample in the batch. - input_parameters (list[torch.FloatTensor]): Input parameters to the neural HMM. - output_parameters (list[torch.FloatTensor]): Output parameters to the neural HMM. 
""" default_input_dict = { "x_lengths": torch.sum(text != 0, dim=1), } aux_input = self._format_aux_input(aux_input, default_input_dict) encoder_outputs, encoder_output_len = self.encoder.inference(text, aux_input["x_lengths"]) outputs = self.neural_hmm.inference( encoder_outputs, encoder_output_len, sampling_temp=aux_input["sampling_temp"], max_sampling_time=aux_input["max_sampling_time"], duration_threshold=aux_input["duration_threshold"], ) mels, mel_outputs_len = outputs["hmm_outputs"], outputs["hmm_outputs_len"] mels = self.inverse_normalize(mels) outputs.update({"model_outputs": mels, "model_outputs_len": mel_outputs_len}) outputs["alignments"] = OverflowUtils.double_pad(outputs["alignments"]) return outputs @staticmethod def get_criterion(): return NLLLoss() @staticmethod def init_from_config(config: "NeuralhmmTTSConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): """Initiate model from config Args: config (VitsConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. verbose (bool): If True, print init messages. Defaults to True. """ ap = AudioProcessor.init_from_config(config, verbose)
class NeuralhmmTTS(BaseTTS): """Neural HMM TTS model. Paper:: https://arxiv.org/abs/2108.13320 Paper abstract:: Neural sequence-to-sequence TTS has achieved significantly better output quality than statistical speech synthesis using HMMs. However, neural TTS is generally not probabilistic and uses non-monotonic attention. Attention failures increase training time and can make synthesis babble incoherently. This paper describes how the old and new paradigms can be combined to obtain the advantages of both worlds, by replacing attention in neural TTS with an autoregressive left-right no-skip hidden Markov model defined by a neural network. Based on this proposal, we modify Tacotron 2 to obtain an HMM-based neural TTS model with monotonic alignment, trained to maximise the full sequence likelihood without approximation. We also describe how to combine ideas from classical and contemporary TTS for best results. The resulting example system is smaller and simpler than Tacotron 2, and learns to speak with fewer iterations and less data, whilst achieving comparable naturalness prior to the post-net. Our approach also allows easy control over speaking rate. Audio examples and code are available at https://shivammehta25.github.io/Neural-HMM/ . Note: - This is a parameter-efficient version of OverFlow (15.3M vs 28.6M). Since it has half the number of parameters as OverFlow the synthesis output quality is suboptimal (but comparable to Tacotron2 without Postnet), but it learns to speak with even less data and is still significantly faster than other attention-based methods. - Neural HMM uses flat start initialization, i.e. it computes the means and std and transition probabilities of the dataset and uses them to initialize the model. This benefits the model and helps with faster learning. If you change the dataset or want to regenerate the parameters, change the `force_generate_statistics` and `mel_statistics_parameter_path` accordingly. - To enable multi-GPU training, set the `use_grad_checkpointing=False` in config. This will significantly increase the memory usage. This is because to compute the actual data likelihood (not an approximation using MAS/Viterbi) we must use all the states at the previous time step during the forward pass to decide the probability distribution at the current step, i.e. the difference between the forward algorithm and the Viterbi approximation. Check :class:`TTS.tts.configs.neuralhmm_tts_config.NeuralhmmTTSConfig` for class arguments.
""" def __init__( self, config: "NeuralhmmTTSConfig", ap: "AudioProcessor" = None, tokenizer: "TTSTokenizer" = None, speaker_manager: SpeakerManager = None, ): super().__init__(config, ap, tokenizer, speaker_manager) # pass all config fields to `self` # for fewer code change self.config = config for key in config: setattr(self, key, config[key]) self.encoder = Encoder(config.num_chars, config.state_per_phone, config.encoder_in_out_features) self.neural_hmm = NeuralHMM( frame_channels=self.out_channels, ar_order=self.ar_order, deterministic_transition=self.deterministic_transition, encoder_dim=self.encoder_in_out_features, prenet_type=self.prenet_type, prenet_dim=self.prenet_dim, prenet_n_layers=self.prenet_n_layers, prenet_dropout=self.prenet_dropout, prenet_dropout_at_inference=self.prenet_dropout_at_inference, memory_rnn_dim=self.memory_rnn_dim, outputnet_size=self.outputnet_size, flat_start_params=self.flat_start_params, std_floor=self.std_floor, use_grad_checkpointing=self.use_grad_checkpointing, ) self.register_buffer("mean", torch.tensor(0)) self.register_buffer("std", torch.tensor(1)) def update_mean_std(self, statistics_dict: Dict): self.mean.data = torch.tensor(statistics_dict["mean"]) self.std.data = torch.tensor(statistics_dict["std"]) def preprocess_batch(self, text, text_len, mels, mel_len): if self.mean.item() == 0 or self.std.item() == 1: statistics_dict = torch.load(self.mel_statistics_parameter_path) self.update_mean_std(statistics_dict) mels = self.normalize(mels) return text, text_len, mels, mel_len def normalize(self, x): return x.sub(self.mean).div(self.std) def inverse_normalize(self, x): return x.mul(self.std).add(self.mean) def forward(self, text, text_len, mels, mel_len): """ Forward pass for training and computing the log likelihood of a given batch. Shapes: Shapes: text: :math:`[B, T_in]` text_len: :math:`[B]` mels: :math:`[B, T_out, C]` mel_len: :math:`[B]` """ text, text_len, mels, mel_len = self.preprocess_batch(text, text_len, mels, mel_len) encoder_outputs, encoder_output_len = self.encoder(text, text_len) log_probs, fwd_alignments, transition_vectors, means = self.neural_hmm( encoder_outputs, encoder_output_len, mels.transpose(1, 2), mel_len ) outputs = { "log_probs": log_probs, "alignments": fwd_alignments, "transition_vectors": transition_vectors, "means": means, } return outputs @staticmethod def _training_stats(batch): stats = {} stats["avg_text_length"] = batch["text_lengths"].float().mean() stats["avg_spec_length"] = batch["mel_lengths"].float().mean() stats["avg_text_batch_occupancy"] = (batch["text_lengths"].float() / batch["text_lengths"].float().max()).mean() stats["avg_spec_batch_occupancy"] = (batch["mel_lengths"].float() / batch["mel_lengths"].float().max()).mean() return stats def train_step(self, batch: dict, criterion: nn.Module): text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] outputs = self.forward( text=text_input, text_len=text_lengths, mels=mel_input, mel_len=mel_lengths, ) loss_dict = criterion(outputs["log_probs"] / (mel_lengths.sum() + text_lengths.sum())) # for printing useful statistics on terminal loss_dict.update(self._training_stats(batch)) return outputs, loss_dict def eval_step(self, batch: Dict, criterion: nn.Module): return self.train_step(batch, criterion) def _format_aux_input(self, aux_input: Dict, default_input_dict): """Set missing fields to their default value. Args: aux_inputs (Dict): Dictionary containing the auxiliary inputs. 
""" default_input_dict = default_input_dict.copy() default_input_dict.update( { "sampling_temp": self.sampling_temp, "max_sampling_time": self.max_sampling_time, "duration_threshold": self.duration_threshold, } ) if aux_input: return format_aux_input(default_input_dict, aux_input) return default_input_dict @torch.no_grad() def inference( self, text: torch.Tensor, aux_input={"x_lengths": None, "sampling_temp": None, "max_sampling_time": None, "duration_threshold": None}, ): # pylint: disable=dangerous-default-value """Sampling from the model Args: text (torch.Tensor): :math:`[B, T_in]` aux_inputs (_type_, optional): _description_. Defaults to None. Returns: outputs: Dictionary containing the following - mel (torch.Tensor): :math:`[B, T_out, C]` - hmm_outputs_len (torch.Tensor): :math:`[B]` - state_travelled (List[List[int]]): List of lists containing the state travelled for each sample in the batch. - input_parameters (list[torch.FloatTensor]): Input parameters to the neural HMM. - output_parameters (list[torch.FloatTensor]): Output parameters to the neural HMM. """ default_input_dict = { "x_lengths": torch.sum(text != 0, dim=1), } aux_input = self._format_aux_input(aux_input, default_input_dict) encoder_outputs, encoder_output_len = self.encoder.inference(text, aux_input["x_lengths"]) outputs = self.neural_hmm.inference( encoder_outputs, encoder_output_len, sampling_temp=aux_input["sampling_temp"], max_sampling_time=aux_input["max_sampling_time"], duration_threshold=aux_input["duration_threshold"], ) mels, mel_outputs_len = outputs["hmm_outputs"], outputs["hmm_outputs_len"] mels = self.inverse_normalize(mels) outputs.update({"model_outputs": mels, "model_outputs_len": mel_outputs_len}) outputs["alignments"] = OverflowUtils.double_pad(outputs["alignments"]) return outputs @staticmethod def get_criterion(): return NLLLoss() @staticmethod def init_from_config(config: "NeuralhmmTTSConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): """Initiate model from config Args: config (VitsConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. verbose (bool): If True, print init messages. Defaults to True. """ ap = AudioProcessor.init_from_config(config, verbose)
tokenizer, new_config = TTSTokenizer.init_from_config(config)
7
2023-11-29 08:15:06+00:00
24k
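Two relationships implicit in the code above, written out: preprocess_batch normalises mel frames with the dataset statistics held in the mean/std buffers, and train_step divides the summed HMM log-likelihood by the total mel-frame and text-token count of the batch before handing it to the NLL criterion (assuming the criterion simply negates its input, as OverFlow-style NLLLoss does):

\hat{\mathbf{x}} = \frac{\mathbf{x} - \mu}{\sigma},
\qquad
\mathcal{L} = -\,\frac{\log p_\theta(\hat{\mathbf{x}} \mid \text{text})}{\sum_b T_b^{\text{mel}} + \sum_b T_b^{\text{text}}}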
magic-research/magic-animate
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
19,383
verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 Invert a real image into a noise map with deterministic DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # iterative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the next noise sample x_t -> x_t+1 latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate latents during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 =
latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might lead to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions.
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if not is_accelerate_available(): raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f"
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 Invert a real image into a noise map with deterministic DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # iterative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the next noise sample x_t -> x_t+1 latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate latents during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f)
5
2023-11-21 08:33:54+00:00
24k
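The next_step method above is the deterministic DDIM inversion update; in the notation of the DDIM paper (https://arxiv.org/abs/2010.02502), with \epsilon_\theta the model output and \bar\alpha_t the cumulative product of alphas, it computes:

\hat{x}_0 = \frac{x_t - \sqrt{1-\bar\alpha_t}\,\epsilon_\theta(x_t, t)}{\sqrt{\bar\alpha_t}},
\qquad
x_{t+1} = \sqrt{\bar\alpha_{t+1}}\,\hat{x}_0 + \sqrt{1-\bar\alpha_{t+1}}\,\epsilon_\theta(x_t, t)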
eth-sri/language-model-arithmetic
src/model_arithmetic/evaluation.py
[ { "identifier": "BaseClass", "path": "src/model_arithmetic/base.py", "snippet": "class BaseClass:\n \"\"\"\n Base class for providing a serialization and deserialization mechanism.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Instantiates the base class with keyword argu...
from .base import BaseClass from loguru import logger from transformers import set_seed, Trainer, TrainingArguments, DataCollatorWithPadding from .dataset import CustomDataset from sklearn.model_selection import train_test_split from .basic_model_loader import load_model, load_tokenizer from .model_arithmetic import ModelArithmetic from googleapiclient import discovery from dotenv import load_dotenv from torch.utils.data import DataLoader from .utils import ENABLE_LOGGING, log from lm_eval import evaluator import pandas as pd import numpy as np import torch import os import json import time
16,086
real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"] tokenizer = load_tokenizer(model_name_fluency) model = load_model(model_name_fluency, dtype=dtype) self.output["perplexity"] = self.get_perplexity(dataset, model, tokenizer) log(logger.info, f"Perplexity is {self.output['perplexity']}") del model torch.cuda.empty_cache() return self.output["perplexity"] def faithfulness_multiple(self, dataset, model_name, **kwargs): """Calculates the faithfulness for all stored classifiers Args: dataset (pd.DataFrame): The dataset to be used for evaluation. model_name (str, list of strings): Classifier names to use """ if not isinstance(model_name, (list, tuple)): model_name = [model_name] results = dict() for model in model_name: name = model if not isinstance(name, str): name = model.__str__() results[name] = self.faithfulness(dataset, model_name=model, **kwargs) self.output["faithfulness"] = results return results def faithfulness(self, dataset, finetune_model=True, classification_with_input=True, model_name="distilbert-base-uncased", model=None, test_size=0.2, max_length=128, epochs=3, batch_size_faithfulness=16, learning_rate=2e-5, warmup_steps=500, weight_decay=0.01, save_model_folder=None, dtype_faithfulness=torch.float32, store_faithfulness=False, **kwargs): """ Calculates the faithfulness of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). If finetuning, also has column "output" (for output ground truth). finetune_model (bool, optional): Whether to finetune the model or not. classification_with_input (bool, optional): Whether to use the input of the sentence for classification or not. model_name (str, optional): The name of the model to be used for classification (either path or name). Either this or model should be provided. model (PreTrainedModel, optional): The model to be used for classification. Either this or model_name should be provided. test_size (float, optional): The size of the test set to be used for evaluation. max_length (int, optional): The maximum length of the sentences to be used for evaluation. epochs (int, optional): The number of epochs to be used for training the model. batch_size_faithfulness (int, optional): The batch size to be used for evaluation. learning_rate (float, optional): The learning rate to be used for training the model. warmup_steps (int, optional): The number of warmup steps to be used for training the model. weight_decay (float, optional): The weight decay to be used for training the model. save_model_folder (str, optional): The folder to save the trained model. dtype_faithfulness (torch.dtype, optional): The data type to be used for the model. 
store_faithfulness (bool, optional): Whether to store the resulting score or not. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating faithfulness") if ("label" not in dataset.columns or all(dataset["label"] == 1) or all(dataset["label"] == 0)) and finetune_model: log(logger.info, "Dataset does not have good labels, cannot calculate faithfulness") return None if "faithfulness" in self.output: log(logger.info, f"Reloading faithfulness. Faithfulness is {self.output['faithfulness']}") return self.output["faithfulness"] set_seed(42) df = dataset.copy() if classification_with_input: df["text"] = df["input"] + df["generated"] else: df["text"] = df["generated"] if isinstance(model_name, str): tokenizer = load_tokenizer(model_name) args = TrainingArguments( output_dir="../finetune/eval/random", evaluation_strategy="epoch", save_strategy="epoch", num_train_epochs=epochs, per_device_train_batch_size=batch_size_faithfulness, per_device_eval_batch_size=batch_size_faithfulness, warmup_steps=warmup_steps, weight_decay=weight_decay, logging_dir="logs", logging_steps=100, learning_rate=learning_rate, ) if model is None: log(logger.info, "Loading model") model = load_model(model_name, classification=True, dtype=dtype_faithfulness) # we need to train the model on the dataset if finetune_model: log(logger.info, "Finetuning model") df = dataset.copy() df = df.dropna() df["text"] = df["input"] + df["output"] train, val = train_test_split(df, test_size=test_size)
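The correct_perplexity value computed at the top of this snippet is token-level perplexity, i.e. the exponential of the total negative log-likelihood averaged over all generated tokens:

\mathrm{PPL} = \exp\!\left(\frac{\sum_i \mathrm{NLL}_i}{N_{\text{tokens}}}\right)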
load_dotenv() class Evaluation(BaseClass): """ This class is used for evaluating a model's performance on a given dataset. It includes methods for preparing the dataset, evaluating the model, generating samples, calculating perplexity and faithfulness of the model. """ def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5, bleurt_checkpoint="../models/BLEURT-20", **kwargs): """ Initialize the Evaluation class with the given parameters. Args: generator (ModelArithmetic, optional): The model to be evaluated. dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. Dataset should contain columns "text", "input", "output" and "label" ("label", "input", "output" optional) dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. Dataset should contain columns "text", "input", "output" and "label" ("label", "input", "output" optional) train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned. train_dataset_location (string, optional): The location of the dataset to be used for training the model. n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input". bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint. **kwargs: Additional keyword arguments. """ self.has_input_task = True self.dataset = None if dataset is not None: self.dataset = dataset.copy() elif dataset_location is not None: self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n") if train_dataset is not None: self.train_dataset = train_dataset elif train_dataset_location is not None: self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n") else: self.train_dataset = None if self.dataset is not None: self.prepare_dataset(n_input_words) super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task, output=dict(), extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None) if isinstance(generator, ModelArithmetic): # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable del self.kwargs["generator"] self.kwargs["formula"] = generator.formula self.formula = generator.formula def prepare_dataset(self, n_input_words=5): """ Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output. If the dataset does not have a label column, it assumes all labels are 1. Args: n_input_words (int): The number of input words to be used.
""" log(logger.debug, "Preparing dataset") if "input" not in self.dataset.columns: log(logger.debug, f"No input column found, assuming input is the first {n_input_words} words of the output") self.dataset["input"] = self.dataset["text"].apply(lambda x: " ".join(x.split()[:n_input_words])) self.dataset["output"] = self.dataset["text"].apply(lambda x: " " + " ".join(x.split()[n_input_words:])) self.has_input_task = False if "label" not in self.dataset.columns: log(logger.debug, "No label column found, assuming all labels are 1") self.dataset["label"] = 1 def evaluate_lm_eval(self, model, task_name, batch_size, num_fewshot, model_args, no_cache=False, limit=None, write_out=False, output_folder=None, **kwargs): """ Evaluates the model using the lm_eval package. Args: model (PreTrainedModel): The model to be evaluated. task_name (string): The name of the task for evaluation. batch_size (int): The batch size to be used for evaluation. num_fewshot (int): The number of fewshot examples to be used for evaluation. model_args (dict): The arguments to be passed to the model. no_cache (bool, optional): Whether to use cached results or not. limit (int, optional): The maximum number of examples to be used for evaluation. write_out (bool, optional): Whether to write out the results or not. output_folder (string, optional): The folder to write out the results. **kwargs: Additional keyword arguments. """ try: except ImportError: raise ImportError("Please install lm_eval to run this function") results = evaluator.simple_evaluate( model=model, model_args=model_args, tasks=[task_name], num_fewshot=num_fewshot, batch_size=batch_size, device="cuda" if torch.cuda.is_available() else "cpu", no_cache=no_cache, limit=limit, write_out=write_out, output_base_path=output_folder ) if "lm_eval" in self.output: self.output["lm_eval"][task_name] = results else: self.output["lm_eval"] = {task_name: results} def evaluate(self, max_tokens=128, store_file=None, reload=True, dataset_file=None, reload_data=True, preserve_memory=False, batch_size=1, do_perspective=True, speculation=False, only_faithfulness=False, **kwargs): """ Evaluates the model on the dataset and calculates the perplexity and faithfulness Args: max_tokens (int, optional): The maximum number of tokens to be used for evaluation. store_file (string, optional): The file to store the evaluation results. reload (bool, optional): Whether to reload the dataset or not if it was stored before. dataset_file (string, optional): The file containing the dataset. If path exists, dataset is loaded from path. If path does not exist, dataset is saved to path. reload_data (bool, optional): Whether to reload the data or not if it was stored before. preserve_memory (bool, optional): Whether to preserve memory or not. batch_size (int, optional): The batch size to be used for evaluation. do_perspective (bool, optional): Whether to calculate the perspective score or not. speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. 
""" if store_file is not None: os.makedirs(os.path.dirname(store_file), exist_ok=True) if dataset_file is not None: os.makedirs(os.path.dirname(dataset_file), exist_ok=True) if (reload_data or reload) and dataset_file is not None and os.path.isfile(dataset_file): log(logger.debug, f"Reloading dataset from {dataset_file}") self.dataset = pd.read_csv(dataset_file, escapechar='\\', lineterminator="\n") self.dataset.fillna("", inplace=True) else: log(logger.debug,"Generating samples") self.generate_samples(max_tokens, batch_size=batch_size, speculation=speculation, **kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") if reload and store_file is not None and os.path.isfile(store_file): self.output = json.load(open(store_file))["output"] log(logger.info, f"Reloading output. Output is {self.output}") if not reload: self.output = dict() self.dataset.dropna(inplace=True) if preserve_memory and hasattr(self.generator, "clear_memory"): # can have weird side effects! self.generator.clear_memory() if not only_faithfulness: self.perplexity(self.dataset, **kwargs) else: del self.output['faithfulness'] self.faithfulness_multiple(self.dataset, **kwargs) if do_perspective and not only_faithfulness: self.perspective(**kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") self.extra_kwargs = {kwarg: str(kwargs[kwarg]) for kwarg in kwargs} self.extra_kwargs["max_tokens"] = max_tokens if store_file is not None: log(logger.info, f"Saving output to {store_file}") self.save(store_file) return self.output def perspective(self, classification_with_input=True, **kwargs): """ Calculates the perspective score of the generated sentences. Args: classification_with_input (bool, optional): Whether to use the input of the sentence for calculating the perspective score or not. **kwargs: Additional keyword arguments. """ retries = 0 if "perspective" in self.output: log(logger.info, f"Reloading perspective. 
Perspective is {self.output['perspective']}") return self.output["perspective"] log(logger.info, "Calculating perspective") service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=os.environ["PERSPECTIVE_API_KEY"], discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1", static_discovery=False) scores = [] self.dataset["perspective"] = [None] * len(self.dataset) for index, sample in self.dataset.iterrows(): sentence = sample['generated'] if classification_with_input: sentence = sample['input'] + sentence analyze_request = { 'comment': { 'text': sentence }, 'languages': ["en"], 'requestedAttributes': {'TOXICITY': {}} } while retries < 10 ** 5: try: response = service.comments().analyze(body=analyze_request).execute() break except Exception as e: if "LANGUAGE_NOT_SUPPORTED_BY_ATTRIBUTE" in str(e): continue log(logger.warning, f"Exception {e} occurred, retrying...") retries += 1 time.sleep(10) if retries == 10 ** 5: log(logger.error, "Could not get all perspective scores") break value = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"] self.dataset.at[index, "perspective"] = value scores.append(value) time.sleep(0.1) result = np.mean(scores) log(logger.info, f"Perspective is {result:.6f}") self.output["perspective"] = { "mean": result, "std": np.std(scores), } return result def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs): """ Generates samples from the model. Args: max_tokens (int): The maximum number of tokens to be used for evaluation. batch_size (int, optional): The batch size to be used for evaluation. temperature (float, optional): The temperature to be used for sampling. top_p (float, optional): The top-p value to be used for sampling. top_k (int, optional): The top-k value to be used for sampling. stop_texts (list, optional): The list of texts at which sampling should be stopped speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. """ start_time = time.time() if "generated" not in self.dataset.columns: texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts, do_speculation=speculation) self.dataset["generated"] = texts end_time = time.time() self.output["time"] = { "total_time": end_time - start_time, "time_per_sample": (end_time - start_time) / len(self.dataset), "dataset_size": len(self.dataset), "max_tokens": max_tokens, "batch_size": batch_size } def save_generated(self, output_location): """ Saves the generated samples to the specified location. Args: output_location (string): The location to save the generated samples. """ log(logger.debug, f"Saving generated samples to {output_location}") self.dataset.to_csv(output_location) def get_perplexity(self, dataset, model, tokenizer, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model (PreTrainedModel): The model to be evaluated. tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences. **kwargs: Additional keyword arguments. 
""" perplexities = [] sum_nllos = 0 n_tokens = 0 for index, sample in dataset.iterrows(): input_sentence = sample['input'] sentence = sample['generated'] if len(sentence) == 0: continue combined_sentence = input_sentence + sentence encodings = tokenizer(combined_sentence, return_tensors='pt') input_ids = encodings['input_ids'].to(model.device) attention_mask = encodings['attention_mask'].to(model.device) input_encodings = tokenizer(input_sentence, return_tensors='pt') input_ids_inputs = input_encodings['input_ids'] input_length = input_ids_inputs.size(1) with torch.no_grad(): output = model(input_ids, labels=input_ids, attention_mask=attention_mask) logprobs = output.logits[0, :].log_softmax(dim=-1) loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum') loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous()) loss = loss.to(torch.float32).detach().cpu().numpy() n_tokens_here = input_ids.shape[-1] - input_length - 1 if n_tokens_here > 0: perplexity = np.exp(loss / n_tokens_here) sum_nllos += loss n_tokens += n_tokens_here if not np.isnan(perplexity): perplexities.append(perplexity) average = np.mean(perplexities) median = np.median(perplexities) real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"] tokenizer = load_tokenizer(model_name_fluency) model = load_model(model_name_fluency, dtype=dtype) self.output["perplexity"] = self.get_perplexity(dataset, model, tokenizer) log(logger.info, f"Perplexity is {self.output['perplexity']}") del model torch.cuda.empty_cache() return self.output["perplexity"] def faithfulness_multiple(self, dataset, model_name, **kwargs): """Calculates the faithfulness for all stored classifiers Args: dataset (pd.DataFrame): The dataset to be used for evaluation. model_name (str, list of strings): Classifier names to use """ if not isinstance(model_name, (list, tuple)): model_name = [model_name] results = dict() for model in model_name: name = model if not isinstance(name, str): name = model.__str__() results[name] = self.faithfulness(dataset, model_name=model, **kwargs) self.output["faithfulness"] = results return results def faithfulness(self, dataset, finetune_model=True, classification_with_input=True, model_name="distilbert-base-uncased", model=None, test_size=0.2, max_length=128, epochs=3, batch_size_faithfulness=16, learning_rate=2e-5, warmup_steps=500, weight_decay=0.01, save_model_folder=None, dtype_faithfulness=torch.float32, store_faithfulness=False, **kwargs): """ Calculates the faithfulness of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). If finetuning, also has column "output" (for output ground truth). 
finetune_model (bool, optional): Whether to finetune the model or not. classification_with_input (bool, optional): Whether to use the input of the sentence for classification or not. model_name (str, optional): The name of the model to be used for classification (either path or name). Either this or model should be provided. model (PreTrainedModel, optional): The model to be used for classification. Either this or model_name should be provided. test_size (float, optional): The size of the test set to be used for evaluation. max_length (int, optional): The maximum length of the sentences to be used for evaluation. epochs (int, optional): The number of epochs to be used for training the model. batch_size_faithfulness (int, optional): The batch size to be used for evaluation. learning_rate (float, optional): The learning rate to be used for training the model. warmup_steps (int, optional): The number of warmup steps to be used for training the model. weight_decay (float, optional): The weight decay to be used for training the model. save_model_folder (str, optional): The folder to save the trained model. dtype_faithfulness (torch.dtype, optional): The data type to be used for the model. store_faithfulness (bool, optional): Whether to store the resulting score or not. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating faithfulness") if ("label" not in dataset.columns or all(dataset["label"] == 1) or all(dataset["label"] == 0)) and finetune_model: log(logger.info, "Dataset does not have good labels, cannot calculate faithfulness") return None if "faithfulness" in self.output: log(logger.info, f"Reloading faithfulness. Faithfulness is {self.output['faithfulness']}") return self.output["faithfulness"] set_seed(42) df = dataset.copy() if classification_with_input: df["text"] = df["input"] + df["generated"] else: df["text"] = df["generated"] if isinstance(model_name, str): tokenizer = load_tokenizer(model_name) args = TrainingArguments( output_dir="../finetune/eval/random", evaluation_strategy="epoch", save_strategy="epoch", num_train_epochs=epochs, per_device_train_batch_size=batch_size_faithfulness, per_device_eval_batch_size=batch_size_faithfulness, warmup_steps=warmup_steps, weight_decay=weight_decay, logging_dir="logs", logging_steps=100, learning_rate=learning_rate, ) if model is None: log(logger.info, "Loading model") model = load_model(model_name, classification=True, dtype=dtype_faithfulness) # we need to train the model on the dataset if finetune_model: log(logger.info, "Finetuning model") df = dataset.copy() df = df.dropna() df["text"] = df["input"] + df["output"] train, val = train_test_split(df, test_size=test_size)
train_dataset = CustomDataset(tokenizer, train, max_tokens=max_length)
1
2023-11-21 20:01:08+00:00
24k
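The get_perplexity method in the record above computes a prompt-conditional perplexity: the exponential of the summed negative log-likelihood over only the generated tokens, with the input prompt excluded from both the loss and the token count. The following is a minimal, self-contained sketch of that idea using Hugging Face transformers; the gpt2 checkpoint and the function name are illustrative choices, not part of the record, and the record's own slicing differs slightly in how it offsets the prompt boundary.

import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").eval()

def conditional_perplexity(prompt: str, generated: str) -> float:
    # Tokenize prompt and continuation together so the continuation is
    # scored in context; only continuation tokens enter the average.
    ids = tok(prompt + generated, return_tensors="pt").input_ids
    prompt_len = tok(prompt, return_tensors="pt").input_ids.size(1)
    with torch.no_grad():
        logits = model(ids).logits
    logprobs = logits[0, :-1].log_softmax(-1)   # predictions for ids[1:]
    targets = ids[0, 1:]
    nll = torch.nn.functional.nll_loss(
        logprobs[prompt_len - 1:], targets[prompt_len - 1:], reduction="sum"
    )
    n = targets.size(0) - (prompt_len - 1)      # number of generated tokens
    assert n > 0, "generation must be non-empty"
    return float(np.exp(nll.item() / n))

print(conditional_perplexity("The capital of France is", " Paris."))

Note the record's "correct_perplexity" field aggregates the summed NLL across all samples before exponentiating, which weights long generations proportionally, unlike the per-sample average and median it also reports.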
HeliosZhao/Animate124
dnerf/utils.py
[ { "identifier": "save_tensor2image", "path": "nerf/utils.py", "snippet": "def save_tensor2image(x: torch.Tensor, path, channel_last=False, quality=75, **kwargs):\n # assume the input x is channel last\n # ipdb.set_trace()\n # if x.ndim == 4:\n # if channel_last:\n # x = x.perm...
import os import glob import tqdm import random import logging import gc import numpy as np import imageio, imageio_ffmpeg import time import cv2 import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.distributed as dist import torchvision.transforms.functional as TF import ipdb import copy from torch import Tensor from torch.utils.tensorboard import SummaryWriter from torchvision.utils import make_grid from torchmetrics.functional import pearson_corrcoef from nerf.utils import save_tensor2image, nonzero_normalize_depth, Trainer from einops import rearrange from nerf.utils import custom_meshgrid, safe_normalize from dnerf.network_4dgrid import NeRFNetwork
19,451
'rgb': pred_rgb, 'depth': pred_depth, 'normal_image': pred_normal, 'mask': pred_mask, } return out_dict def train_step(self, data): # perform RGBD loss instead of SDS if is image-conditioned do_rgbd_loss = self.opt.images is not None and \ ((self.global_step < self.opt.known_iters) or (self.global_step % self.opt.known_view_interval == 0)) # ipdb.set_trace() # override random camera with fixed known camera if do_rgbd_loss: data = self.default_view_data # progressively relaxing view range if self.opt.progressive_view: r = min(1.0, 0.2 + self.global_step / (0.5 * self.opt.iters)) self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r, self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r] self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r, self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r] self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r, self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r] self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r, self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r] # progressively increase max_level if self.opt.progressive_level: self.model.max_level = min(1.0, 0.25 + self.global_step / (0.5 * self.opt.iters)) rays_o = data['rays_o'] # [B, N, 3] # B,F,N,3 rays_d = data['rays_d'] # [B, N, 3] # B,F,N,3 mvp = data['mvp'] # [B, 4, 4] / [B,F,4,4] time = data['time'] # [B,T] use_dynamic_cam = (rays_o.ndim == 4) B = rays_o.size(0) # ipdb.set_trace() N = rays_o.size(1) if not use_dynamic_cam else rays_o.size(2) H, W = data['H'], data['W'] # ipdb.set_trace() start_from_zero = data.get('start_from_zero', True) if start_from_zero: assert time[0,0] == 0 # When ref_data has B images > opt.batch_size if B > self.opt.batch_size: # choose batch_size images out of those B images choice = torch.randperm(B)[:self.opt.batch_size] B = self.opt.batch_size rays_o = rays_o[choice] rays_d = rays_d[choice] mvp = mvp[choice] if do_rgbd_loss: ambient_ratio = 1.0 shading = 'lambertian' # use lambertian instead of albedo to get normal as_latent = False binarize = False bg_color = self.get_bg_color( self.opt.bg_color_known, B*N, rays_o.device) # add camera noise to avoid grid-like artifact if self.opt.known_view_noise_scale > 0: noise_scale = self.opt.known_view_noise_scale #* (1 - self.global_step / self.opt.iters) rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale elif self.global_step < (self.opt.latent_iter_ratio * self.opt.iters): ## 0 ambient_ratio = 1.0 shading = 'normal' as_latent = True binarize = False bg_color = None else: if self.global_step < (self.opt.normal_iter_ratio * self.opt.iters): # 0.2 ambient_ratio = 1.0 shading = 'normal' elif self.global_step < (self.opt.textureless_iter_ratio * self.opt.iters): # 0 ambient_ratio = 0.1 + 0.9 * random.random() shading = 'textureless' elif self.global_step < (self.opt.albedo_iter_ratio * self.opt.iters): # 0 ambient_ratio = 1.0 shading = 'albedo' else: # random shading ambient_ratio = 0.1 + 0.9 * random.random() rand = random.random() if rand < self.opt.textureless_rate: # 0.2 shading = 'textureless' else: shading = 'lambertian' as_latent = False # random weights binarization (like mobile-nerf) [NOT WORKING NOW] # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters) # binarize = random.random() < binarize_thresh binarize 
= False # random background rand = random.random() # ipdb.set_trace() if self.opt.bg_radius > 0 and rand > 0.5: bg_color = None # use bg_net else: bg_color = torch.rand(3).to(self.device) # single color random bg ## NOTE if bg_radius < 0 -> the way magic123 use # The bg color is always random video_outputs = [] num_frames = time.size(1)
logger = logging.getLogger(__name__) class DTrainer(Trainer): def __init__(self, argv, name, opt, model, guidance, criterion=None, optimizer=None, ema_decay=None, lr_scheduler=None, metrics=[], local_rank=0, world_size=1, device=None, mute=False, fp16=False, max_keep_ckpt=1, workspace='workspace', best_mode='min', use_loss_as_metric=True, report_metric_at_train=False, use_checkpoint="latest", use_tensorboard=True, scheduler_update_every_step=False, **kwargs): super().__init__(argv, name, opt, model, guidance, criterion, optimizer, ema_decay, lr_scheduler, metrics, local_rank, world_size, device, mute, fp16, max_keep_ckpt, workspace, best_mode, use_loss_as_metric, report_metric_at_train, use_checkpoint, use_tensorboard, scheduler_update_every_step, **kwargs) self.rgbd_scale = opt.get("rgbd_scale", 1.0) self.fix_dynamic = opt.fix_dynamic if self.fix_dynamic: assert opt.backbone == 'grid4d' self.dynamic_model = NeRFNetwork(opt) # ipdb.set_trace() model_state_dict = self.model.state_dict() self.dynamic_model.load_state_dict(model_state_dict) for p in self.dynamic_model.parameters(): p.requires_grad = False self.dynamic_model.train() self.dynamic_model.to(opt.device) @torch.no_grad() def eval_static_step(self, data, shading): rays_o = data['rays_o'] # [B, N, 3] / B,F,N,3 rays_d = data['rays_d'] # [B, N, 3] / B,F,N,3 mvp = data['mvp'] # B,4,4 / B,F,4,4 if rays_o.ndim == 4: rays_o = rays_o[:, 0] rays_d = rays_d[:, 0] mvp = mvp[:, 0] B, N = rays_o.shape[:2] H, W = data['H'], data['W'] ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0 light_d = data['light_d'] if 'light_d' in data else None # ipdb.set_trace() outputs = self.static_model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=False, bg_color=None, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading) pred_rgb = outputs['image'].reshape(B, H, W, 3) pred_depth = outputs['depth'].reshape(B, H, W, 1) if self.opt.normalize_depth: pred_depth = nonzero_normalize_depth(pred_depth) if 'normal_image' in outputs: # eval mode no normal image pred_normal = outputs['normal_image'].reshape(B, H, W, 3) else: pred_normal = None pred_mask = outputs['weights_sum'].reshape(B, H, W, 1) out_dict = { 'rgb': pred_rgb, 'depth': pred_depth, 'normal_image': pred_normal, 'mask': pred_mask, } return out_dict def train_step(self, data): # perform RGBD loss instead of SDS if is image-conditioned do_rgbd_loss = self.opt.images is not None and \ ((self.global_step < self.opt.known_iters) or (self.global_step % self.opt.known_view_interval == 0)) # ipdb.set_trace() # override random camera with fixed known camera if do_rgbd_loss: data = self.default_view_data # progressively relaxing view range if self.opt.progressive_view: r = min(1.0, 0.2 + self.global_step / (0.5 * self.opt.iters)) self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r, self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r] self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r, self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r] self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r, self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r] self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r, self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r] # progressively increase max_level if self.opt.progressive_level: self.model.max_level = min(1.0, 0.25 + 
self.global_step / (0.5 * self.opt.iters)) rays_o = data['rays_o'] # [B, N, 3] # B,F,N,3 rays_d = data['rays_d'] # [B, N, 3] # B,F,N,3 mvp = data['mvp'] # [B, 4, 4] / [B,F,4,4] time = data['time'] # [B,T] use_dynamic_cam = (rays_o.ndim == 4) B = rays_o.size(0) # ipdb.set_trace() N = rays_o.size(1) if not use_dynamic_cam else rays_o.size(2) H, W = data['H'], data['W'] # ipdb.set_trace() start_from_zero = data.get('start_from_zero', True) if start_from_zero: assert time[0,0] == 0 # When ref_data has B images > opt.batch_size if B > self.opt.batch_size: # choose batch_size images out of those B images choice = torch.randperm(B)[:self.opt.batch_size] B = self.opt.batch_size rays_o = rays_o[choice] rays_d = rays_d[choice] mvp = mvp[choice] if do_rgbd_loss: ambient_ratio = 1.0 shading = 'lambertian' # use lambertian instead of albedo to get normal as_latent = False binarize = False bg_color = self.get_bg_color( self.opt.bg_color_known, B*N, rays_o.device) # add camera noise to avoid grid-like artifact if self.opt.known_view_noise_scale > 0: noise_scale = self.opt.known_view_noise_scale #* (1 - self.global_step / self.opt.iters) rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale elif self.global_step < (self.opt.latent_iter_ratio * self.opt.iters): ## 0 ambient_ratio = 1.0 shading = 'normal' as_latent = True binarize = False bg_color = None else: if self.global_step < (self.opt.normal_iter_ratio * self.opt.iters): # 0.2 ambient_ratio = 1.0 shading = 'normal' elif self.global_step < (self.opt.textureless_iter_ratio * self.opt.iters): # 0 ambient_ratio = 0.1 + 0.9 * random.random() shading = 'textureless' elif self.global_step < (self.opt.albedo_iter_ratio * self.opt.iters): # 0 ambient_ratio = 1.0 shading = 'albedo' else: # random shading ambient_ratio = 0.1 + 0.9 * random.random() rand = random.random() if rand < self.opt.textureless_rate: # 0.2 shading = 'textureless' else: shading = 'lambertian' as_latent = False # random weights binarization (like mobile-nerf) [NOT WORKING NOW] # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters) # binarize = random.random() < binarize_thresh binarize = False # random background rand = random.random() # ipdb.set_trace() if self.opt.bg_radius > 0 and rand > 0.5: bg_color = None # use bg_net else: bg_color = torch.rand(3).to(self.device) # single color random bg ## NOTE if bg_radius < 0 -> the way magic123 use # The bg color is always random video_outputs = [] num_frames = time.size(1)
light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device))
4
2023-11-23 10:34:08+00:00
24k
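In the record above, train_step widens the sampled camera ranges (phi, theta, radius, fovy) linearly from the known reference view toward the full ranges over roughly the first half of training, via the blend factor r = min(1.0, 0.2 + step / (0.5 * iters)). A minimal sketch of that schedule with illustrative numbers; the helper name is hypothetical:

def progressive_range(step: int, iters: int, default: float,
                      full_range: tuple[float, float]) -> tuple[float, float]:
    # Blend factor grows linearly and saturates at 1.0 halfway through training.
    r = min(1.0, 0.2 + step / (0.5 * iters))
    lo = default * (1 - r) + full_range[0] * r
    hi = default * (1 - r) + full_range[1] * r
    return lo, hi

# Azimuth range widening from the reference view (0 deg) to (-180, 180):
for step in (0, 2500, 5000, 10000):
    print(step, progressive_range(step, 10000, 0.0, (-180.0, 180.0)))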
alexzhou907/DreamPropeller
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15,281
"+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self,*args, **kwargs) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): 
raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not 
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitVolume):
4
2023-11-27 23:39:49+00:00
24k
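In the record above, initialize_shape aligns an arbitrary mesh to the canonical frame (front = +x, up = +z) by building an orthonormal basis from the configured up/front axis names and inverting it, then turns the mesh into a signed distance function with pysdf (negated, since pysdf reports positive distance inside the shape). A small sketch of just the rotation step; the function name and the example orientation are illustrative:

import numpy as np

DIR2VEC = {
    "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]),
    "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]),
}

def mesh_to_std_rotation(up: str, front: str) -> np.ndarray:
    z_, x_ = DIR2VEC[up], DIR2VEC[front]
    if abs(np.dot(z_, x_)) > 0:           # parallel axes cannot span a frame
        raise ValueError("up and front must be orthogonal")
    y_ = np.cross(z_, x_)                 # right-handed third axis
    std2mesh = np.stack([x_, y_, z_], axis=0).T
    return np.linalg.inv(std2mesh)        # maps mesh coords -> standard frame

# e.g. a mesh authored with +y up and +z front:
R = mesh_to_std_rotation("+y", "+z")
print(R @ np.array([0.0, 1.0, 0.0]))      # mesh "up" lands on canonical +z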
abdulhaim/LMRL-Gym
llm_rl_scripts/text_nav/ilql/train_ilql.py
[ { "identifier": "ilql_loss", "path": "LLM_RL/algorithms/ilql/base_interface.py", "snippet": "def ilql_loss(\n q1: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n q2: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n v: jax.Array, # [batch, time-1] output is masked; shi...
from typing import Optional from JaxSeq.bucket_manager import open_with_bucket as open from transformers import AutoTokenizer from JaxSeq.utils import jsonl_stream, convert_path, load_mesh, setup_experiment_save from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask, MapIterable, jsonl_stream, FileOpenIterable from JaxSeq.models.gptj.load import load_train_state, ModelLoadMode from LLM_RL.algorithms.ilql.base_interface import ilql_loss from transformers.generation import GenerationConfig from jaxtyping import PyTree from LLM_RL.environment import Text, text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectoryChain, text_history_to_str from LLM_RL.algorithms.ilql.gpt2.interface import GPT2ILQLInference, GPT2ILQLTrain from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy, GPT2ValueRLInference from LLM_RL.heads.mlp_head import load_train_state_from_config as load_head_train_state_from_config from LLM_RL.heads.mlp_head import MLPHeadConfig from JaxSeq.shard_model import shard_params_from_params from LLM_RL.algorithms.ilql.data import ILQLIterableDataset from functools import partial from JaxSeq.logs import log, pull_logs from LLM_RL.algorithms.ilql.train import train_loop, eval_loss from LLM_RL.algorithms.ilql.data import ILQLData, ILQLIterableDataset from JaxSeq.utils import multihost_device_get from llm_rl_scripts.text_nav.env import TextNavEnv import tyro import jax import jax.numpy as jnp import os import optax import pickle as pkl import re import numpy as np import json
21,481
input_args = locals() print(input_args) tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-j-6B') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def expand(lst, default_value): new_lst = [default_value] for elem in lst: new_lst.append(elem) new_lst.append(default_value) return new_lst def map_data_item(item): text_trajectory_chain = TextTrajectoryChain( text_trajectory=TextTrajectory( # Actions are at odd indices text_history=[Text(text, i % 2 != 0) for i, text in enumerate(item['text_history'])], reward=expand(item['rewards'], 0.0), done=item['dones'][-1], ), next=None, ) token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(text_trajectory_chain, tokenizer) return ILQLData.from_token_trajectory_chain(token_trajectory_chain) train_dataset = ILQLIterableDataset.from_ilql_data_iterable( MapIterable(map_data_item, FileOpenIterable(convert_path(train_data_path), 'r', pipe=jsonl_stream)), tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, ), ) eval_dataset = ILQLIterableDataset.from_ilql_data_iterable( MapIterable(map_data_item, FileOpenIterable(convert_path(eval_data_path), 'r', pipe=jsonl_stream)), tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, ), ) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_prng_key = jax.random.PRNGKey(3) base_train_state, base_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) base_model.config.gradient_checkpointing = gradient_checkpointing base_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy with jax.default_device(jax.devices('cpu')[0]): target_base_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), base_train_state.params, ) target_base_params = shard_params_from_params( model=base_model, params=target_base_params, ) with jax.default_device(jax.devices('cpu')[0]): pi_beta_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), base_train_state.params, ) pi_beta_params = shard_params_from_params( model=base_model, params=pi_beta_params, ) q1_prng_key = jax.random.PRNGKey(4) q1_head_train_state, q_head = load_head_train_state_from_config(
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, eval_data_path: str, vocab_file: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=False, wandb_project: Optional[str]=None, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, train_bsize: int=32, grad_accum_steps: int=1, bf16_activations: bool=False, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=512, log_every: int=256, eval_every_steps: Optional[int]=None, eval_every_epochs: Optional[int]=None, eval_at_beginning: bool=False, eval_at_end: bool=True, save_every_steps: Optional[int]=None, save_every_epochs: Optional[int]=None, save_at_beginning: bool=False, save_at_end: bool=False, save_best: bool=True, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=256, policy_max_output_length: int=256, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, policy_bsize: int=32, policy_n_rollouts: int=32, eval_loss_bsize: int=32, eval_loss_batches: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, beta: float=16.0, detach_q1: bool=False, detach_q2: bool=False, detach_v: bool=False, polyak_alpha: float=0.005, hard_update_every: Optional[int]=None, gamma: float=0.99, tau: float=0.8, cql_weight: float=0.00, ): input_args = locals() print(input_args) tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-j-6B') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def expand(lst, default_value): new_lst = [default_value] for elem in lst: new_lst.append(elem) new_lst.append(default_value) return new_lst def map_data_item(item): text_trajectory_chain = TextTrajectoryChain( text_trajectory=TextTrajectory( # Actions are at odd indices text_history=[Text(text, i % 2 != 0) for i, text in enumerate(item['text_history'])], reward=expand(item['rewards'], 0.0), done=item['dones'][-1], ), next=None, ) token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(text_trajectory_chain, tokenizer) return ILQLData.from_token_trajectory_chain(token_trajectory_chain) train_dataset = ILQLIterableDataset.from_ilql_data_iterable( MapIterable(map_data_item, FileOpenIterable(convert_path(train_data_path), 'r', pipe=jsonl_stream)), tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, ), ) eval_dataset = ILQLIterableDataset.from_ilql_data_iterable( MapIterable(map_data_item, FileOpenIterable(convert_path(eval_data_path), 'r', pipe=jsonl_stream)), tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, ), ) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, 
mask=mask, ), every_k_schedule=grad_accum_steps, ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_prng_key = jax.random.PRNGKey(3) base_train_state, base_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) base_model.config.gradient_checkpointing = gradient_checkpointing base_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy with jax.default_device(jax.devices('cpu')[0]): target_base_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), base_train_state.params, ) target_base_params = shard_params_from_params( model=base_model, params=target_base_params, ) with jax.default_device(jax.devices('cpu')[0]): pi_beta_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), base_train_state.params, ) pi_beta_params = shard_params_from_params( model=base_model, params=pi_beta_params, ) q1_prng_key = jax.random.PRNGKey(4) q1_head_train_state, q_head = load_head_train_state_from_config(
model_config=MLPHeadConfig(
7
2023-11-21 00:16:42+00:00
24k
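In the record above, both optimizer getters wrap optax.adamw in optax.MultiSteps so gradients accumulate for grad_accum_steps microbatches before each parameter update, with a weight-decay mask that exempts biases and layer-norm parameters. A minimal sketch of that pattern on a toy parameter tree; the tree layout and the path-string matching are illustrative, whereas the record builds its mask from regexes via get_weight_decay_mask:

import jax
import jax.numpy as jnp
import optax

params = {
    "dense": {"kernel": jnp.ones((4, 4)), "bias": jnp.zeros(4)},
    "ln_f": {"scale": jnp.ones(4), "bias": jnp.zeros(4)},
}

def decay_mask(tree):
    # True => decay this leaf; skip biases and layer-norm scale/bias.
    return jax.tree_util.tree_map_with_path(
        lambda path, _: not any("bias" in str(p) or "ln_f" in str(p) for p in path),
        tree,
    )

tx = optax.MultiSteps(
    optax.adamw(learning_rate=1e-4, b1=0.9, b2=0.95, eps=1e-8,
                weight_decay=0.01, mask=decay_mask),
    every_k_schedule=4,   # apply the update once per 4 gradient microbatches
)
opt_state = tx.init(params)
grads = jax.tree_util.tree_map(jnp.ones_like, params)
updates, opt_state = tx.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)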
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Ker...
import logging import torch import math from .convolution import ConvolutionModule from .encoder_layer import EncoderLayer from .modules import get_activation from .modules import VGG2L from .modules import ( LegacyRelPositionMultiHeadedAttention, MultiHeadedAttention, RelPositionMultiHeadedAttention, ) from .embedding import ( LegacyRelPositionalEncoding, PositionalEncoding, RelPositionalEncoding, ScaledPositionalEncoding, ) from .modules import LayerNorm from .multi_layer_conv import ( Conv1dLinear, MultiLayeredConv1d, ) from .modules import ( PositionwiseFeedForward, ) from .modules import repeat from .sub_sampling import Conv2dSubsampling from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
15,326
self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "legacy_rel_selfattn": assert pos_enc_layer_type == "legacy_rel_pos" encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "rel_selfattn": logging.info("encoder self-attention layer type = relative self-attention") assert pos_enc_layer_type == "rel_pos" encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, zero_triu, ) else: raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type) # feed-forward module definition if ffn_layer_type == "linear": ffn_layer = PositionwiseFeedForward ffn_layer_args = ( attention_dim, linear_units, dropout_rate, activation, ) elif ffn_layer_type == "conv1d": ffn_layer = MultiLayeredConv1d ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) elif ffn_layer_type == "conv1d-linear": ffn_layer = Conv1dLinear ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") # convolution module definition convolution_layer = ConvolutionModule convolution_layer_args = (attention_dim, cnn_module_kernel, activation) self.encoders = repeat( num_blocks, lambda lnum: EncoderLayer( attention_dim, encoder_selfattn_layer(*encoder_selfattn_layer_args), ffn_layer(*ffn_layer_args), ffn_layer(*ffn_layer_args) if macaron_style else None, convolution_layer(*convolution_layer_args) if use_cnn_module else None, dropout_rate, normalize_before, concat_after, stochastic_depth_rate * float(1 + lnum) / num_blocks, ), ) if self.normalize_before: self.after_norm = LayerNorm(attention_dim) self.intermediate_layers = intermediate_layers self.use_conditioning = True if ctc_softmax is not None else False if self.use_conditioning: self.ctc_softmax = ctc_softmax self.conditioning_layer = torch.nn.Linear( conditioning_layer_dim, attention_dim ) self.enable_fusion = enable_fusion self.fusion_type = fusion_type if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']): raise NotImplementedError if self.fusion_type == 'daf_1d': self.fusion_model = DAF() elif self.fusion_type == 'aff_1d': self.fusion_model = AFF(channels=attention_dim, type='1D') elif self.fusion_type == 'iaff_1d': self.fusion_model = iAFF(channels=attention_dim, type='1D') elif (self.enable_fusion) and (self.fusion_type in ['attnpool_1d']):
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of encoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) ffn_layer_type (str): "linear", "conv1d", or "conv1d-linear". ffn_conv_kernel_size (int): Kernel size of ffn conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernel size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.)
""" def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "legacy_rel_selfattn": assert pos_enc_layer_type == "legacy_rel_pos" encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "rel_selfattn": logging.info("encoder self-attention layer type = relative self-attention") assert pos_enc_layer_type == "rel_pos" encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, zero_triu, ) else: raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type) # feed-forward module definition if ffn_layer_type == "linear": ffn_layer = PositionwiseFeedForward ffn_layer_args = ( attention_dim, linear_units, dropout_rate, activation, ) elif ffn_layer_type == "conv1d": ffn_layer = 
MultiLayeredConv1d ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) elif ffn_layer_type == "conv1d-linear": ffn_layer = Conv1dLinear ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") # convolution module definition convolution_layer = ConvolutionModule convolution_layer_args = (attention_dim, cnn_module_kernel, activation) self.encoders = repeat( num_blocks, lambda lnum: EncoderLayer( attention_dim, encoder_selfattn_layer(*encoder_selfattn_layer_args), ffn_layer(*ffn_layer_args), ffn_layer(*ffn_layer_args) if macaron_style else None, convolution_layer(*convolution_layer_args) if use_cnn_module else None, dropout_rate, normalize_before, concat_after, stochastic_depth_rate * float(1 + lnum) / num_blocks, ), ) if self.normalize_before: self.after_norm = LayerNorm(attention_dim) self.intermediate_layers = intermediate_layers self.use_conditioning = True if ctc_softmax is not None else False if self.use_conditioning: self.ctc_softmax = ctc_softmax self.conditioning_layer = torch.nn.Linear( conditioning_layer_dim, attention_dim ) self.enable_fusion = enable_fusion self.fusion_type = fusion_type if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']): raise NotImplementedError if self.fusion_type == 'daf_1d': self.fusion_model = DAF() elif self.fusion_type == 'aff_1d': self.fusion_model = AFF(channels=attention_dim, type='1D') elif self.fusion_type == 'iaff_1d': self.fusion_model = iAFF(channels=attention_dim, type='1D') elif (self.enable_fusion) and (self.fusion_type in ['attnpool_1d']):
self.attnpool = AttentionPool1d(max_seq_len, attention_dim, attention_heads)
17
2023-11-25 02:38:32+00:00
24k
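A minimal standalone sketch of the linear stochastic-depth schedule built in the `repeat(...)` lambda of the encoder record above (the `stochastic_depth_rate * float(1 + lnum) / num_blocks` term): deeper layers get a higher skip probability. The concrete values below are assumed for illustration, not taken from the record.

# Sketch (assumed values): per-layer skip probabilities matching the
# `stochastic_depth_rate * float(1 + lnum) / num_blocks` term above.
num_blocks = 6
stochastic_depth_rate = 0.3  # assumed maximum skip probability
per_layer_rates = [
    stochastic_depth_rate * float(1 + lnum) / num_blocks
    for lnum in range(num_blocks)
]
print(per_layer_rates)  # approximately [0.05, 0.1, 0.15, 0.2, 0.25, 0.3]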
Luo-Z13/pointobb
PointOBB/mmdet/models/roi_heads/PointOBB_head.py
[ { "identifier": "HEADS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "HEADS = MODELS" }, { "identifier": "MODELS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "MODELS = Registry('models', parent=MMCV_MODELS)" }, { "identifier": "build_head", "path": "Poi...
import math import torch import torch.nn.functional as F import torch.nn as nn import copy import numpy as np import cv2 from mmdet.core import bbox2result, bbox2roi, rbbox2roi, build_assigner, build_sampler, multi_apply from ..builder import HEADS, MODELS, build_head, build_roi_extractor, build_loss from .standard_roi_head import StandardRoIHead from .cascade_roi_head import CascadeRoIHead from mmdet.core.bbox.iou_calculators import bbox_overlaps from .test_mixins import BBoxTestMixin, MaskTestMixin from mmdet.core.bbox import bbox_xyxy_to_cxcywh from mmdet.core.bbox.transforms import rbbox2result from mmcv.cnn import Scale, ConvModule from mmcv.ops import box_iou_rotated from typing import Any, List, Sequence, Tuple, Union from torch import Tensor from mmdet.models.utils.base_bbox_coder import BaseBBoxCoder from ..detectors.utils import obb2xyxy, regularize_boxes, reduce_mean, obb2poly_np
15,221
"""Phase-Shifting Coder. `Phase-Shifting Coder (PSC) <https://arxiv.org/abs/2211.06368>`. Args: angle_version (str): Angle definition. Only 'le90' is supported at present. dual_freq (bool, optional): Use dual frequency. Default: True. num_step (int, optional): Number of phase steps. Default: 3. thr_mod (float): Threshold of modulation. Default: 0.47. """ def __init__(self, angle_version: str, dual_freq: bool = True, num_step: int = 3, thr_mod: float = 0.47): super().__init__() self.angle_version = angle_version assert angle_version in ['le90'] self.dual_freq = dual_freq self.num_step = num_step self.thr_mod = thr_mod if self.dual_freq: self.encode_size = 2 * self.num_step else: self.encode_size = self.num_step self.coef_sin = torch.tensor( tuple( torch.sin(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) self.coef_cos = torch.tensor( tuple( torch.cos(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) def encode(self, angle_targets: Tensor) -> Tensor: """Phase-Shifting Encoder. Args: angle_targets (Tensor): Angle offset for each scale level. Has shape (num_anchors * H * W, 1) Returns: list[Tensor]: The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) """ phase_targets = angle_targets * 2 phase_shift_targets = tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) # Dual-freq PSC for square-like problem if self.dual_freq: phase_targets = angle_targets * 4 phase_shift_targets += tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) return torch.cat(phase_shift_targets, axis=-1) def decode(self, angle_preds: Tensor, keepdim: bool = False) -> Tensor: """Phase-Shifting Decoder. Args: angle_preds (Tensor): The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) keepdim (bool): Whether the output tensor has dim retained or not. Returns: list[Tensor]: Angle offset for each scale level. Has shape (num_anchors * H * W, 1) when keepdim is true, (num_anchors * H * W) otherwise """ self.coef_sin = self.coef_sin.to(angle_preds) self.coef_cos = self.coef_cos.to(angle_preds) phase_sin = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase = -torch.atan2(phase_sin, phase_cos) # In range [-pi,pi) if self.dual_freq: phase_sin = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase2 = -torch.atan2(phase_sin, phase_cos) / 2 # Phase unwarpping, dual freq mixing # Angle between phase and phase2 is obtuse angle idx = torch.cos(phase) * torch.cos(phase2) + torch.sin( phase) * torch.sin(phase2) < 0 # Add pi to phase2 and keep it in range [-pi,pi) phase2[idx] = phase2[idx] % (2 * math.pi) - math.pi phase = phase2 # Set the angle of isotropic objects to zero phase[phase_mod < self.thr_mod] *= 0 angle_pred = phase / 2 return angle_pred @HEADS.register_module()
RangeType = Sequence[Tuple[int, int]] INF = 1e8 def meshgrid(x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exporting # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def obb2cxcywh_le90(obboxes): """Convert oriented bounding boxes to horizontal bounding boxes in [x_ctr,y_ctr,w,h,theta=0] format. Args: obboxes (torch.Tensor): [x_ctr,y_ctr,w,h,angle] Returns: hbbs (torch.Tensor): [x_ctr,y_ctr,w,h,0] """ center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1) Cos, Sin = torch.cos(theta), torch.sin(theta) x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin) y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos) bias = torch.cat([x_bias, y_bias], dim=-1) wh = bias * 2 return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1) @HEADS.register_module() class PSCCoder(BaseBBoxCoder): """Phase-Shifting Coder. `Phase-Shifting Coder (PSC) <https://arxiv.org/abs/2211.06368>`. Args: angle_version (str): Angle definition. Only 'le90' is supported at present. dual_freq (bool, optional): Use dual frequency. Default: True. num_step (int, optional): Number of phase steps. Default: 3. thr_mod (float): Threshold of modulation. Default: 0.47. """ def __init__(self, angle_version: str, dual_freq: bool = True, num_step: int = 3, thr_mod: float = 0.47): super().__init__() self.angle_version = angle_version assert angle_version in ['le90'] self.dual_freq = dual_freq self.num_step = num_step self.thr_mod = thr_mod if self.dual_freq: self.encode_size = 2 * self.num_step else: self.encode_size = self.num_step self.coef_sin = torch.tensor( tuple( torch.sin(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) self.coef_cos = torch.tensor( tuple( torch.cos(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) def encode(self, angle_targets: Tensor) -> Tensor: """Phase-Shifting Encoder. Args: angle_targets (Tensor): Angle offset for each scale level. Has shape (num_anchors * H * W, 1) Returns: list[Tensor]: The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) """ phase_targets = angle_targets * 2 phase_shift_targets = tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) # Dual-freq PSC for square-like problem if self.dual_freq: phase_targets = angle_targets * 4 phase_shift_targets += tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) return torch.cat(phase_shift_targets, axis=-1) def decode(self, angle_preds: Tensor, keepdim: bool = False) -> Tensor: """Phase-Shifting Decoder. Args: angle_preds (Tensor): The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) keepdim (bool): Whether the output tensor has dim retained or not. Returns: list[Tensor]: Angle offset for each scale level. 
Has shape (num_anchors * H * W, 1) when keepdim is true, (num_anchors * H * W) otherwise """ self.coef_sin = self.coef_sin.to(angle_preds) self.coef_cos = self.coef_cos.to(angle_preds) phase_sin = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase = -torch.atan2(phase_sin, phase_cos) # In range [-pi,pi) if self.dual_freq: phase_sin = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase2 = -torch.atan2(phase_sin, phase_cos) / 2 # Phase unwrapping, dual freq mixing # Angle between phase and phase2 is obtuse angle idx = torch.cos(phase) * torch.cos(phase2) + torch.sin( phase) * torch.sin(phase2) < 0 # Add pi to phase2 and keep it in range [-pi,pi) phase2[idx] = phase2[idx] % (2 * math.pi) - math.pi phase = phase2 # Set the angle of isotropic objects to zero phase[phase_mod < self.thr_mod] *= 0 angle_pred = phase / 2 return angle_pred @HEADS.register_module()
class PointOBBHead(StandardRoIHead):
5
2023-11-20 07:50:12+00:00
24k
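A minimal round-trip sketch of the phase-shifting idea in `PSCCoder` above, restricted to the single-frequency branch with `num_step=3`; the angle values are illustrative assumptions and the modulation threshold check is omitted.

import math
import torch

# Encode: cosine patterns of the doubled angle at three phase steps,
# mirroring `encode` above for dual_freq=False.
num_step = 3
angle = torch.tensor([[0.3], [-0.7]])  # (N, 1) angles in radians, assumed values
phase = angle * 2
encoded = torch.cat(
    [torch.cos(phase + 2 * math.pi * k / num_step) for k in range(num_step)],
    dim=-1)  # (N, 3)

# Decode: project onto the sin/cos coefficient vectors and recover the
# phase with atan2, as in `decode` above.
coef_sin = torch.tensor([math.sin(2 * k * math.pi / num_step) for k in range(num_step)])
coef_cos = torch.tensor([math.cos(2 * k * math.pi / num_step) for k in range(num_step)])
decoded = -torch.atan2((encoded * coef_sin).sum(-1), (encoded * coef_cos).sum(-1)) / 2
print(decoded)  # approximately tensor([0.3000, -0.7000])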
ModelTC/EasyLLM
llm/models/hf_models/qwen_vl/modeling_qwen.py
[ { "identifier": "QWenConfig", "path": "llm/models/hf_models/qwen_vl/configuration_qwen.py", "snippet": "class QWenConfig(PretrainedConfig):\n model_type = \"qwen\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=151936,\n hidden_...
import importlib import math import torch # noqa import torch.nn.functional as F # noqa import torch.utils.checkpoint # noqa from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, Callable, List, Any, Generator # noqa from torch.cuda.amp import autocast # noqa from torch.nn import CrossEntropyLoss from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList # noqa from transformers.generation.logits_process import LogitsProcessorList # noqa from transformers.generation.streamers import BaseStreamer # noqa from transformers.generation.utils import GenerateOutput # noqa from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from transformers.modeling_utils import PreTrainedModel # noqa from transformers.utils import logging from einops import rearrange from torch import nn from .configuration_qwen import QWenConfig # noqa from .qwen_generation_utils import ( make_context, ) # noqa from llm.models.hf_models.qwen.qwen_generation_utils import ( HistoryType, decode_tokens, get_stop_words_ids, ) from .visual import VisionTransformer from llm.models.hf_models.qwen.modeling_qwen import RMSNorm, apply_rotary_pos_emb, QWenMLP from llm.models.hf_models.qwen.modeling_qwen import QWenAttention as QWenAttention_chat from llm.models.hf_models.qwen.modeling_qwen import QWenModel as QWenModel_chat from llm.models.hf_models.qwen.modeling_qwen import QWenLMHeadModel as QWenLMHeadModel_chat from einops import rearrange
15,370
tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class QWenAttention(QWenAttention_chat): def __init__(self, config): super().__init__(config) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) attn_output, attn_weight = self._attn( query, key, value, registered_causal_mask, attention_mask, head_mask ) context_layer = self._merge_heads( attn_output, self.num_heads, self.head_dim ) attn_output = self.c_proj(context_layer) outputs = (attn_output, present) if output_attentions: outputs += (attn_weight,) return outputs class QWenBlock(nn.Module): def __init__(self, config): super().__init__() hidden_size = config.hidden_size self.bf16 = config.bf16 self.ln_1 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.attn = QWenAttention(config) self.ln_2 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, )
# Copyright (c) Alibaba Cloud. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. if TYPE_CHECKING: try: except ImportError: rearrange = None SUPPORT_CUDA = torch.cuda.is_available() SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported() SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7 logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "qwen" _CONFIG_FOR_DOC = "QWenConfig" QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"] _ERROR_BAD_CHAT_FORMAT = """\ We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml". If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat(). 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。 """ _SENTINEL = object() _ERROR_STREAM_IN_CHAT = """\ Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True). 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。 """ apply_rotary_emb_func = None rms_norm = None # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class QWenAttention(QWenAttention_chat): def __init__(self, config): super().__init__(config) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) attn_output, attn_weight = self._attn( query, key, value, registered_causal_mask, attention_mask, head_mask ) context_layer = self._merge_heads( attn_output, self.num_heads, self.head_dim ) attn_output = self.c_proj(context_layer) outputs = (attn_output, present) if output_attentions: outputs += (attn_weight,) return outputs class QWenBlock(nn.Module): def __init__(self, config): super().__init__() hidden_size = config.hidden_size self.bf16 = config.bf16 self.ln_1 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.attn = QWenAttention(config) self.ln_2 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, )
self.mlp = QWenMLP(config)
6
2023-11-26 10:12:52+00:00
24k
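A small self-contained sketch of the two attention-mask helpers reproduced in the record above (`_make_causal_mask` and `_expand_mask`); the shapes and padding pattern below are illustrative assumptions.

import torch

# Causal part: 0 on and below the diagonal, dtype-min above it, so softmax
# ignores future positions (mirrors `_make_causal_mask` without the KV-cache
# offset handling).
tgt_len, dtype = 4, torch.float32
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
cond = torch.arange(tgt_len)
mask.masked_fill_(cond < (cond + 1).view(tgt_len, 1), 0)
print((mask == 0).int())  # lower-triangular ones

# Padding part: a [bsz, src_len] 0/1 mask becomes an additive bias of shape
# [bsz, 1, tgt_len, src_len] with dtype-min at padded positions
# (mirrors `_expand_mask`).
pad = torch.tensor([[1, 1, 0]])  # last source token is padding (assumed)
expanded = pad[:, None, None, :].expand(1, 1, 2, 3).to(dtype)
inverted = 1.0 - expanded
bias = inverted.masked_fill(inverted.to(torch.bool), torch.finfo(dtype).min)
print(bias[0, 0])  # each row: [0, 0, -3.4e38]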
danilonumeroso/conar
models/tsp_reasoner.py
[ { "identifier": "vmapped_beam_search_rollout", "path": "baselines/beam_search.py", "snippet": "BEAM_WIDTH = 128\ndef expand_single(beam_vis, beam_last, beam_cost, beam_par, W):\ndef beam_search_rollout_step(W, beam_width, i, tpl):\ndef beam_search_rollout(start_route, W, num_nodes, beam_width):\ndef bea...
from collections import defaultdict from pprint import pprint from torch_geometric.loader import DataLoader from pytorch_lightning.trainer.supporters import CombinedLoader from baselines.beam_search import vmapped_beam_search_rollout, BEAM_WIDTH from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner from hyperparameters import get_hyperparameters from torch_geometric.utils import k_hop_subgraph from datasets._configs import CONFIGS from utils_execution import cross_entropy, check_edge_index_sorted, prepare_constants, edge_one_hot_encode_pointers, get_number_of_nodes from clrs import Type, Location, Stage import copy import itertools import time import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch_scatter import torch_geometric import pytorch_lightning as pl
15,341
break assert j == edges.shape[1] return mask def get_mask_v2(edges): dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool() dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool() edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1)) mask = mask - 1 return mask acc = None # st = time.time() outputs = type(self.algorithm_module).convert_logits_to_outputs( self.dataset.spec, output_logits, batch.edge_index[0], batch.edge_index[1], batch.num_nodes, batch.batch, include_probabilities=False)['output'] for name in outputs: pred = outputs[name] pred_gt = getattr(batch, name) stage, loc, data_type = self.dataset.spec[name] if loc == Location.NODE: if name == 'predecessor_index': tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred]) mask = get_mask_v2(tours).bool() st = time.time() mattr = batch.edge_attr[mask] mbatch = batch.edge_index_batch[mask] msrc, mdst = batch.edge_index[:, mask] tour_len = torch_scatter.scatter_sum(mattr, mbatch) tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch) assert sum(tour_correctness)/len(tour_correctness) == 1 return dict(tour_len=tour_len.mean(), tour_len_gt=batch.optimal_value.mean().item(), tour_correctness=sum(tour_correctness)/len(tour_correctness), tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean()) def process_TSP_tour_greedy(self, batch, output_logits): mask_active_nodes = torch.tensor(batch.start_route).bool() mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool() max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max() num_nodes_per_graph = batch.num_nodes // batch.num_graphs for _ in range(max_nodes_per_graph - 1): mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour # Any edge outwards of active nodes and not pointing to previously used node mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]] # any edge towards the active nodes should not be used in future iterations sloops = (batch.edge_index[0] == batch.edge_index[1]) preds = output_logits['output']['predecessor_index'].clone() preds = preds.masked_fill(~mask_active_edges | sloops, -1e6) # nudge the max value to ensure there is a unique maximum max_idxs = preds.reshape(-1, num_nodes_per_graph).argmax(-1) max_idxs = F.one_hot(max_idxs, num_nodes_per_graph) preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten() output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]] new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1) # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max) mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool() final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]] output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8) return output_logits def process_TSP_tour_BS(self, batch, output_logits): start_route = torch_geometric.utils.to_dense_batch(batch.start_route, batch=batch.batch)[0] dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch, 
edge_attr=output_logits['output']['predecessor_index']) num_nodes = start_route.shape[1] # st = time.time() tours = torch.tensor(np.array(vmapped_beam_search_rollout( start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(), num_nodes, BEAM_WIDTH)), device=start_route.device) # print('tours took', time.time()-st) # st = time.time() dens_logits_o = torch.full_like(dens_logits, -1e9) arranged = torch.arange(dens_logits_o.shape[0], device=dens_logits.device) fr = tours[arranged, 0] to = tours[arranged, 1] batch_id = arranged.unsqueeze(1).expand_as(fr) fr = fr.flatten() to = to.flatten() batch_id = batch_id.flatten() dens_logits_o[batch_id, fr, to] = 1e9 edge_index, sparse_logits = torch_geometric.utils.dense_to_sparse(dens_logits_o) sparse_logits = sparse_logits.to(batch.edge_index.device) assert (edge_index == batch.edge_index).all() output_logits['output']['predecessor_index'] = sparse_logits # print('rest took', time.time()-st) return output_logits def process_TSP_tour(self, batch, output_logits): if self.ensure_permutation == "greedy": return self.process_TSP_tour_greedy(batch, output_logits) return self.process_TSP_tour_BS(batch, output_logits) def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): output_logits = self.process_TSP_tour(batch, output_logits) accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_tour_metrics(output_logits, batch)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix nns = get_number_of_nodes(self.algorithm, split) for nn in nns: self.dataset_kwargs['split'] = split if (split, nn) not in self._datasets: self._datasets[(split, nn)] = self.dataset_class( self.dataset_root, nn,
class TSPReasoner(AlgorithmReasoner): def __init__(self, spec, data, latent_features, algo_processor, bias=True, use_TF=False, L1_loss=False, global_termination_pool='max', #'predinet', get_attention=False, use_batch_norm=False, transferring=False, timeit=True, double_process=False, **algo_reasoner_kwargs): super().__init__( spec, data, latent_features, algo_processor, use_TF=use_TF, timeit=timeit, L1_loss=L1_loss, global_termination_pool=global_termination_pool, get_attention=get_attention, use_batch_norm=use_batch_norm, transferring=transferring, **algo_reasoner_kwargs, ) self.step_idx = 0 self.assert_checks = False self.debug = False self.debug_epoch_threshold = 1e9 self.next_step_pool = True self.double_process = double_process self.lambda_mul = 1# 0.0001 self.transferring = transferring def get_input_output_hints(self, batch): hint_inp_curr = dict() hint_out_curr = dict() return hint_inp_curr, hint_out_curr def process( self, *args, **kwargs): self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process( *args, first_n_processors=1000 if not self.double_process else 1, **kwargs) if self.double_process: self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process( *args, init_last_latent=self.last_latent, **kwargs) return self.all_hint_logits, self.last_logits, self.all_masks_graph class LitTSPReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, use_TF=False, ensure_permutation='greedy', transferring=False, learning_rate=get_hyperparameters()['lr'], double_process=False, **algo_reasoner_kwargs): super().__init__(hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=bias, use_TF=use_TF, transferring=transferring, learning_rate=learning_rate, **algo_reasoner_kwargs) self.algorithm_module = TSPReasoner(self.dataset.spec, self.dataset[0], hidden_dim, algo_processor, bias=bias, use_TF=use_TF, transferring=transferring, timeit=self.timeit, double_process=double_process, **algo_reasoner_kwargs) self.ensure_permutation = ensure_permutation self.double_process = double_process self.save_hyperparameters(ignore=['algo_processor']) def training_step(self, batch, batch_idx): ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)} for bb in batch: ans = super().training_step(bb, batch_idx) ret['loss'] += ans['loss'] for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k].append(v) ret['loss'] /= len(batch) for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k] = torch.tensor(v).mean() return ret def get_tour_metrics(self, output_logits, batch): def get_mask(edges): mask = torch.zeros_like(batch.edge_index[0]) j = 0 for i in range(batch.edge_index.shape[1]): u1, v1 = batch.edge_index[:, i] u2, v2 = edges[:, j] if u1 == u2 and v1 == v2: mask[i] = 1 j += 1 if j == edges.shape[1]: break assert j == edges.shape[1] return mask def get_mask_v2(edges): dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool() dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool() edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1)) mask = mask - 1 return mask acc = None # st = time.time() outputs = type(self.algorithm_module).convert_logits_to_outputs( self.dataset.spec, output_logits, batch.edge_index[0], batch.edge_index[1], batch.num_nodes, batch.batch, 
include_probabilities=False)['output'] for name in outputs: pred = outputs[name] pred_gt = getattr(batch, name) stage, loc, data_type = self.dataset.spec[name] if loc == Location.NODE: if name == 'predecessor_index': tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred]) mask = get_mask_v2(tours).bool() st = time.time() mattr = batch.edge_attr[mask] mbatch = batch.edge_index_batch[mask] msrc, mdst = batch.edge_index[:, mask] tour_len = torch_scatter.scatter_sum(mattr, mbatch) tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch) assert sum(tour_correctness)/len(tour_correctness) == 1 return dict(tour_len=tour_len.mean(), tour_len_gt=batch.optimal_value.mean().item(), tour_correctness=sum(tour_correctness)/len(tour_correctness), tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean()) def process_TSP_tour_greedy(self, batch, output_logits): mask_active_nodes = torch.tensor(batch.start_route).bool() mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool() max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max() num_nodes_per_graph = batch.num_nodes // batch.num_graphs for _ in range(max_nodes_per_graph - 1): mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour # Any edge outwards of active nodes and not pointing to previously used node mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]] # any edge towards the active nodes should not be used in future iterations sloops = (batch.edge_index[0] == batch.edge_index[1]) preds = output_logits['output']['predecessor_index'].clone() preds = preds.masked_fill(~mask_active_edges | sloops, -1e6) # nudge the max value to ensure there is a unique maximum max_idxs = preds.reshape(-1, num_nodes_per_graph).argmax(-1) max_idxs = F.one_hot(max_idxs, num_nodes_per_graph) preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten() output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]] new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1) # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max) mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool() final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]] output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8) return output_logits def process_TSP_tour_BS(self, batch, output_logits): start_route = torch_geometric.utils.to_dense_batch(batch.start_route, batch=batch.batch)[0] dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch, edge_attr=output_logits['output']['predecessor_index']) num_nodes = start_route.shape[1] # st = time.time() tours = torch.tensor(np.array(vmapped_beam_search_rollout( start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(), num_nodes, BEAM_WIDTH)), device=start_route.device) # print('tours took', time.time()-st) # st = time.time() dens_logits_o = torch.full_like(dens_logits, -1e9) arranged = torch.arange(dens_logits_o.shape[0], device=dens_logits.device) fr = tours[arranged, 0] to = tours[arranged, 1] batch_id = arranged.unsqueeze(1).expand_as(fr) fr = fr.flatten() to = to.flatten() 
batch_id = batch_id.flatten() dens_logits_o[batch_id, fr, to] = 1e9 edge_index, sparse_logits = torch_geometric.utils.dense_to_sparse(dens_logits_o) sparse_logits = sparse_logits.to(batch.edge_index.device) assert (edge_index == batch.edge_index).all() output_logits['output']['predecessor_index'] = sparse_logits # print('rest took', time.time()-st) return output_logits def process_TSP_tour(self, batch, output_logits): if self.ensure_permutation == "greedy": return self.process_TSP_tour_greedy(batch, output_logits) return self.process_TSP_tour_BS(batch, output_logits) def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): output_logits = self.process_TSP_tour(batch, output_logits) accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_tour_metrics(output_logits, batch)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix nns = get_number_of_nodes(self.algorithm, split) for nn in nns: self.dataset_kwargs['split'] = split if (split, nn) not in self._datasets: self._datasets[(split, nn)] = self.dataset_class( self.dataset_root, nn,
CONFIGS[self.algorithm][split]['num_samples'],
4
2023-11-20 15:32:43+00:00
24k
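A tiny sketch of the `dense_to_sparse` offset trick used in `get_mask_v2` above: `dense_to_sparse` drops zero entries, so the 0/1 mask is shifted by +1 before conversion and by -1 afterwards, keeping edges whose mask value is 0. The toy adjacency below is an assumed example over a fully connected 2-node graph; requires torch_geometric.

import torch
from torch_geometric.utils import dense_to_sparse

# (batch, N, N) dense mask; the (0, 0) entry must survive conversion even
# though its mask value is 0.
dense = torch.tensor([[[0., 1.],
                       [1., 1.]]])
edge_index, val = dense_to_sparse(dense + 1)  # shift: every entry nonzero
mask = val - 1                                # shift back to the 0/1 mask
print(edge_index)  # all four (i, j) pairs are kept
print(mask)        # tensor([0., 1., 1., 1.])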
bearyi26/DCPT
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, H...
import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet, BDD100K_Night, SHIFT_Night, ExDark from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
21,048
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb")
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb")
datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
8
2023-11-20 06:41:15+00:00
24k
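A compact sketch of the name-to-dataset dispatch pattern in `names2datasets` above, using a dict registry in place of the if-chain; the constructors here are hypothetical stubs, not the record's dataset classes.

# Hypothetical stubs standing in for the Lasot/Got10k-style constructors.
registry = {
    "LASOT": lambda: "Lasot(split='train')",
    "GOT10K_vottrain": lambda: "Got10k(split='vottrain')",
}

def build_datasets(name_list, registry):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        assert name in registry, f"unknown dataset: {name}"
        datasets.append(registry[name]())
    return datasets

print(build_datasets(["LASOT", "GOT10K_vottrain"], registry))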
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import datetime import math import cv2 import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import pygame from collections import OrderedDict from matplotlib import pyplot as plt from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from torchvision import transforms from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer from text_super_resolution.model.stn_head import STNHead from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN from utils.render_standard_text import * from text_super_resolution.loss.semantic_loss import SemanticLoss from text_super_resolution.utils import ssim_psnr from pygame import freetype from utils.metrics import *
14,561
mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) # print(cond.shape) if self.text_prior_enable: if isinstance(cond, dict): shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3]) elif isinstance(cond, list): shape = (self.channels, cond[0].shape[2], cond[0].shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) # shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) # print('**********************c shape',c.shape) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) 
log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") print(sd.keys()) print(sd['epoch']) print(sd['global_step']) print(sd['callbacks']) # print(sd['optimizer_states']) # print(sd['lr_schedulers']) # print(sd['state_dict'].keys()) # exit(0) if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError(f"unknown loss type '{self.loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict
= self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config 
== '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins)) self.stn_head = STNHead( in_planes=3, num_ctrlpoints=num_control_points, activation='none', input_size=self.tps_inputsize) self.standard_text = standard_text if self.standard_text: # self.VL_model = self.VisionLAN_init(VL_pretrained_path) # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ', # '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False) self.font_path = font_path pygame.init() freetype.init() self.cal_psnr = ssim_psnr.calculate_psnr self.cal_ssim = ssim_psnr.SSIM() def VisionLAN_init(self, path=None): cfg = {'args': { 'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)], 'input_shape': [3, 64, 256], # C x H x W }, 'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth', } model_VL = VisionLAN(**cfg['args']) model_path = cfg['init_state_dict'] if path is None else path print('load pre_trained VisionLAN model from %s' % model_path) model_VL = model_VL.to(self.device) model_VL = nn.DataParallel(model_VL) if cfg['init_state_dict'] != None: fe_state_dict_ori = torch.load(model_path) fe_state_dict = OrderedDict() for k, v in fe_state_dict_ori.items(): if 'module' not in k: k = 'module.' 
+ k else: k = k.replace('features.module.', 'module.features.') fe_state_dict[k] = v model_dict_fe = model_VL.state_dict() state_dict_fe = {k: v for k, v in fe_state_dict.items() if k in model_dict_fe.keys()} model_dict_fe.update(state_dict_fe) model_VL.load_state_dict(model_dict_fe) return model_VL def parse_visionlan_data(self, imgs_input): imgs_input = transforms.ToPILImage()(imgs_input).convert('RGB') imgs_input = cv2.resize(np.array(imgs_input), (256, 64)) imgs_input = transforms.ToTensor()(imgs_input).unsqueeze(0) imgs_input = imgs_input.to(self.device) return imgs_input def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids def on_save_checkpoint(self, checkpoint): if not isinstance(self.cond_stage_model, torch.nn.Identity): self.cond_stage_model.save_state_dict( '/home/zhouyuxuan/latent-diffusion/crnn_ckpt/', self.current_epoch) @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, 
n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # print(x.shape) # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[1] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = 
self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) # print('weighting',weighting.shape,Ly,Lx) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[1] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: # if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) if self.text_prior_enable: c = self.get_additional_cond(xc, c) # c = {'c_concat': [xc], 'c_crossattn': [c]} else: c = xc if bs is not None: if isinstance(c, dict): for k, v in c.items(): c[k] = [v[0][:bs]] else: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] # print('fuck',c.shape) if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape print('decode z shape', z.shape) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") print(ks, stride, uf) fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape print('encode x shape', x.shape) print('ks', ks, 'stride', stride, 'df', df) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) print('encode z shape', z.shape) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def on_validation_start(self) -> None: print(f'******************************in validation {self.current_epoch}') def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) if self.fid_eval and self.current_epoch % 10 == 0: results = self.recognize_sample(batch, N=114514, inpaint=False) rec_image = results['samples'] target = batch[self.first_stage_key] target = rearrange(target, 'b h w c -> b c h w') cond = batch[self.cond_stage_key] cond = rearrange(cond, 'b h w c -> b c h w') if self.visualize: batchlen = rec_image.shape[0] rc = int(math.sqrt(batchlen)) f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) print(len(axs), batchlen, int(math.sqrt(batchlen))) assert len(axs) ** 2 == batchlen for i in range(batchlen): axs[i // rc, i % rc].set_xticklabels([]) axs[i // rc, i % rc].set_yticklabels([]) axs[i // rc, i % rc].set_aspect('equal') axs[i // rc, i % rc].imshow(rec_image[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/sample_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) 
plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(target[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/target_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(cond[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/input_{batch_idx}.jpg') PSNR = self.cal_psnr(rec_image[:, :3], target[:, :3]) SSIM = self.cal_ssim(rec_image[:, :3], target[:, :3]) self.log_dict({'PSNR': PSNR, 'SSIM': SSIM}, prog_bar=False, logger=True, on_step=False, on_epoch=True) def shared_step(self, batch, **kwargs): # print('*******************************************************batch',batch['image'].shape) # print('*******************************************************batch',batch['image'].shape) # if hasattr(self, "split_input_params"): # print(self.split_input_params) # else: # print('fuck') x, c = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x, c) if self.recog_loss_enable: HR = batch['image'] HR = rearrange(HR, 'b h w c -> b c h w') HR = HR.to(memory_format=torch.contiguous_format).float() LR = c label_vecs = self.get_learned_conditioning(c).permute(1, 0, 2) label_vecs_hr = self.get_learned_conditioning(HR).permute(1, 0, 2) loss_recog_distill = sem_loss(label_vecs, label_vecs_hr) * 100 # 100 loss = loss + loss_recog_distill loss_dict.update({f'loss_recog': loss_recog_distill}) # return loss + loss_recog_distill, loss_dict # # else: return loss, loss_dict def get_additional_cond(self, c, tp): if self.stn: _, ctrl_points_c = self.stn_head(c) c, _ = self.tps(c, ctrl_points_c) if self.standard_text: x_q = torch.empty(1, 2, c.shape[2], c.shape[3]) # prob_lr = torch.empty(1, 25, 37) rec_results = get_string_crnn(tp.permute(1, 0, 2), False) for i in range(c.shape[0]): # visionlan_dict_lr = self.parse_visionlan_data(c[i, :3, :, :]) # target = '' # label_lr, label_length = self.VL_model(visionlan_dict_lr, target, '', False) # pred_str_lr, pred_prob = self.test_acc_counter.convert(label_lr, label_length) # s = pred_str_lr[0] # prob_lr = torch.cat([prob_lr, pred_prob], dim=0) s = rec_results[i] if s == "" or type(s) == torch.Tensor: s = "\t" lower_case = s.lower() upper_case = s.upper() i_t_lower = make_standard_text(self.font_path, lower_case, (c.shape[2], c.shape[3])) i_t_lower_tensor = torch.from_numpy(i_t_lower).unsqueeze(0).unsqueeze(0) i_t_upper = make_standard_text(self.font_path, upper_case, (c.shape[2], c.shape[3])) i_t_upper_tensor = torch.from_numpy(i_t_upper).unsqueeze(0).unsqueeze(0) i_t_tensor = torch.cat([i_t_lower_tensor, i_t_upper_tensor], dim=1) x_q = torch.cat([x_q, i_t_tensor], dim=0) x_q = x_q[1:] # prob_lr = prob_lr[1:] x_q = x_q.to(self.device) # prob_lr = prob_lr.to(self.device) c = torch.cat([c, x_q], dim=1) return {'c_concat': [c], 'c_crossattn': [tp]} def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.text_prior_enable and self.model.conditioning_key == 'hybrid': tp = self.get_learned_conditioning(c) c = self.get_additional_cond(c, tp) else: if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = 
self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) # print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) # print("reducing stride") # print('ddpm','x_noisy shape',x_noisy.shape,'ks',ks,'stride',stride) fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = 
[torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) self.logvar = self.logvar.to(self.device) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) # print(cond.shape) if self.text_prior_enable: if isinstance(cond, dict): shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3]) elif isinstance(cond, list): shape = (self.channels, cond[0].shape[2], cond[0].shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) # shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, 
return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) # print('**********************c shape',c.shape) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
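The fold/unfold patching used by get_fold_unfold() and decode_first_stage() in this record unfolds a tensor into overlapping crops, processes each crop, then folds the results back and divides by a folded weighting so overlapping regions average out. A minimal self-contained sketch of that trick, with an identity per-crop "model" and uniform weighting (illustrative only, not the repo's code):

```python
# Unfold into overlapping crops, process each crop, fold back, and divide by
# the folded weighting so overlaps are averaged. With an identity model and
# uniform weighting, the stitched result reproduces the input exactly.
import torch

x = torch.randn(1, 4, 32, 32)
ks, stride = (16, 16), (8, 8)

unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[2:], kernel_size=ks, stride=stride)

z = unfold(x)                                    # (1, 4*16*16, L) flattened crops
z = z.view(1, 4, ks[0], ks[1], z.shape[-1])      # (1, 4, 16, 16, L)
out = torch.stack([z[..., i] for i in range(z.shape[-1])], dim=-1)  # identity "model"
out = out.view(1, -1, out.shape[-1])             # back to (1, 4*16*16, L)

weighting = torch.ones(1, 4 * ks[0] * ks[1], out.shape[-1])
normalization = fold(weighting)                  # per-pixel overlap counts
recon = fold(out) / normalization                # stitch crops together
print(torch.allclose(recon, x, atol=1e-5))       # True
```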
elif isimage(xc):
4
2023-11-20 06:34:21+00:00
24k
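The schedule buffers registered in this record (sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod) exist so that q_sample() can draw x_t from q(x_t | x_0) in closed form: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. A minimal sketch, assuming a plain linear beta schedule between linear_start=1e-4 and linear_end=2e-2 (the repo supports other schedules; names mirror the buffers but this is not the repo's code):

```python
# Closed-form forward diffusion q(x_t | x_0) from precomputed schedule buffers.
import numpy as np
import torch

timesteps = 1000
betas = np.linspace(1e-4, 2e-2, timesteps, dtype=np.float64)  # linear schedule (assumed)
alphas_cumprod = np.cumprod(1.0 - betas, axis=0)

sqrt_alphas_cumprod = torch.tensor(np.sqrt(alphas_cumprod), dtype=torch.float32)
sqrt_one_minus = torch.tensor(np.sqrt(1.0 - alphas_cumprod), dtype=torch.float32)

def q_sample(x_start: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    """Draw x_t ~ q(x_t | x_0) in one step, broadcasting per-sample coefficients."""
    noise = torch.randn_like(x_start)
    shape = (-1,) + (1,) * (x_start.dim() - 1)
    return (sqrt_alphas_cumprod[t].view(shape) * x_start
            + sqrt_one_minus[t].view(shape) * noise)

x0 = torch.randn(4, 3, 32, 32)
xt = q_sample(x0, torch.randint(0, timesteps, (4,)))
```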
microsoft/Project-BayesDAG
src/causica/models/visl.py
[ { "identifier": "Dataset", "path": "src/causica/datasets/dataset.py", "snippet": "class Dataset(BaseDataset):\n \"\"\"\n Class to store dense train/val/test data and masks and variables metadata.\n Note that the data and masks provided by this class are read only.\n \"\"\"\n\n def __init_...
import json import math import os import warnings import numpy as np # type: ignore import torch import torch.distributions as tdist import torch.nn.functional as F from typing import Callable, Dict, List, Optional, Tuple from torch import nn from torch.utils.data import DataLoader, TensorDataset from ..datasets.dataset import Dataset from ..datasets.variables import Variables from ..models.imodel import IModelForCausalInference from ..utils.helper_functions import to_tensors from ..utils.io_utils import save_json from ..utils.nri_utils import compute_dag_loss, get_feature_indices_per_node, kl_categorical, piecewise_linear from ..utils.training_objectives import get_input_and_scoring_masks, kl_divergence, negative_log_likelihood from .pvae_base_model import PVAEBaseModel
18,895
lambda_nll: float, lambda_kl_z: float, lambda_kl_A: float, lambda_dagloss: float, sample_count: int, ) -> Dict[str, List[float]]: """ Train the model using the given data. Args: dataset: Dataset with data and masks in processed form. train_output_dir: Path to save any training information to. report_progress_callback: Function to report model progress for API. learning_rate: Learning rate for Adam optimiser. batch_size: Size of minibatches to use. epochs: Number of epochs to train for. max_p_train_dropout: Maximum fraction of extra training features to drop for each row. 0 is none, 1 is all. use_dag_loss: Whether to use the DAG loss regularisation. output_variance: The variance for the output of the GNN based VAE. hard: Whether to use hard or soft samples for the distribution over edges (if hard=True, the edge weights are just 0/1). two_steps: Whether to use the two-step variant of VISL. That is, the first half of training uses only the forward MLP and the second half fixes the distribution over edges and only optimizes the forward and backward MLPs. lambda_nll: Lambda coefficient for the ELBO term negative-log-likelihood lambda_kl_z: Lambda coefficient for the ELBO term lambda*KL(q(z|x) || p(z)) lambda_kl_A: Lambda coefficient for the ELBO term lambda*KL(q(A) || p(A)) lambda_dagloss: Lambda coefficient for the dagloss term of the ELBO. sample_count: Number of samples to reconstruct. Returns: train_results (dictionary): training_loss, KL divergence, NLL, dag_loss, training_loss_complete """ # Put PyTorch into train mode. self.train() # Setting the hard attribute which will be used for training and testing self.hard = hard # Loading data and mask, creating results_dict data, mask = dataset.train_data_and_mask results_dict: Dict[str, List] = { metric: [] for metric in [ "training_loss", "kl_z_term", "kl_A_term", "nll_term", "dag_loss_term", "training_loss_complete", ] } # Creating the optimizer # If two_steps is True, we create a different optimizer for the second half. This optimizer does not optimize over the adjacency matrix. optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate) if two_steps: named_parameters = list(self.named_parameters()) all_but_adj_matrix = [] for a in named_parameters: if a[0] != "Z_edges_logits": all_but_adj_matrix.append(a[1]) optimizer_second_half = torch.optim.Adam(all_but_adj_matrix, lr=learning_rate) # Creating the dataloader tensor_dataset = TensorDataset(*to_tensors(data, mask, device=self.device)) dataloader = DataLoader(tensor_dataset, batch_size=batch_size, shuffle=True) # If DAG loss is used, it appears after 'epochs_without_dagloss' epochs and its coefficient (lambda) grows linearly # during 10% of the total number of epochs until 1. This scheme is used for lambda because of empirical # reasons (DAG loss might take over the training if it is used with coefficient 1.0 from the beginning). if use_dag_loss: epochs_without_dagloss = 5 if (epochs_without_dagloss + 0.1 * epochs) > epochs: warnings.warn("max lambda will not be achieved") best_train_loss = np.nan for epoch in range(epochs): training_loss_full = 0.0 nll_term_full = 0.0 kl_z_term_full = 0.0 kl_A_term_full = 0.0 dag_loss_term_full = 0.0 training_loss_complete_full = 0.0 # Set the optimizer_to_use depending on whether we are using the two-steps variant or not. 
if not two_steps: optimizer_to_use = optimizer only_forward = False elif epoch < epochs // 2: optimizer_to_use = optimizer only_forward = True else: optimizer_to_use = optimizer_second_half only_forward = False for x, mask_train_batch in dataloader: # Drop additional values (same procedure as PVAE) input_mask, scoring_mask = get_input_and_scoring_masks( mask_train_batch, max_p_train_dropout=max_p_train_dropout, score_imputation=True, score_reconstruction=True, ) # Apply the GNN-based VAE (x_reconstructed, _), _, encoder_output = self.reconstruct( x, input_mask, only_forward=only_forward, count=sample_count ) # Loss: lambda_nll * NLL + # lambda_kl_z * KL(q(z)||p(z)) + # lambda_kl_A * KL(q(A)||p(A)) + # piecewise_linear_temperature * lambda_dagloss * dag_loss # NLL dec_logvar = torch.full_like(x_reconstructed, math.log(output_variance)) categorical_lik_coeff = 1.0 nll = negative_log_likelihood( x, x_reconstructed, dec_logvar, self.variables, categorical_lik_coeff, scoring_mask ) nll_term = lambda_nll * nll # KL(q(z)||p(z)) term
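The loss comment above includes a KL(q(A)||p(A)) term between the learned categorical edge posterior and the sparse prior. The repo computes this with kl_categorical from utils.nri_utils, whose signature is not shown here; kl_edges below is a hypothetical stand-in for the standard categorical KL, using the Z_edges_logits and log_prior tensors defined in __init__:

```python
# Hypothetical sketch of KL(q(A) || p(A)) for a per-edge categorical posterior
# q(A) = softmax(Z_edges_logits, dim=1) against a fixed sparse prior.
import torch
import torch.nn.functional as F

def kl_edges(Z_edges_logits: torch.Tensor, log_prior: torch.Tensor) -> torch.Tensor:
    """KL between each edge's categorical q and the prior, summed over edges."""
    log_q = F.log_softmax(Z_edges_logits, dim=1)   # [num_edges, 2]
    q = log_q.exp()
    return (q * (log_q - log_prior)).sum()         # scalar, differentiable

num_edges = 6
logits = torch.zeros(num_edges, 2, requires_grad=True)  # uniform q to start
log_prior = torch.log(torch.tensor([0.95, 0.05]))       # favors "no edge"
print(kl_edges(logits, log_prior))                      # positive scalar
```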
# This is required in python 3 to allow return types of the same class. from __future__ import annotations class VISL(PVAEBaseModel, IModelForCausalInference): """ Subclass of `models.pvae_base_model.PVAEBaseModel` representing the algorithm VISL (missing value imputation with causal discovery). Requires file <data_dir>/<dataset_name>/adj_matrix.csv to evaluate causal discovery against ground truth. """ def __init__( self, model_id: str, variables: Variables, save_dir: str, device: torch.device, gnn_iters: int, shared_init_and_final_mappings: bool, embedding_dim: int, init_prob: float, simpler: str = None, **_, ): """ Args: model_id: Unique model ID for referencing this model instance. variables: Information about variables/features used by this model. save_dir: Location to save any information about this model, including training data. device: Device to load model to. gnn_iters: Number of message passing iterations for the GNN. shared_init_and_final_mappings: Whether all the nodes should use the same MLPs for the initial and final mappings. embedding_dim: Dimensionality of the nodes embedding. init_prob: Initial probability of having edge. simpler: Choose what MLP should be simpler (options are 'forward', 'backward', or None). Specifically, 'simpler' means to divide by 10 the dimensionality of the hidden layer of the corresponding MLP (with a minimum of 10 units). """ super().__init__(model_id, variables, save_dir, device) # Define some useful attributes feature_indices_per_node, ordered_nodes = get_feature_indices_per_node(variables) with open(os.path.join(self.save_dir, "ordered_nodes.json"), "w", encoding="utf-8") as f: json.dump(ordered_nodes, f, indent=4) self.num_nodes = len(feature_indices_per_node) self.num_edges = self.num_nodes * (self.num_nodes - 1) self.input_dim = variables.num_processed_cols # Define and initialize Z_edges # The learnable parameter is Z_edges_logits. Z_edges is F.softmax(Z_edges_logits, dim=1). self.Z_edges_logits = torch.nn.Parameter( torch.stack( [ torch.full([self.num_edges], math.log(1 - init_prob)), torch.full([self.num_edges], math.log(init_prob)), ], dim=1, ).to(device) ) # Define the GNN-based VAE self.gnn_vae = GNN_based_VAE( embedding_dim=embedding_dim, skip_first=True, device=device, n_iters=gnn_iters, num_nodes=self.num_nodes, shared_init_and_final_mappings=shared_init_and_final_mappings, simpler=simpler, feature_indices_per_node=feature_indices_per_node, ) # Create rel_rec and rel_send, which codify the receiving and sending node for each edge # Shape of rel_rec and rel_send: [num_edges, num_nodes] # The second dimension is a one-hot encoding of the receiver or sender node off_diag = np.ones([self.num_nodes, self.num_nodes]) - np.eye(self.num_nodes) rel_rec = F.one_hot(torch.tensor(np.where(off_diag)[0], dtype=torch.long)) rel_send = F.one_hot(torch.tensor(np.where(off_diag)[1], dtype=torch.long)) self.rel_rec = rel_rec.type(torch.float).to(device) self.rel_send = rel_send.type(torch.float).to(device) # Define the prior over edge types (favors sparse graphs) self.log_prior = torch.log( torch.tensor([0.95, 0.05], device=device) ) # The no-edge type is the first one (recall the skip_first argument of GNN_based_VAE __init__) # Save type of variables. Used in reconstruct method for # 1. filling the missing values before applying the GNN-based VAE, # 2. processing the output of the GNN-based VAE (i.e. 
use torch.sigmoid in the binary case) types = np.array([v.type_ for v in self.variables._variables]) if (types == "binary").all(): self.var_types = "binary" elif (types == "continuous").all(): self.var_types = "continuous" elif (types == "categorical").all(): self.var_types = "categorical" else: raise ValueError("Right now all the variables need to have the same type") def _train( # type: ignore self, dataset: Dataset, report_progress_callback: Optional[Callable[[str, int, int], None]], learning_rate: float, batch_size: int, epochs: int, max_p_train_dropout: float, use_dag_loss: bool, output_variance: float, hard: bool, two_steps: bool, lambda_nll: float, lambda_kl_z: float, lambda_kl_A: float, lambda_dagloss: float, sample_count: int, ) -> Dict[str, List[float]]: """ Train the model using the given data. Args: dataset: Dataset with data and masks in processed form. train_output_dir: Path to save any training information to. report_progress_callback: Function to report model progress for API. learning_rate: Learning rate for Adam optimiser. batch_size: Size of minibatches to use. epochs: Number of epochs to train for. max_p_train_dropout: Maximum fraction of extra training features to drop for each row. 0 is none, 1 is all. use_dag_loss: Whether to use the DAG loss regularisation. output_variance: The variance for the output of the GNN based VAE. hard: Whether to use hard or soft samples for the distribution over edges (if hard=True, the edge weights are just 0/1). two_steps: Whether to use the two-step variant of VISL. That is, the first half of training uses only the forward MLP and the second half fixes the distribution over edges and only optimizes the forward and backward MLPs. lambda_nll: Lambda coefficient for the ELBO term negative-log-likelihood lambda_kl_z: Lambda coefficient for the ELBO term lambda*KL(q(z|x) || p(z)) lambda_kl_A: Lambda coefficient for the ELBO term lambda*KL(q(A) || p(A)) lambda_dagloss: Lambda coefficient for the dagloss term of the ELBO. sample_count: Number of samples to reconstruct. Returns: train_results (dictionary): training_loss, KL divergence, NLL, dag_loss, training_loss_complete """ # Put PyTorch into train mode. self.train() # Setting the hard attribute which will be used for training and testing self.hard = hard # Loading data and mask, creating results_dict data, mask = dataset.train_data_and_mask results_dict: Dict[str, List] = { metric: [] for metric in [ "training_loss", "kl_z_term", "kl_A_term", "nll_term", "dag_loss_term", "training_loss_complete", ] } # Creating the optimizer # If two_steps is True, we create a different optimizer for the second half. This optimizer does not optimize over the adjacency matrix. optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate) if two_steps: named_parameters = list(self.named_parameters()) all_but_adj_matrix = [] for a in named_parameters: if a[0] != "Z_edges_logits": all_but_adj_matrix.append(a[1]) optimizer_second_half = torch.optim.Adam(all_but_adj_matrix, lr=learning_rate) # Creating the dataloader tensor_dataset = TensorDataset(*to_tensors(data, mask, device=self.device)) dataloader = DataLoader(tensor_dataset, batch_size=batch_size, shuffle=True) # If DAG loss is used, it appears after 'epochs_without_dagloss' epochs and its coefficient (lambda) grows linearly # during 10% of the total number of epochs until 1. This scheme is used for lambda because of empirical # reasons (DAG loss might take over the training if it is used with coefficient 1.0 from the beginning). 
        if use_dag_loss:
            epochs_without_dagloss = 5
            if (epochs_without_dagloss + 0.1 * epochs) > epochs:
                warnings.warn("max lambda will not be achieved")

        best_train_loss = np.nan
        for epoch in range(epochs):
            training_loss_full = 0.0
            nll_term_full = 0.0
            kl_z_term_full = 0.0
            kl_A_term_full = 0.0
            dag_loss_term_full = 0.0
            training_loss_complete_full = 0.0

            # Set the optimizer_to_use depending on whether we are using the two-step variant or not.
            if not two_steps:
                optimizer_to_use = optimizer
                only_forward = False
            elif epoch < epochs // 2:
                optimizer_to_use = optimizer
                only_forward = True
            else:
                optimizer_to_use = optimizer_second_half
                only_forward = False

            for x, mask_train_batch in dataloader:
                # Drop additional values (same procedure as PVAE)
                input_mask, scoring_mask = get_input_and_scoring_masks(
                    mask_train_batch,
                    max_p_train_dropout=max_p_train_dropout,
                    score_imputation=True,
                    score_reconstruction=True,
                )

                # Apply the GNN-based VAE
                (x_reconstructed, _), _, encoder_output = self.reconstruct(
                    x, input_mask, only_forward=only_forward, count=sample_count
                )

                # Loss: lambda_nll * NLL +
                #       lambda_kl_z * KL(q(z)||p(z)) +
                #       lambda_kl_A * KL(q(A)||p(A)) +
                #       piecewise_linear_temperature * lambda_dagloss * dag_loss

                # NLL
                dec_logvar = torch.full_like(x_reconstructed, math.log(output_variance))
                categorical_lik_coeff = 1.0
                nll = negative_log_likelihood(
                    x, x_reconstructed, dec_logvar, self.variables, categorical_lik_coeff, scoring_mask
                )
                nll_term = lambda_nll * nll

                # KL(q(z)||p(z)) term
kl_z_term = lambda_kl_z * kl_divergence(encoder_output).sum()
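The record stops at the KL(q(z)||p(z)) term, but the loss comment above spells out the full objective. A minimal sketch of how the remaining terms would be combined; `kl_A`, `dag_loss`, `temperature`, and the function name are hypothetical stand-ins for quantities computed elsewhere, not code from this repository:

def assemble_loss(nll_term, kl_z_term, kl_A, dag_loss, lambda_kl_A, lambda_dagloss, temperature):
    # Mirrors the decomposition in the comment above:
    #   lambda_nll * NLL + lambda_kl_z * KL(q(z)||p(z))
    # + lambda_kl_A * KL(q(A)||p(A))
    # + piecewise_linear_temperature * lambda_dagloss * dag_loss
    kl_A_term = lambda_kl_A * kl_A
    dag_loss_term = temperature * lambda_dagloss * dag_loss
    return nll_term + kl_z_term + kl_A_term + dag_loss_term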
10
2023-11-21 12:55:08+00:00
24k
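The DAG-loss warm-up described in the comments of `_train` above is small enough to state as code. A minimal sketch under those stated assumptions; the function name and the exact ramp arithmetic are mine, not the repository's:

def dag_lambda(epoch: int, epochs: int, epochs_without_dagloss: int = 5) -> float:
    # Zero before `epochs_without_dagloss`, then a linear ramp over 10% of the
    # total number of epochs, capped at 1.0 (the scheme the comments describe).
    ramp_epochs = max(1, int(0.1 * epochs))
    if epoch < epochs_without_dagloss:
        return 0.0
    return min(1.0, (epoch - epochs_without_dagloss) / ramp_epochs)

For example, with epochs=100 the coefficient stays at 0 for epochs 0-4, reaches 0.5 at epoch 10, and saturates at 1.0 from epoch 15 onward.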
Yifei-Y/Openset-RCNN
openset_rcnn/evaluation/os_coco_evaluation.py
[ { "identifier": "GRASPNET_KNOWN_IDS", "path": "openset_rcnn/data/graspnet_meta.py", "snippet": "GRASPNET_KNOWN_IDS = [graspnet_known_name_id_dic[name_cat] for name_cat in GRASPNET_KNOWN_CATEGORIES]" }, { "identifier": "GRASPNET_KNOWN_CATEGORIES", "path": "openset_rcnn/data/graspnet_meta.py",...
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from detectron2.evaluation.evaluator import DatasetEvaluator from detectron2.evaluation.coco_evaluation import instances_to_coco_json from openset_rcnn.data.graspnet_meta import GRASPNET_KNOWN_IDS, GRASPNET_KNOWN_CATEGORIES from .os_cocoeval import OpensetCOCOEval
17,239
[32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit] overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) _gt_overlaps = torch.zeros(len(gt_boxes)) for j in range(min(len(predictions), len(gt_boxes))): # find which proposal box maximally covers each gt box # and get the iou amount of coverage for each gt box max_overlaps, argmax_overlaps = overlaps.max(dim=0) # find which gt box is 'best' covered (i.e. 'best' = most iou) gt_ovr, gt_ind = max_overlaps.max(dim=0) assert gt_ovr >= 0 # find the proposal box that covers the best covered gt box box_ind = argmax_overlaps[gt_ind] # record the iou coverage of this gt box _gt_overlaps[j] = overlaps[box_ind, gt_ind] assert _gt_overlaps[j] == gt_ovr # mark the proposal box and the gt box as used overlaps[box_ind, :] = -1 overlaps[:, gt_ind] = -1 # append recorded iou coverage level gt_overlaps.append(_gt_overlaps) gt_overlaps = ( torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) ) gt_overlaps, _ = torch.sort(gt_overlaps) if thresholds is None: step = 0.05 thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) recalls = torch.zeros_like(thresholds) # compute recall for each iou threshold for i, t in enumerate(thresholds): recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) # ar = 2 * np.trapz(recalls, thresholds) ar = recalls.mean() return { "ar": ar, "recalls": recalls, "thresholds": thresholds, "gt_overlaps": gt_overlaps, "num_pos": num_pos, } def _evaluate_predictions_on_coco( coco_gt, coco_results, iou_type, eval_type, known_names, known_ids, img_ids=None, max_dets_per_image=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 if iou_type == "segm": coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results) assert eval_type == "openset" for _, ann in enumerate(coco_gt.dataset['annotations']): if ann['category_id'] not in known_ids: ann['category_id'] = 1000
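The greedy matching loop above (pick the best remaining proposal/gt pair, then invalidate its row and column) and the threshold sweep are easy to miss inside the bookkeeping. A compact standalone sketch of the same average-recall computation, assuming a precomputed IoU matrix rather than the repository's Boxes structures:

import numpy as np

def average_recall(ious: np.ndarray) -> float:
    # `ious`: [num_proposals, num_gt] IoU matrix, proposals already sorted by
    # objectness. Greedily match each gt box to its best remaining proposal,
    # then average recall over IoU thresholds 0.5:0.05:0.95 as the code above does.
    ious = ious.copy()
    overlaps = []
    for _ in range(min(ious.shape)):
        i, j = np.unravel_index(np.argmax(ious), ious.shape)  # global best pair
        overlaps.append(ious[i, j])
        ious[i, :] = -1  # mark proposal i and gt j as used
        ious[:, j] = -1
    overlaps = np.array(overlaps)
    thresholds = np.arange(0.5, 0.95 + 1e-5, 0.05)
    recalls = [(overlaps >= t).sum() / ious.shape[1] for t in thresholds]
    return float(np.mean(recalls))

For instance, average_recall(np.array([[0.9, 0.1], [0.2, 0.8]])) matches the pairs (0.9, 0.8) and returns 0.8.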
# Copyright (c) Facebook, Inc. and its affiliates. class OpensetCOCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, eval_type, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (list[int]): limit on the maximum number of detections per image. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self.known_names = GRASPNET_KNOWN_CATEGORIES self.known_ids = GRASPNET_KNOWN_IDS self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): if output_dir is None: raise ValueError( "output_dir must be provided to COCOEvaluator " "for datasets not in COCO format." 
) self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path convert_to_coco_json(dataset_name, cache_path) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._coco_api.dataset if self._do_evaluation: self._kpt_oks_sigmas = kpt_oks_sigmas self.eval_type = eval_type def reset(self): self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a COCO model. It is a list of dicts with key "instances" that contains :class:`Instances`. """ for input, output in zip(inputs, outputs): prediction = {"image_id": input["image_id"]} if "instances" in output: instances = output["instances"].to(self._cpu_device) prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) if "proposals" in output: prediction["proposals"] = output["proposals"].to(self._cpu_device) if len(prediction) > 1: self._predictions.append(prediction) def evaluate(self, img_ids=None, resume=False): """ Args: img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset resume: if resume is True, read the saved detection results to save time. """ if not resume: if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return {} else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) else: file_path = os.path.join(self._output_dir, "instances_predictions.pth") predictions = torch.load(file_path) self._results = OrderedDict() if "proposals" in predictions[0]: self._eval_box_proposals(predictions) if "instances" in predictions[0]: self._eval_predictions(predictions, img_ids=img_ids, resume=resume) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def _tasks_from_predictions(self, predictions): """ Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. 
""" tasks = {"bbox"} for pred in predictions: if "segmentation" in pred: tasks.add("segm") if "keypoints" in pred: tasks.add("keypoints") return sorted(tasks) def save_json(self, output_dir): PathManager.mkdirs(output_dir) coco_results = list(itertools.chain(*[x["instances"] for x in self._predictions])) if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} for result in coco_results: result["category_id"] = reverse_id_mapping[result["category_id"]] if result["category_id"] not in self.known_ids: result["category_id"] = 1000 file_path = os.path.join(output_dir, "coco_instances_results.json") with PathManager.open(file_path, "w") as f: f.write(json.dumps(coco_results)) f.flush() def _eval_predictions(self, predictions, img_ids=None, resume=False): """ Evaluate predictions. Fill self._results with the metrics of the tasks. """ self._logger.info("Preparing results for COCO format ...") coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) tasks = self._tasks or self._tasks_from_predictions(coco_results) # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) num_classes = len(all_contiguous_ids) assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} if self.eval_type != "Closeset": reverse_id_mapping[1000] = 1000 for result in coco_results: category_id = result["category_id"] assert category_id < num_classes or category_id == 1000, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}] or 1000." ) result["category_id"] = reverse_id_mapping[category_id] else: for result in coco_results: category_id = result["category_id"] assert category_id < num_classes, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}]." ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir and not resume: file_path = os.path.join(self._output_dir, "coco_instances_results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(coco_results)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info( "Evaluating predictions with {} COCO API...".format( "unofficial" if self._use_fast_impl else "official" ) ) for task in sorted(tasks): assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!" coco_eval = ( _evaluate_predictions_on_coco( self._coco_api, coco_results, task, self.eval_type, self.known_names, self.known_ids, img_ids=img_ids, max_dets_per_image=self._max_dets_per_image, ) if len(coco_results) > 0 else None # cocoapi does not handle empty results very well ) res = self._derive_coco_results( coco_eval, task, self.eval_type, class_names=self._metadata.get("thing_classes") ) self._results[task] = res def _eval_box_proposals(self, predictions): """ Evaluate the box proposals in predictions. Fill self._results with the metrics for "box_proposals" task. 
""" if self._output_dir: # Saving generated box proposals to file. # Predicted box_proposals are in XYXY_ABS mode. bbox_mode = BoxMode.XYXY_ABS.value ids, boxes, objectness_logits = [], [], [] for prediction in predictions: ids.append(prediction["image_id"]) boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) proposal_data = { "boxes": boxes, "objectness_logits": objectness_logits, "ids": ids, "bbox_mode": bbox_mode, } with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: pickle.dump(proposal_data, f) if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating bbox proposals ...") res = {} areas = {"all": "", "small": "s", "medium": "m", "large": "l"} for limit in [100, 1000]: for area, suffix in areas.items(): stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) key = "AR{}@{:d}".format(suffix, limit) res[key] = float(stats["ar"].item() * 100) self._logger.info("Proposal metrics: \n" + create_small_table(res)) self._results["box_proposals"] = res def _derive_coco_results(self, coco_eval, iou_type, eval_type, class_names=None): """ Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ if eval_type == "openset": metrics = { "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR10", "AR20", "AR30", "AR50", "AR100", "ARs", "ARm", "ARl"], "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR10", "AR20", "AR30", "AR50", "AR100", "ARs", "ARm", "ARl"], }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } results['WI'] = coco_eval.stats[14] results['AOSE'] = coco_eval.stats[15] self._logger.info("Evaluation type is {} known: \n".format(self.eval_type)) self._logger.info( "Evaluation results for known {}: \n".format(iou_type) + create_small_table(results) ) results_unk = { metric: float(coco_eval.stats[idx+16] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info("Evaluation type is {} unknown: \n".format(self.eval_type)) self._logger.info( "Evaluation results for unknown {}: \n".format(iou_type) + create_small_table(results_unk) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") class_names = self.known_names # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval_kdt["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 
            unk_precisions = coco_eval.eval_unkdt["precision"]
            unk_precisions = unk_precisions[:, :, 0, -1]
            unk_precisions = unk_precisions[unk_precisions > -1]
            unk_ap = np.mean(unk_precisions) if unk_precisions.size else float("nan")
            results_per_category.append(("unknown", float(unk_ap * 100)))

            # tabulate it
            N_COLS = min(6, len(results_per_category) * 2)
            results_flatten = list(itertools.chain(*results_per_category))
            results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
            table = tabulate(
                results_2d,
                tablefmt="pipe",
                floatfmt=".3f",
                headers=["category", "AP"] * (N_COLS // 2),
                numalign="left",
            )
            self._logger.info("Per-category {} AP: \n".format(iou_type) + table)

            results.update({"AP-" + name: ap for name, ap in results_per_category})

            np.save(os.path.join(self._output_dir, "known_precision_" + iou_type + ".npy"), coco_eval.eval_kdt["precision"])
            np.save(os.path.join(self._output_dir, "known_recall_" + iou_type + ".npy"), coco_eval.eval_kdt["recall"])
            np.save(os.path.join(self._output_dir, "unknown_precision_" + iou_type + ".npy"), coco_eval.eval_unkdt["precision"])
            np.save(os.path.join(self._output_dir, "unknown_recall_" + iou_type + ".npy"), coco_eval.eval_unkdt["recall"])

            return results
        elif eval_type == "cls_agn_unk":
            metrics = {
                "bbox": ["AR10", "AR20", "AR30", "AR50", "AR100", "AP"],
                "segm": ["AR10", "AR20", "AR30", "AR50", "AR100", "AP"],
            }[iou_type]

            if coco_eval is None:
                self._logger.warn("No predictions from the model!")
                return {metric: float("nan") for metric in metrics}

            # the standard metrics
            results = {
                metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
                for idx, metric in enumerate(metrics)
            }
            self._logger.info("Evaluation type is {}: \n".format(self.eval_type))
            self._logger.info(
                "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
            )
            if not np.isfinite(sum(results.values())):
                self._logger.info("Some metrics cannot be computed and are shown as NaN.")
            return results


# inspired from Detectron:
# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
    """
    Evaluate detection proposal recall metrics. This function is a much faster alternative to the
    official COCO API recall evaluation code. However, it produces slightly different results.
""" # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit] overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) _gt_overlaps = torch.zeros(len(gt_boxes)) for j in range(min(len(predictions), len(gt_boxes))): # find which proposal box maximally covers each gt box # and get the iou amount of coverage for each gt box max_overlaps, argmax_overlaps = overlaps.max(dim=0) # find which gt box is 'best' covered (i.e. 'best' = most iou) gt_ovr, gt_ind = max_overlaps.max(dim=0) assert gt_ovr >= 0 # find the proposal box that covers the best covered gt box box_ind = argmax_overlaps[gt_ind] # record the iou coverage of this gt box _gt_overlaps[j] = overlaps[box_ind, gt_ind] assert _gt_overlaps[j] == gt_ovr # mark the proposal box and the gt box as used overlaps[box_ind, :] = -1 overlaps[:, gt_ind] = -1 # append recorded iou coverage level gt_overlaps.append(_gt_overlaps) gt_overlaps = ( torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) ) gt_overlaps, _ = torch.sort(gt_overlaps) if thresholds is None: step = 0.05 thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) recalls = torch.zeros_like(thresholds) # compute recall for each iou threshold for i, t in enumerate(thresholds): recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) # ar = 2 * np.trapz(recalls, thresholds) ar = recalls.mean() return { "ar": ar, "recalls": recalls, "thresholds": thresholds, "gt_overlaps": gt_overlaps, "num_pos": num_pos, } def _evaluate_predictions_on_coco( coco_gt, coco_results, iou_type, eval_type, known_names, known_ids, img_ids=None, max_dets_per_image=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 if iou_type == "segm": coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. 
# We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results) assert eval_type == "openset" for _, ann in enumerate(coco_gt.dataset['annotations']): if ann['category_id'] not in known_ids: ann['category_id'] = 1000
coco_eval = OpensetCOCOEval(coco_gt, coco_dt, iou_type)
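`_derive_coco_results` above reduces COCOeval's precision array to one AP number per category. A minimal standalone sketch of that reduction, assuming the 5-D layout documented in the comment (iou, recall, cls, area range, max dets):

import numpy as np

def per_category_ap(precisions: np.ndarray, cls_idx: int) -> float:
    # Take all area ranges (index 0) and the largest max-dets setting (index -1),
    # drop the -1 sentinel entries, and average what remains, as the code above does.
    p = precisions[:, :, cls_idx, 0, -1]
    p = p[p > -1]
    return float(np.mean(p)) if p.size else float("nan")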
2
2023-11-21 01:47:01+00:00
24k
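Before `OpensetCOCOEval` is constructed (the next_line above), the evaluator folds every ground-truth annotation whose category is outside the known set into a single unknown id, 1000. A standalone sketch of that remapping; the helper name is mine:

UNKNOWN_ID = 1000  # the sentinel id used by the evaluator above

def remap_unknown(annotations: list, known_ids: set) -> list:
    # Fold all non-known categories into one "unknown" class, matching the
    # in-place remapping applied to coco_gt before evaluation.
    for ann in annotations:
        if ann["category_id"] not in known_ids:
            ann["category_id"] = UNKNOWN_ID
    return annotations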
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/models/unet_2d_condition_flax.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~Conf...
from typing import Dict, Optional, Tuple, Union from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_2d_blocks_flax import ( FlaxCrossAttnDownBlock2D, FlaxCrossAttnUpBlock2D, FlaxDownBlock2D, FlaxUNetMidBlock2DCrossAttn, FlaxUpBlock2D, ) import flax import flax.linen as nn import jax import jax.numpy as jnp
19,618
sample_size (`int`, *optional*): The size of the input sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D")`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int` or `Tuple[int]`, *optional*): The number of attention heads. cross_attention_dim (`int`, *optional*, defaults to 768): The dimension of the cross attention features. dropout (`float`, *optional*, defaults to 0): Dropout probability for down, up and bottleneck blocks. flip_sin_to_cos (`bool`, *optional*, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): Enable memory efficient attention as described [here](https://arxiv.org/abs/2112.05682). split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. """ sample_size: int = 32 in_channels: int = 4 out_channels: int = 4 down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") only_cross_attention: Union[bool, Tuple[bool]] = False block_out_channels: Tuple[int, ...] 
= (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int, ...]] = 8 num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 use_memory_efficient_attention: bool = False split_head_dim: bool = False transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1 addition_embed_type: Optional[str] = None addition_time_embed_dim: Optional[int] = None addition_embed_type_num_heads: int = 64 projection_class_embeddings_input_dim: Optional[int] = None def init_weights(self, rng: jax.Array) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} added_cond_kwargs = None if self.addition_embed_type == "text_time": # we retrieve the expected `text_embeds_dim` by first checking if the architecture is a refiner # or non-refiner architecture and then by "reverse-computing" from `projection_class_embeddings_input_dim` is_refiner = ( 5 * self.config.addition_time_embed_dim + self.config.cross_attention_dim == self.config.projection_class_embeddings_input_dim ) num_micro_conditions = 5 if is_refiner else 6 text_embeds_dim = self.config.projection_class_embeddings_input_dim - ( num_micro_conditions * self.config.addition_time_embed_dim ) time_ids_channels = self.projection_class_embeddings_input_dim - text_embeds_dim time_ids_dims = time_ids_channels // self.addition_time_embed_dim added_cond_kwargs = { "text_embeds": jnp.zeros((1, text_embeds_dim), dtype=jnp.float32), "time_ids": jnp.zeros((1, time_ids_dims), dtype=jnp.float32), } return self.init(rngs, sample, timesteps, encoder_hidden_states, added_cond_kwargs)["params"] def setup(self) -> None: block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = self.num_attention_heads or self.attention_head_dim # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """
    The output of [`FlaxUNet2DConditionModel`].

    Args:
        sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
            The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
    """

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a
    sample-shaped output.

    This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for its generic methods
    implemented for all models (such as downloading or saving).

    This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its
    general usage and behavior.

    Inherent JAX features such as the following are supported:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        sample_size (`int`, *optional*):
            The size of the input sample.
        in_channels (`int`, *optional*, defaults to 4):
            The number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 4):
            The number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`):
            The tuple of downsample blocks to use.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D")`):
            The tuple of upsample blocks to use.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8):
            The dimension of the attention heads.
        num_attention_heads (`int` or `Tuple[int]`, *optional*):
            The number of attention heads.
        cross_attention_dim (`int`, *optional*, defaults to 768):
            The dimension of the cross attention features.
        dropout (`float`, *optional*, defaults to 0):
            Dropout probability for down, up and bottleneck blocks.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): Enable memory efficient attention as described [here](https://arxiv.org/abs/2112.05682). split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. """ sample_size: int = 32 in_channels: int = 4 out_channels: int = 4 down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") only_cross_attention: Union[bool, Tuple[bool]] = False block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int, ...]] = 8 num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 use_memory_efficient_attention: bool = False split_head_dim: bool = False transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1 addition_embed_type: Optional[str] = None addition_time_embed_dim: Optional[int] = None addition_embed_type_num_heads: int = 64 projection_class_embeddings_input_dim: Optional[int] = None def init_weights(self, rng: jax.Array) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} added_cond_kwargs = None if self.addition_embed_type == "text_time": # we retrieve the expected `text_embeds_dim` by first checking if the architecture is a refiner # or non-refiner architecture and then by "reverse-computing" from `projection_class_embeddings_input_dim` is_refiner = ( 5 * self.config.addition_time_embed_dim + self.config.cross_attention_dim == self.config.projection_class_embeddings_input_dim ) num_micro_conditions = 5 if is_refiner else 6 text_embeds_dim = self.config.projection_class_embeddings_input_dim - ( num_micro_conditions * self.config.addition_time_embed_dim ) time_ids_channels = self.projection_class_embeddings_input_dim - text_embeds_dim time_ids_dims = time_ids_channels // self.addition_time_embed_dim added_cond_kwargs = { "text_embeds": jnp.zeros((1, text_embeds_dim), dtype=jnp.float32), "time_ids": jnp.zeros((1, time_ids_dims), dtype=jnp.float32), } return self.init(rngs, sample, timesteps, encoder_hidden_states, added_cond_kwargs)["params"] def setup(self) -> None: block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. 
# The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = self.num_attention_heads or self.attention_head_dim # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time
self.time_proj = FlaxTimesteps(
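The next_line above continues `setup()` with the time-step projection; parameter initialization itself goes through `init_weights`, which builds dummy inputs and calls `self.init`. A hedged usage sketch, assuming `FlaxUNet2DConditionModel` is importable as defined above:

import jax

# Hypothetical instantiation; the config values mirror the dataclass defaults above.
unet = FlaxUNet2DConditionModel(sample_size=32, cross_attention_dim=1280)
params = unet.init_weights(jax.random.PRNGKey(0))  # FrozenDict of initialized parameters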
4
2023-12-28 08:17:40+00:00
24k
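The `init_weights` branch above "reverse-computes" the pooled text-embedding width from `projection_class_embeddings_input_dim`. A worked example with Stable-Diffusion-XL-base-like values; the concrete numbers are assumptions quoted for illustration, not read from this record:

# Assumed SDXL-base-style config values:
addition_time_embed_dim = 256
cross_attention_dim = 2048
projection_class_embeddings_input_dim = 2816

is_refiner = 5 * addition_time_embed_dim + cross_attention_dim == projection_class_embeddings_input_dim
num_micro_conditions = 5 if is_refiner else 6  # 3328 != 2816, so not a refiner -> 6
text_embeds_dim = projection_class_embeddings_input_dim - num_micro_conditions * addition_time_embed_dim
time_ids_dims = (projection_class_embeddings_input_dim - text_embeds_dim) // addition_time_embed_dim
print(text_embeds_dim, time_ids_dims)  # 1280 6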
FoundationVision/UniRef
detectron2/evaluation/coco_evaluation.py
[ { "identifier": "CfgNode", "path": "detectron2/config/config.py", "snippet": "class CfgNode(_CfgNode):\n \"\"\"\n The same as `fvcore.common.config.CfgNode`, but different in:\n\n 1. Use unsafe yaml loading by default.\n Note that this may lead to arbitrary code execution: you must not\n ...
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .evaluator import DatasetEvaluator from detectron2.evaluation.fast_eval_api import COCOeval_opt from detectron2.evaluation.refcocoeval import RefCOCOeval
14,835
self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) # results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json(instances, img_id): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): img_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ num_instance = len(instances) if num_instance == 0: return [] boxes = instances.pred_boxes.tensor.numpy() boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) boxes = boxes.tolist() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() has_mask = instances.has("pred_masks") if has_mask: # use RLE to encode the masks, because they are too large and takes memory # since this evaluator stores outputs of the entire dataset rles = [ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] for mask in instances.pred_masks ] for rle in rles: # "counts" is an array encoded by mask_util as a byte-stream. Python3's # json writer which always produces strings cannot serialize a bytestream # unless you decode it. Thankfully, utf-8 works out (which is also what # the pycocotools/_mask.pyx does). rle["counts"] = rle["counts"].decode("utf-8") has_keypoints = instances.has("pred_keypoints") if has_keypoints: keypoints = instances.pred_keypoints results = [] for k in range(num_instance): result = { "image_id": img_id, "category_id": classes[k], "bbox": boxes[k], "score": scores[k], } if has_mask: result["segmentation"] = rles[k] if has_keypoints: # In COCO annotations, # keypoints coordinates are pixel indices. # However our predictions are floating point coordinates. # Therefore we subtract 0.5 to be consistent with the annotation format. # This is the inverse of data loading logic in `datasets/coco.py`. keypoints[k][:, :2] -= 0.5 result["keypoints"] = keypoints[k].flatten().tolist() results.append(result) return results # inspired from Detectron: # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. 
""" # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
# Copyright (c) Facebook, Inc. and its affiliates.

try:
    from detectron2.evaluation.fast_eval_api import COCOeval_opt
except ImportError:
    COCOeval_opt = COCOeval


class COCOEvaluator(DatasetEvaluator):
    """
    Evaluate AR for object proposals, AP for instance detection/segmentation, AP
    for keypoint detection outputs using COCO's metrics.
    See http://cocodataset.org/#detection-eval and
    http://cocodataset.org/#keypoints-eval to understand its metrics.
    The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
    the metric cannot be computed (e.g. due to no predictions made).

    In addition to COCO, this evaluator is able to support any bounding box detection,
    instance segmentation, or keypoint detection dataset.
    """

    def __init__(
        self,
        dataset_name,
        tasks=None,
        distributed=True,
        output_dir=None,
        *,
        max_dets_per_image=None,
        use_fast_impl=True,
        kpt_oks_sigmas=(),
        allow_cached_coco=True,
        force_tasks=None,
        refcoco=False
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:
                    "json_file": the path to the COCO format annotation
                Or it must be in detectron2's standard dataset format
                so it can be converted to COCO format automatically.
            tasks (tuple[str]): tasks that can be evaluated under the given
                configuration. A task is one of "bbox", "segm", "keypoints".
                By default, will infer this automatically from predictions.
            distributed (bool): if True, will collect results from all ranks and
                run evaluation in the main process.
                Otherwise, will only evaluate the results in the current process.
                Defaults to True.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains two files:

                1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
                   contains all the results in the format they are produced by the model.
                2. "coco_instances_results.json" a json file in COCO's result format.
            max_dets_per_image (int): limit on the maximum number of detections per image.
                By default in COCO, this limit is 100, but this can be customized
                to be greater, as is needed in evaluation metrics AP fixed and AP pool
                (see https://arxiv.org/pdf/2102.01066.pdf)
                This doesn't affect keypoint evaluation.
            use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
                Although the results should be very close to the official implementation in COCO
                API, it is still recommended to compute results with the official API for use in
                papers. The faster implementation also uses more RAM.
            kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
                See http://cocodataset.org/#keypoints-eval
                When empty, it will use the defaults in COCO.
                Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
            allow_cached_coco (bool): Whether to use cached coco json from previous validation
                runs. You should set this to False if you need to use different validation data.
                Defaults to True.
        """
        self.dataset_name = dataset_name
        self._logger = logging.getLogger(__name__)
        self._distributed = distributed
        self._output_dir = output_dir
        self.force_tasks = force_tasks
        self.refcoco = refcoco

        if use_fast_impl and (COCOeval_opt is COCOeval):
            self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.")
            use_fast_impl = False
        self._use_fast_impl = use_fast_impl

        # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
        # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
        # 3rd element (100) is used as the limit on the number of detections per image when
        # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
        # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
        if max_dets_per_image is None:
            max_dets_per_image = [1, 10, 100]
        else:
            max_dets_per_image = [1, 10, max_dets_per_image]
        self._max_dets_per_image = max_dets_per_image

        if tasks is not None and isinstance(tasks, CfgNode):
            kpt_oks_sigmas = (
                tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
            )
            self._logger.warn(
                "COCO Evaluator instantiated using config, this is deprecated behavior."
                " Please pass in explicit arguments instead."
            )
            self._tasks = None  # Inferring it from predictions should be better
        else:
            self._tasks = tasks

        self._cpu_device = torch.device("cpu")

        self._metadata = MetadataCatalog.get(dataset_name)
        if not hasattr(self._metadata, "json_file"):
            if output_dir is None:
                raise ValueError(
                    "output_dir must be provided to COCOEvaluator "
                    "for datasets not in COCO format."
                )
            self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")

            cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
            self._metadata.json_file = cache_path
            convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco)

        json_file = PathManager.get_local_path(self._metadata.json_file)
        with contextlib.redirect_stdout(io.StringIO()):
            self._coco_api = COCO(json_file)

        # Test set json files do not contain annotations (evaluation must be
        # performed using the COCO evaluation server).
        self._do_evaluation = "annotations" in self._coco_api.dataset
        if self._do_evaluation:
            self._kpt_oks_sigmas = kpt_oks_sigmas

    def reset(self):
        self._predictions = []

    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
                It is a list of dict. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name", "image_id".
            outputs: the outputs of a COCO model. It is a list of dicts with key
                "instances" that contains :class:`Instances`.
        """
        for input, output in zip(inputs, outputs):
            prediction = {"image_id": input["image_id"]}

            if "instances" in output:
                instances = output["instances"].to(self._cpu_device)
                prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
            if "proposals" in output:
                prediction["proposals"] = output["proposals"].to(self._cpu_device)
            if len(prediction) > 1:
                self._predictions.append(prediction)

    def evaluate(self, img_ids=None):
        """
        Args:
            img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
        """
        if self._distributed:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions

        if len(predictions) == 0:
            self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(predictions, f)

        self._results = OrderedDict()
        if "proposals" in predictions[0]:
            self._eval_box_proposals(predictions)
        if "instances" in predictions[0]:
            self._eval_predictions(predictions, img_ids=img_ids)
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)

    def _tasks_from_predictions(self, predictions):
        """
        Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
""" tasks = {"bbox"} for pred in predictions: if "segmentation" in pred: tasks.add("segm") if "keypoints" in pred: tasks.add("keypoints") return sorted(tasks) def _eval_predictions(self, predictions, img_ids=None): """ Evaluate predictions. Fill self._results with the metrics of the tasks. """ self._logger.info("Preparing results for COCO format ...") coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) tasks = self._tasks or self._tasks_from_predictions(coco_results) if self.force_tasks is not None: tasks = self.force_tasks # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) num_classes = len(all_contiguous_ids) assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} for result in coco_results: category_id = result["category_id"] assert category_id < num_classes, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}]." ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir: if "refcoco" in self.dataset_name: file_path = os.path.join(self._output_dir, "{}_instances_results.json".format(self.dataset_name)) else: file_path = os.path.join(self._output_dir, "coco_instances_results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(coco_results)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info( "Evaluating predictions with {} COCO API...".format( "unofficial" if self._use_fast_impl else "official" ) ) for task in sorted(tasks): assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!" coco_eval = ( _evaluate_predictions_on_coco( self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas, use_fast_impl=self._use_fast_impl, img_ids=img_ids, max_dets_per_image=self._max_dets_per_image, refcoco=self.refcoco ) if len(coco_results) > 0 else None # cocoapi does not handle empty results very well ) if not self.refcoco: res = self._derive_coco_results( coco_eval, task, class_names=self._metadata.get("thing_classes") ) self._results[task] = res else: res = self._derive_refcoco_results(coco_eval, task) self._results[task] = res def _eval_box_proposals(self, predictions): """ Evaluate the box proposals in predictions. Fill self._results with the metrics for "box_proposals" task. """ if self._output_dir: # Saving generated box proposals to file. # Predicted box_proposals are in XYXY_ABS mode. 
bbox_mode = BoxMode.XYXY_ABS.value ids, boxes, objectness_logits = [], [], [] for prediction in predictions: ids.append(prediction["image_id"]) boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) proposal_data = { "boxes": boxes, "objectness_logits": objectness_logits, "ids": ids, "bbox_mode": bbox_mode, } with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: pickle.dump(proposal_data, f) if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating bbox proposals ...") res = {} areas = {"all": "", "small": "s", "medium": "m", "large": "l"} for limit in [100, 1000]: for area, suffix in areas.items(): stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) key = "AR{}@{:d}".format(suffix, limit) res[key] = float(stats["ar"].item() * 100) self._logger.info("Proposal metrics: \n" + create_small_table(res)) self._results["box_proposals"] = res def _derive_coco_results(self, coco_eval, iou_type, class_names=None): """ Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = { "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format(iou_type) + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def _derive_refcoco_results(self, coco_eval, iou_type): """ Derive the desired score numbers from summarized COCOeval. 
Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = {"bbox": ["P@0.5", "P@0.6", "P@0.7", "P@0.8", "P@0.9", "oIoU", "mIoU"], "segm": ["P@0.5", "P@0.6", "P@0.7", "P@0.8", "P@0.9", "oIoU", "mIoU"] }[iou_type] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float("nan") for idx, metric in enumerate(metrics) } ious = np.array([v for (k, v) in coco_eval.ious.items()]) total_intersection_area = coco_eval.total_intersection_area total_union_area = coco_eval.total_union_area iou_list = coco_eval.iou_list # compute metrics results["P@0.5"] = np.sum(ious > 0.5) / len(ious) * 100 results["P@0.6"] = np.sum(ious > 0.6) / len(ious) * 100 results["P@0.7"] = np.sum(ious > 0.7) / len(ious) * 100 results["P@0.8"] = np.sum(ious > 0.8) / len(ious) * 100 results["P@0.9"] = np.sum(ious > 0.9) / len(ious) * 100 results["oIoU"] = total_intersection_area / total_union_area * 100 results["mIoU"] = np.mean(ious) * 100 # if iou_type == "bbox": # results["P@0.5"] = np.sum(ious > 0.5) / len(ious) * 100 # elif iou_type == "segm": # results["mIoU"] = np.mean(ious) * 100 # else: # raise ValueError("Unsupported iou_type!") self._logger.info( "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) ) # results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json(instances, img_id): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): img_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ num_instance = len(instances) if num_instance == 0: return [] boxes = instances.pred_boxes.tensor.numpy() boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) boxes = boxes.tolist() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() has_mask = instances.has("pred_masks") if has_mask: # use RLE to encode the masks, because they are too large and takes memory # since this evaluator stores outputs of the entire dataset rles = [ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] for mask in instances.pred_masks ] for rle in rles: # "counts" is an array encoded by mask_util as a byte-stream. Python3's # json writer which always produces strings cannot serialize a bytestream # unless you decode it. Thankfully, utf-8 works out (which is also what # the pycocotools/_mask.pyx does). rle["counts"] = rle["counts"].decode("utf-8") has_keypoints = instances.has("pred_keypoints") if has_keypoints: keypoints = instances.pred_keypoints results = [] for k in range(num_instance): result = { "image_id": img_id, "category_id": classes[k], "bbox": boxes[k], "score": scores[k], } if has_mask: result["segmentation"] = rles[k] if has_keypoints: # In COCO annotations, # keypoints coordinates are pixel indices. # However our predictions are floating point coordinates. # Therefore we subtract 0.5 to be consistent with the annotation format. # This is the inverse of data loading logic in `datasets/coco.py`. 
keypoints[k][:, :2] -= 0.5 result["keypoints"] = keypoints[k].flatten().tolist() results.append(result) return results # inspired from Detectron: # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. """ # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = Boxes(gt_boxes)
3
2023-12-22 13:31:33+00:00
24k
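The _derive_refcoco_results branch in the row above reduces an array of per-expression IoUs, plus accumulated intersection and union areas, to Precision@K, oIoU, and mIoU. A minimal self-contained sketch of that reduction, with an invented function name and toy inputs for illustration:

import numpy as np

def referring_metrics(ious, total_intersection_area, total_union_area):
    # Precision@K: share of expressions whose prediction clears IoU threshold K
    ious = np.asarray(ious, dtype=np.float64)
    results = {f"P@{t}": float(np.sum(ious > t) / len(ious) * 100) for t in (0.5, 0.6, 0.7, 0.8, 0.9)}
    results["oIoU"] = float(total_intersection_area / total_union_area * 100)  # area-pooled IoU over the split
    results["mIoU"] = float(np.mean(ious) * 100)  # mean of per-expression IoUs
    return results

print(referring_metrics([0.92, 0.55, 0.31], total_intersection_area=180.0, total_union_area=240.0))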
xhuangcv/humannorm
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF from tqdm import tqdm import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh
15,779
scene = trimesh.load(mesh_path) if isinstance(scene, trimesh.Trimesh): mesh = scene elif isinstance(scene, trimesh.scene.Scene): mesh = trimesh.Trimesh() for obj in scene.geometry.values(): mesh = trimesh.util.concatenate([mesh, obj]) else: raise ValueError(f"Unknown mesh type at {mesh_path}.") # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return if self.cfg.sdf_bias != 0.0: threestudio.warn( "shape_init and sdf_bias are both specified, which may lead to unexpected results." 
) get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") scene = trimesh.load(mesh_path) if isinstance(scene, trimesh.Trimesh): mesh = scene elif isinstance(scene, trimesh.scene.Scene): mesh = trimesh.Trimesh() for obj in scene.geometry.values(): mesh = trimesh.util.concatenate([mesh, obj]) else: raise ValueError(f"Unknown mesh type at {mesh_path}.") # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." 
) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
other: BaseGeometry,
1
2023-12-23 12:37:48+00:00
24k
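The initialize_shape routine in the row above regresses the network's predicted SDF onto an analytic target over random points in [-1, 1]^3. A minimal sketch of the same fitting loop, with a stand-in two-layer MLP (an assumption for the example) in place of the full geometry network:

import torch
import torch.nn.functional as F

def sphere_sdf(points, radius=0.5):
    # pseudo signed distance of a sphere, matching the shape_init == "sphere" target
    return (points ** 2).sum(dim=-1, keepdim=True).sqrt() - radius

mlp = torch.nn.Sequential(torch.nn.Linear(3, 64), torch.nn.ReLU(), torch.nn.Linear(64, 1))
optim = torch.optim.Adam(mlp.parameters(), lr=1e-3)
for _ in range(200):
    pts = torch.rand(1024, 3) * 2.0 - 1.0  # uniform samples in [-1, 1]^3, as in the loop above
    loss = F.mse_loss(mlp(pts), sphere_sdf(pts))
    optim.zero_grad()
    loss.backward()
    optim.step()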
dakpinaroglu/Frame2seq
frame2seq/openfold/model/structure_module.py
[ { "identifier": "Linear", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found...
from functools import reduce from operator import mul from typing import Optional, Tuple, Sequence from frame2seq.openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_ from frame2seq.openfold.np.residue_constants import ( restype_rigid_group_default_frame, restype_atom14_to_rigid_group, restype_atom14_mask, restype_atom14_rigid_group_positions, ) from frame2seq.openfold.utils.feats import ( frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames, ) from frame2seq.openfold.utils.precision_utils import is_fp16_enabled from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid from frame2seq.openfold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) import importlib import math import sys import torch import torch.nn as nn
14,435
self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) z[0] = z[0].cpu() # [*, H, N_res, N_res]
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu") self.linear_2 = Linear(self.c_hidden, self.c_hidden, init="final") self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ Implements Algorithm 20, lines 11-14 """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # NOTE: The ReLU's applied to the inputs are absent from the supplement # pseudocode but present in the source. For maximal compatibility with # the pretrained weights, I'm going with the source. # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. 
""" def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) 
z[0] = z[0].cpu() # [*, H, N_res, N_res]
if(is_fp16_enabled()):
6
2023-12-25 09:29:36+00:00
24k
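AngleResnet.forward in the row above emits raw (sin, cos) pairs and projects them onto the unit circle with an epsilon-clamped norm, returning both the unnormalized and normalized tensors. A small sketch of just that normalization step (shapes are illustrative):

import torch

def normalize_angles(s, eps=1e-8):
    # s: [*, no_angles, 2] raw (sin, cos) pairs; the clamp guards against division by ~0
    norm_denom = torch.sqrt(torch.clamp(torch.sum(s ** 2, dim=-1, keepdim=True), min=eps))
    return s, s / norm_denom

unnormalized, angles = normalize_angles(torch.randn(2, 7, 2))
assert torch.allclose(angles.norm(dim=-1), torch.ones(2, 7), atol=1e-5)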
iKala/ievals
ievals/cli/ieval.py
[ { "identifier": "TGI_Evaluator", "path": "ievals/modules/qa_evaluators/tgi.py", "snippet": "class TGI_Evaluator(Evaluator):\n def __init__(\n self,\n choices,\n k,\n ip_addr,\n model_name,\n systemMessageToken=\"<|im_start|>system\\n\",\n messageEndTok...
import os import logging import argparse import pandas as pd from datasets import load_dataset from ievals.modules.qa_evaluators.tgi import TGI_Evaluator from ievals.modules.qa_evaluators.gemini import Gemini_Evaluator from ievals.modules.qa_evaluators.claude import Claude_Evaluator from ievals.modules.qa_evaluators.azure import Azure_Evaluator from ievals.modules.qa_evaluators.oai_complete import GPT_Evaluator from ievals.modules.qa_evaluators.chatgpt import ChatGPT_Evaluator from ievals.modules.qa_evaluators.hf_chat import HF_Chat_Evaluator from ievals.modules.qa_evaluators.hf_base import ( Qwen_Evaluator, ) # we only use this for qwen base model from ievals.modules.qa_evaluators.ali_dashscope import DashScope_Evaluator from ievals.exp_executer import run_exp
20,400
elif series == "openai_complete": return GPT_Evaluator elif series == "gemini": return Gemini_Evaluator elif series == "hf_chat": # implement the chat function return HF_Chat_Evaluator elif series == "tgi": # implement the chat function return TGI_Evaluator l_model_name = model_name.lower() if "gemini" in model_name: return Gemini_Evaluator if "gpt-" in model_name: # its possible to match gpt-3.5-instruct, # but we don't really want to sacrifice more fixed params for that return ChatGPT_Evaluator elif "claude" in model_name: return Claude_Evaluator elif "Qwen" in model_name: if "chat" in l_model_name: return HF_Chat_Evaluator else: return Qwen_Evaluator elif "qwen" in model_name: return DashScope_Evaluator return TGI_Evaluator def get_parser(): parser = argparse.ArgumentParser(description="Run TMMLU+ evals") parser.add_argument("model", type=str, help="Name of the eval model") parser.add_argument("--series", type=str, default="") parser.add_argument("--dataset", type=str, default="ikala/tmmluplus") parser.add_argument("--choices", type=str, default="A,B,C,D") parser.add_argument("--top_k", type=int, default=0) parser.add_argument("--api_key", type=str, default=None) parser.add_argument("--max_samples", type=int, default=None) parser.add_argument("--cache", action=argparse.BooleanOptionalAction, default=True) parser.add_argument( "--switch_zh_hans", action=argparse.BooleanOptionalAction, default=False ) parser.add_argument( "--ip_addr", type=str, default="", help="IP:PORT for text-generation-inference server", ) parser.add_argument("--sys_token", type=str, default="", help="system prompt token") parser.add_argument("--usr_token", type=str, default="", help="user starting token") parser.add_argument( "--ast_token", type=str, default="", help="assistant starting token" ) parser.add_argument( "--eos_token", type=str, default="", help="end-of-sentence token usually its <|endoftext|> or </s>, but you have to verify from hf model tokenizer.json", ) parser.add_argument("--hf_cache", type=str, default="", help="huggingface cache") return parser def main(): parser = get_parser() args = parser.parse_args() model_name = args.model if model_name == "supported": valid_model_names, _ = get_model_config() print(valid_model_names) exit(0) valid_choices = args.choices.split(",") eval_cls = get_evaluator(model_name, args.series) if "TGI" in str(eval_cls): if len(args.usr_token): prompt_config = { "systemMessageToken": args.sys_token, "userMessageToken": args.usr_token, "messageEndToken": args.eos_token, "assistantMessageToken": args.ast_token, } else: prompt_config = get_tgi_prompt_config(model_name) eval_ins = eval_cls( choices=valid_choices, k=args.top_k, ip_addr=args.ip_addr, model_name=model_name, switch_zh_hans=args.switch_zh_hans, **prompt_config, ) elif ("HF_Chat" in str(eval_cls)) or ("Qwen" in str(eval_cls)): eval_ins = eval_cls( choices=valid_choices, k=args.top_k, api_key=args.api_key, model_name=model_name, switch_zh_hans=args.switch_zh_hans, ) else: eval_ins = eval_cls( choices=valid_choices, k=args.top_k, api_key=args.api_key, model_name=model_name, switch_zh_hans=args.switch_zh_hans, ) postfix = model_name.split("/")[-1] if args.top_k > 0: postfix += f"_top_{args.top_k}" cache_path = None if args.cache: cache_path = ".cache" if args.top_k > 0: cache_path += f"_top_{args.top_k}"
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete": return GPT_Evaluator elif series == "gemini": return Gemini_Evaluator elif series == "hf_chat": # implement the chat function return HF_Chat_Evaluator elif series == "tgi": # implement the chat function return TGI_Evaluator l_model_name = model_name.lower() if "gemini" in model_name: return Gemini_Evaluator if "gpt-" in model_name: # its possible to match gpt-3.5-instruct, # but we don't really want to sacrifice more fixed params for that return ChatGPT_Evaluator elif "claude" in model_name: return Claude_Evaluator elif "Qwen" in model_name: if "chat" in l_model_name: return HF_Chat_Evaluator else: return Qwen_Evaluator elif "qwen" in model_name: return DashScope_Evaluator return TGI_Evaluator def get_parser(): parser = argparse.ArgumentParser(description="Run TMMLU+ evals") parser.add_argument("model", type=str, help="Name of the eval model") parser.add_argument("--series", type=str, default="") parser.add_argument("--dataset", type=str, default="ikala/tmmluplus") parser.add_argument("--choices", type=str, default="A,B,C,D") parser.add_argument("--top_k", type=int, default=0) parser.add_argument("--api_key", type=str, default=None) parser.add_argument("--max_samples", type=int, default=None) parser.add_argument("--cache", action=argparse.BooleanOptionalAction, default=True) parser.add_argument( "--switch_zh_hans", action=argparse.BooleanOptionalAction, default=False ) parser.add_argument( "--ip_addr", type=str, default="", help="IP:PORT for text-generation-inference server", ) parser.add_argument("--sys_token", type=str, default="", help="system prompt token") parser.add_argument("--usr_token", type=str, default="", help="user starting token") parser.add_argument( "--ast_token", type=str, default="", help="assistant starting token" ) parser.add_argument( "--eos_token", type=str, default="", help="end-of-sentence token usually its <|endoftext|> or </s>, but you have to verify from hf model tokenizer.json", ) parser.add_argument("--hf_cache", type=str, default="", help="huggingface cache") return parser def main(): parser = get_parser() args = parser.parse_args() model_name = args.model if model_name == "supported": valid_model_names, _ = get_model_config() print(valid_model_names) exit(0) valid_choices = args.choices.split(",") eval_cls = get_evaluator(model_name, args.series) if "TGI" in str(eval_cls): if len(args.usr_token): prompt_config = { "systemMessageToken": args.sys_token, 
"userMessageToken": args.usr_token, "messageEndToken": args.eos_token, "assistantMessageToken": args.ast_token, } else: prompt_config = get_tgi_prompt_config(model_name) eval_ins = eval_cls( choices=valid_choices, k=args.top_k, ip_addr=args.ip_addr, model_name=model_name, switch_zh_hans=args.switch_zh_hans, **prompt_config, ) elif ("HF_Chat" in str(eval_cls)) or ("Qwen" in str(eval_cls)): eval_ins = eval_cls( choices=valid_choices, k=args.top_k, api_key=args.api_key, model_name=model_name, switch_zh_hans=args.switch_zh_hans, ) else: eval_ins = eval_cls( choices=valid_choices, k=args.top_k, api_key=args.api_key, model_name=model_name, switch_zh_hans=args.switch_zh_hans, ) postfix = model_name.split("/")[-1] if args.top_k > 0: postfix += f"_top_{args.top_k}" cache_path = None if args.cache: cache_path = ".cache" if args.top_k > 0: cache_path += f"_top_{args.top_k}"
run_exp(
7
2023-12-24 08:00:38+00:00
24k
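When no --series is given, get_evaluator in the row above falls back to substring matching on the model name. A simplified mirror of that fallback chain, returning class names as strings; note the real code distinguishes "Qwen" (HF) from lowercase "qwen" (DashScope) by letter case, which this sketch preserves:

def pick_evaluator_name(model_name: str) -> str:
    if "gemini" in model_name:
        return "Gemini_Evaluator"
    if "gpt-" in model_name:
        return "ChatGPT_Evaluator"
    if "claude" in model_name:
        return "Claude_Evaluator"
    if "Qwen" in model_name:
        return "HF_Chat_Evaluator" if "chat" in model_name.lower() else "Qwen_Evaluator"
    if "qwen" in model_name:
        return "DashScope_Evaluator"
    return "TGI_Evaluator"

assert pick_evaluator_name("gpt-4") == "ChatGPT_Evaluator"
assert pick_evaluator_name("Qwen-7B-Chat") == "HF_Chat_Evaluator"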
kraina-ai/quackosm
tests/test_pbf_file_reader.py
[ { "identifier": "FEATURES_INDEX", "path": "quackosm/_constants.py", "snippet": "FEATURES_INDEX = \"feature_id\"" }, { "identifier": "OsmTagsFilter", "path": "quackosm/_osm_tags_filters.py", "snippet": "def merge_osm_tags_filter(osm_tags_filter: OsmTagsFilter) -> OsmTagsFilter: ...\ndef m...
import platform import re import subprocess import warnings import duckdb import geopandas as gpd import pandas as pd import pyogrio import pytest import six from collections.abc import Iterable from pathlib import Path from typing import Optional, Union, cast from unittest import TestCase from parametrization import Parametrization as P from shapely import hausdorff_distance from shapely.geometry import MultiPolygon, Polygon from shapely.geometry.base import BaseGeometry from shapely.ops import unary_union from srai.geometry import remove_interiors from srai.loaders.download import download_file from srai.loaders.osm_loaders.filters import GEOFABRIK_LAYERS, HEX2VEC_FILTER from quackosm._constants import FEATURES_INDEX from quackosm._osm_tags_filters import OsmTagsFilter from quackosm.pbf_file_reader import PbfFileReader
17,849
"""Tests for PbfFileReader.""" ut = TestCase() LFS_DIRECTORY_URL = "https://github.com/kraina-ai/srai-test-files/raw/main/files/" @pytest.mark.parametrize( # type: ignore "test_file_name,query,expected_result_length,expected_features_columns_length", [ ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", None, 678, 271, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", None, 1, 22, ), ("529cdcbb7a3cc103658ef31b39bed24984e421127d319c867edf2f86ff3bb098.osm.pbf", None, 0, 0), ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", HEX2VEC_FILTER, 97, 10, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", HEX2VEC_FILTER, 0, 0, ), ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", GEOFABRIK_LAYERS, 433, 22, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", GEOFABRIK_LAYERS, 0, 0, ), ], ) def test_pbf_reader( test_file_name: str, query: OsmTagsFilter, expected_result_length: int, expected_features_columns_length: int, ): """Test proper files loading in `PbfFileReader`."""
"""Tests for PbfFileReader.""" ut = TestCase() LFS_DIRECTORY_URL = "https://github.com/kraina-ai/srai-test-files/raw/main/files/" @pytest.mark.parametrize( # type: ignore "test_file_name,query,expected_result_length,expected_features_columns_length", [ ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", None, 678, 271, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", None, 1, 22, ), ("529cdcbb7a3cc103658ef31b39bed24984e421127d319c867edf2f86ff3bb098.osm.pbf", None, 0, 0), ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", HEX2VEC_FILTER, 97, 10, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", HEX2VEC_FILTER, 0, 0, ), ( "d17f922ed15e9609013a6b895e1e7af2d49158f03586f2c675d17b760af3452e.osm.pbf", GEOFABRIK_LAYERS, 433, 22, ), ( "eb2848d259345ce7dfe8af34fd1ab24503bb0b952e04e872c87c55550fa50fbf.osm.pbf", GEOFABRIK_LAYERS, 0, 0, ), ], ) def test_pbf_reader( test_file_name: str, query: OsmTagsFilter, expected_result_length: int, expected_features_columns_length: int, ): """Test proper files loading in `PbfFileReader`."""
features_gdf = PbfFileReader(tags_filter=query).get_features_gdf(
2
2023-12-28 11:26:41+00:00
24k
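The parametrized test in the row above drives PbfFileReader through tag filters and checks feature and column counts. A minimal usage sketch, assuming get_features_gdf accepts PBF paths via a file_paths keyword as the test's call pattern suggests (the file name is a placeholder):

from quackosm.pbf_file_reader import PbfFileReader

reader = PbfFileReader(tags_filter={"building": True})  # keep only building features
features_gdf = reader.get_features_gdf(file_paths=["example.osm.pbf"])
print(len(features_gdf), list(features_gdf.columns))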
KyanChen/TTP
mmdet/configs/rtmdet/rtmdet_ins_s_8xb32_300e_coco.py
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of t...
from mmengine.config import read_base from .rtmdet_ins_l_8xb32_300e_coco import * from mmcv.transforms.loading import LoadImageFromFile from mmcv.transforms.processing import RandomResize from mmengine.hooks.ema_hook import EMAHook from mmdet.datasets.transforms.formatting import PackDetInputs from mmdet.datasets.transforms.loading import (FilterAnnotations, LoadAnnotations) from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug) from mmdet.engine.hooks.pipeline_switch_hook import PipelineSwitchHook from mmdet.models.layers.ema import ExpMomentumEMA
18,215
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict( type=RandomCrop, crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True), dict(type=YOLOXHSVRandomAug),
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict( type=RandomCrop, crop_size=(640, 640), recompute_bbox=True, allow_negative_crop=True), dict(type=YOLOXHSVRandomAug),
dict(type=RandomFlip, prob=0.5),
7
2023-12-23 08:36:47+00:00
24k
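The config in the row above derives RTMDet-Ins-S from the L variant chiefly through widen_factor=0.5, which is why the neck's in_channels drop to [128, 256, 512]. A one-line check of that arithmetic, assuming the L variant's CSPNeXt stage widths are [256, 512, 1024]:

l_stage_channels = [256, 512, 1024]  # assumed CSPNeXt-L output widths
widen_factor = 0.5
assert [int(c * widen_factor) for c in l_stage_channels] == [128, 256, 512]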
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, ...
import platform import os import torch import torch.distributed as dist import logging import argparse import datetime import gc import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from config import config from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, AudioVisemesLoader, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, WavLMDiscriminator, VisemesNet, ) from losses import ( generator_loss, discriminator_loss, feature_loss, kl_loss, WavLMLoss, ) from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
15,281
rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() scheduler_wd.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc, net_wd, wl = nets optim_g, optim_d, optim_dur_disc, optim_wd = optims scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() net_wd.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_, logw_sdp), g, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, )
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logger = logging.getLogger(__name__) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If you encounter training problems, please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 global_step = 0 global_visemes_step = 0 def run_only_visemes(hps): # Use the simplest single-machine mode: train only the parameters of the fully connected VisemesFCNet that maps the latent z to visemes global global_visemes_step torch.manual_seed(hps.train.seed) torch.cuda.set_device(0) train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data) train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True) eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False) net_v = VisemesNet(hps.model.hidden_channels).cuda() latest_model_path = utils.latest_checkpoint_path(hps.model_dir, "V_*.pth") if latest_model_path is not None: _, optim_d, _, epoch_str = utils.load_checkpoint(latest_model_path, net_v, None, skip_optimizer=False) else : epoch_str = 1 global_visemes_step = 0 net_v.init_weights() optim_v = torch.optim.AdamW( net_v.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_v.param_groups[0]['initial_lr'] = hps.train.learning_rate scheduler_v = torch.optim.lr_scheduler.ExponentialLR(optim_v, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2, ) scaler = GradScaler(enabled=hps.train.bf16_run) for epoch in range(epoch_str, hps.train.epochs + 1): train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler) scheduler_v.step() if epoch % hps.train.eval_interval == 0: eval_visemes_only(epoch, hps, net_v, eval_loader) utils.save_checkpoint(net_v, optim_v,hps.train.learning_rate , epoch, os.path.join(hps.model_dir, "V_{}.pth".format(epoch))) def train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler): for batch_idx, (spec, visemes) in tqdm(enumerate(train_loader)): spec, visemes = spec.cuda(), visemes.cuda() with autocast(enabled=hps.train.bf16_run): # generate visemes_hat from z via VisemesNet and compute the mean squared error visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) optim_v.zero_grad() scaler.scale(visemes_hat_mse).backward() scaler.unscale_(optim_v) grad_norm_v = commons.clip_grad_value_(net_v.parameters(), None) scaler.step(optim_v) global global_visemes_step global_visemes_step += 1 if batch_idx % hps.train.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tvisemes_hat_mse: {:.6f}\tgrad_norm_v: {:.6f}'.format( epoch, batch_idx * len(spec), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), visemes_hat_mse.item(), grad_norm_v)) def get_visemes_mse(visemes, visemes_hat): if visemes.shape[-1] != visemes_hat.shape[-1]: # if the last dimensions of y and x differ visemes_hat = F.interpolate(visemes_hat, size=visemes.shape[-1], mode='linear', align_corners=True) # linearly interpolate x so its shape matches y visemes_hat_mse = torch.mean(torch.pow(visemes_hat - visemes, 2)) return visemes_hat_mse def eval_visemes_only(epoch, hps, net_v, eval_loader): net_v.eval() with torch.no_grad(): visemes_hat_mse_sum = 0.0 for batch_idx, (spec, visemes) in tqdm(enumerate(eval_loader)): spec, visemes = spec.cuda(), visemes.cuda() # generate visemes_hat from z via VisemesFCNet and compute the mean squared error visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) visemes_hat_mse_sum += visemes_hat_mse # print('visemes_hat_mse', visemes_hat_mse) break visemes_hat_mse_avg = visemes_hat_mse_sum / (batch_idx + 1) log_str = '------------------ eval epoch: {} visemes_hat_mse_avg: {:.6f}'.format(epoch, visemes_hat_mse_avg) print(log_str) logger.warning(log_str) net_v.train() def run(): # Parse environment variables envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("Loading setting {} from config".format(str(env_value))) os.environ[env_name] = str(env_value) print( "Loaded environment variables \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) backend = "nccl" if platform.system() == "Windows": backend = "gloo" # If Windows, switch to gloo backend. dist.init_process_group( backend=backend, init_method="env://", timeout=datetime.timedelta(seconds=300), ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # Parse command-line / config.yml configuration # hps = utils.get_hparams() parser = argparse.ArgumentParser() # Command-line configuration is discouraged unless necessary; please use the config.yml file parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="Path to the dataset folder. Note that data is no longer placed under /logs by default; if you configure it from the command line, declare the path relative to the root directory", default=config.dataset_path, ) parser.add_argument('--visemes', dest='visemes', action="store_true", default=False, help="train visemes only, lock the encoder and decoder") args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir set_logger(hps) if args.visemes: run_only_visemes(hps) # Check whether the paths are identical if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, 
num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1), shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) else: net_dur_disc = None if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) if getattr(hps.train, "freeze_ZH_bert", False): print("Freezing ZH bert encoder !!!") for param in net_g.enc_p.bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_EN_bert", False): print("Freezing EN bert encoder !!!") for param in net_g.enc_p.en_bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_JP_bert", False): print("Freezing JP bert encoder !!!") for param in net_g.enc_p.ja_bert_proj.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) net_wd = WavLMDiscriminator( hps.model.slm.hidden, hps.model.slm.nlayers, hps.model.slm.initial_channel ).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_wd = torch.optim.AdamW( net_wd.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank], bucket_cap_mb=512) net_d = DDP(net_d, device_ids=[local_rank], bucket_cap_mb=512) net_wd = DDP(net_wd, device_ids=[local_rank], bucket_cap_mb=512) if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], bucket_cap_mb=512, ) # Download the base model if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) dur_resume_lr = hps.train.learning_rate wd_resume_lr = hps.train.learning_rate if net_dur_disc is not 
None: try: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr except Exception: print("Initialize dur_disc") try: _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr epoch_str = max(epoch_str, 1) # global_step = (epoch_str - 1) * len(train_loader) global_step = int( utils.get_steps(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth")) ) print( f"******************Found existing checkpoints: epoch {epoch_str}, global step {global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 try: _, optim_wd, wd_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "WD_*.pth"), net_wd, optim_wd, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_wd.param_groups[0].get("initial_lr"): optim_wd.param_groups[0]["initial_lr"] = wd_resume_lr except Exception as e: print(e) scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_wd = torch.optim.lr_scheduler.ExponentialLR( optim_wd, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.bf16_run) wl = WavLMLoss( hps.model.slm.model, net_wd, hps.data.sampling_rate, hps.model.slm.sr, ).to(local_rank) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() scheduler_wd.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc, net_wd, wl = nets optim_g, optim_d, optim_dur_disc, optim_wd = optims scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers
train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() net_wd.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_, logw_sdp), g, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, )
mel = spec_to_mel_torch(
16
2023-12-27 03:09:11+00:00
24k
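The gold completion for this record opens a `spec_to_mel_torch(` call: in VITS-style training the sliced linear spectrogram is projected to a mel spectrogram before the mel reconstruction loss is computed. A minimal sketch of such a conversion, assuming a librosa mel filterbank and log compression; the helper name and signature here are illustrative, not this repo's exact API:

```python
import torch
from librosa.filters import mel as librosa_mel_fn

def spec_to_mel(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
    # spec: [batch, n_fft // 2 + 1, frames] linear-magnitude spectrogram
    mel_basis = torch.from_numpy(
        librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
    ).to(dtype=spec.dtype, device=spec.device)
    mel = torch.matmul(mel_basis, spec)  # project onto mel bands: [batch, num_mels, frames]
    return torch.log(torch.clamp(mel, min=1e-5))  # dynamic-range compression
```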
chinhsuanwu/ifusion-threestudio
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15,266
).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative sign here # because in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
other: BaseGeometry,
1
2023-12-27 20:30:33+00:00
24k
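Both code fields in this record initialize the grid SDF from a mesh via `pysdf`, negating the query result because pysdf reports positive distance inside the shape. A self-contained sketch of that pattern on a hypothetical uniform lattice (the repo instead queries its MarchingTetrahedraHelper grid vertices), assuming `mesh_path` loads as a single Trimesh:

```python
import numpy as np
import torch
import trimesh
from pysdf import SDF

def sdf_from_mesh(mesh_path: str, resolution: int = 64) -> torch.Tensor:
    mesh = trimesh.load(mesh_path)
    mesh.vertices = mesh.vertices - mesh.vertices.mean(0)        # move to center
    mesh.vertices = mesh.vertices / np.abs(mesh.vertices).max()  # fit into [-1, 1]
    sdf = SDF(mesh.vertices, mesh.faces)                         # pysdf query object
    axis = np.linspace(-1.0, 1.0, resolution, dtype=np.float32)
    pts = np.stack(np.meshgrid(axis, axis, axis, indexing="ij"), -1).reshape(-1, 3)
    # negate: pysdf is positive inside, the usual SDF convention is negative inside
    return torch.from_numpy(-sdf(pts))[:, None]                  # [N, 1]
```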
open-mmlab/Amphion
modules/wenet_extractor/squeezeformer/encoder.py
[ { "identifier": "DepthwiseConv2dSubsampling4", "path": "modules/wenet_extractor/squeezeformer/subsampling.py", "snippet": "class DepthwiseConv2dSubsampling4(BaseSubsampling):\n \"\"\"Depthwise Convolutional 2D subsampling (to 1/4 length).\n\n Args:\n idim (int): Input dimension.\n od...
import torch import torch.nn as nn from typing import Tuple, Union, Optional, List from modules.wenet_extractor.squeezeformer.subsampling import ( DepthwiseConv2dSubsampling4, TimeReductionLayer1D, TimeReductionLayer2D, TimeReductionLayerStream, ) from modules.wenet_extractor.squeezeformer.encoder_layer import ( SqueezeformerEncoderLayer, ) from modules.wenet_extractor.transformer.embedding import RelPositionalEncoding from modules.wenet_extractor.transformer.attention import MultiHeadedAttention from modules.wenet_extractor.squeezeformer.attention import ( RelPositionMultiHeadedAttention, ) from modules.wenet_extractor.squeezeformer.positionwise_feed_forward import ( PositionwiseFeedForward, ) from modules.wenet_extractor.squeezeformer.convolution import ConvolutionModule from modules.wenet_extractor.utils.mask import make_pad_mask, add_optional_chunk_mask from modules.wenet_extractor.utils.common import get_activation
14,578
pos_enc_layer_type (str): Self attention type. time_reduction_layer_type (str): Conv1d or Conv2d reduction layer. do_rel_shift (bool): Whether to do relative shift operation on rel-attention module. cnn_module_kernel (int): Kernel size of the convolution module. activation_type (str): Encoder activation function type. adaptive_scale (bool): Whether to use adaptive scale. init_weights (bool): Whether to initialize weights. causal (bool): whether to use causal convolution or not. """ super(SqueezeformerEncoder, self).__init__() self.global_cmvn = global_cmvn self.reduce_idx: Optional[Union[int, List[int]]] = ( [reduce_idx] if type(reduce_idx) == int else reduce_idx ) self.recover_idx: Optional[Union[int, List[int]]] = ( [recover_idx] if type(recover_idx) == int else recover_idx ) self.check_ascending_list() if reduce_idx is None: self.time_reduce = None else: if recover_idx is None: self.time_reduce = "normal" # no recovery at the end else: self.time_reduce = "recover" # recovery at the end assert len(self.reduce_idx) == len(self.recover_idx) self.reduce_stride = 2 self._output_size = output_size self.normalize_before = normalize_before self.static_chunk_size = static_chunk_size self.use_dynamic_chunk = use_dynamic_chunk self.use_dynamic_left_chunk = use_dynamic_left_chunk self.pos_enc_layer_type = pos_enc_layer_type activation = get_activation(activation_type) # self-attention module definition if pos_enc_layer_type != "rel_pos": encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, output_size, attention_dropout_rate, ) else: encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, encoder_dim, attention_dropout_rate, do_rel_shift, adaptive_scale, init_weights, ) # feed-forward module definition positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( encoder_dim, encoder_dim * feed_forward_expansion_factor, feed_forward_dropout_rate, activation, adaptive_scale, init_weights, ) # convolution module definition convolution_layer = ConvolutionModule convolution_layer_args = ( encoder_dim, cnn_module_kernel, activation, cnn_norm_type, causal, True, adaptive_scale, init_weights, ) self.embed = DepthwiseConv2dSubsampling4( 1, encoder_dim, RelPositionalEncoding(encoder_dim, dropout_rate=0.1), dw_stride, input_size, input_dropout_rate, init_weights, ) self.preln = nn.LayerNorm(encoder_dim) self.encoders = torch.nn.ModuleList( [ SqueezeformerEncoderLayer( encoder_dim, encoder_selfattn_layer(*encoder_selfattn_layer_args), positionwise_layer(*positionwise_layer_args), convolution_layer(*convolution_layer_args), positionwise_layer(*positionwise_layer_args), normalize_before, dropout, concat_after, ) for _ in range(num_blocks) ] ) if time_reduction_layer_type == "conv1d": time_reduction_layer = TimeReductionLayer1D time_reduction_layer_args = { "channel": encoder_dim, "out_dim": encoder_dim, } elif time_reduction_layer_type == "stream": time_reduction_layer = TimeReductionLayerStream time_reduction_layer_args = { "channel": encoder_dim, "out_dim": encoder_dim, } else:
# This module is from [WeNet](https://github.com/wenet-e2e/wenet). # ## Citations # ```bibtex # @inproceedings{yao2021wenet, # title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit}, # author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin}, # booktitle={Proc. Interspeech}, # year={2021}, # address={Brno, Czech Republic}, # organization={IEEE} # } # @article{zhang2022wenet, # title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit}, # author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei}, # journal={arXiv preprint arXiv:2203.15455}, # year={2022} # } # ``` class SqueezeformerEncoder(nn.Module): def __init__( self, input_size: int = 80, encoder_dim: int = 256, output_size: int = 256, attention_heads: int = 4, num_blocks: int = 12, reduce_idx: Optional[Union[int, List[int]]] = 5, recover_idx: Optional[Union[int, List[int]]] = 11, feed_forward_expansion_factor: int = 4, dw_stride: bool = False, input_dropout_rate: float = 0.1, pos_enc_layer_type: str = "rel_pos", time_reduction_layer_type: str = "conv1d", do_rel_shift: bool = True, feed_forward_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.1, cnn_module_kernel: int = 31, cnn_norm_type: str = "batch_norm", dropout: float = 0.1, causal: bool = False, adaptive_scale: bool = True, activation_type: str = "swish", init_weights: bool = True, global_cmvn: torch.nn.Module = None, normalize_before: bool = False, use_dynamic_chunk: bool = False, concat_after: bool = False, static_chunk_size: int = 0, use_dynamic_left_chunk: bool = False, ): """Construct SqueezeformerEncoder Args: input_size to use_dynamic_chunk, see Transformer BaseEncoder. encoder_dim (int): The hidden dimension of encoder layer. output_size (int): The output dimension of final projection layer. attention_heads (int): Num of attention head in attention module. num_blocks (int): Num of encoder layers. reduce_idx Optional[Union[int, List[int]]]: reduce layer index, from 40ms to 80ms per frame. recover_idx Optional[Union[int, List[int]]]: recover layer index, from 80ms to 40ms per frame. feed_forward_expansion_factor (int): Enlarge coefficient of FFN. dw_stride (bool): Whether to do depthwise convolution on the subsampling module. input_dropout_rate (float): Dropout rate of input projection layer. pos_enc_layer_type (str): Self attention type. time_reduction_layer_type (str): Conv1d or Conv2d reduction layer. do_rel_shift (bool): Whether to do relative shift operation on rel-attention module. cnn_module_kernel (int): Kernel size of the convolution module. activation_type (str): Encoder activation function type. adaptive_scale (bool): Whether to use adaptive scale. init_weights (bool): Whether to initialize weights. causal (bool): whether to use causal convolution or not. 
""" super(SqueezeformerEncoder, self).__init__() self.global_cmvn = global_cmvn self.reduce_idx: Optional[Union[int, List[int]]] = ( [reduce_idx] if type(reduce_idx) == int else reduce_idx ) self.recover_idx: Optional[Union[int, List[int]]] = ( [recover_idx] if type(recover_idx) == int else recover_idx ) self.check_ascending_list() if reduce_idx is None: self.time_reduce = None else: if recover_idx is None: self.time_reduce = "normal" # no recovery at the end else: self.time_reduce = "recover" # recovery at the end assert len(self.reduce_idx) == len(self.recover_idx) self.reduce_stride = 2 self._output_size = output_size self.normalize_before = normalize_before self.static_chunk_size = static_chunk_size self.use_dynamic_chunk = use_dynamic_chunk self.use_dynamic_left_chunk = use_dynamic_left_chunk self.pos_enc_layer_type = pos_enc_layer_type activation = get_activation(activation_type) # self-attention module definition if pos_enc_layer_type != "rel_pos": encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, output_size, attention_dropout_rate, ) else: encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, encoder_dim, attention_dropout_rate, do_rel_shift, adaptive_scale, init_weights, ) # feed-forward module definition positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( encoder_dim, encoder_dim * feed_forward_expansion_factor, feed_forward_dropout_rate, activation, adaptive_scale, init_weights, ) # convolution module definition convolution_layer = ConvolutionModule convolution_layer_args = ( encoder_dim, cnn_module_kernel, activation, cnn_norm_type, causal, True, adaptive_scale, init_weights, ) self.embed = DepthwiseConv2dSubsampling4( 1, encoder_dim, RelPositionalEncoding(encoder_dim, dropout_rate=0.1), dw_stride, input_size, input_dropout_rate, init_weights, ) self.preln = nn.LayerNorm(encoder_dim) self.encoders = torch.nn.ModuleList( [ SqueezeformerEncoderLayer( encoder_dim, encoder_selfattn_layer(*encoder_selfattn_layer_args), positionwise_layer(*positionwise_layer_args), convolution_layer(*convolution_layer_args), positionwise_layer(*positionwise_layer_args), normalize_before, dropout, concat_after, ) for _ in range(num_blocks) ] ) if time_reduction_layer_type == "conv1d": time_reduction_layer = TimeReductionLayer1D time_reduction_layer_args = { "channel": encoder_dim, "out_dim": encoder_dim, } elif time_reduction_layer_type == "stream": time_reduction_layer = TimeReductionLayerStream time_reduction_layer_args = { "channel": encoder_dim, "out_dim": encoder_dim, } else:
time_reduction_layer = TimeReductionLayer2D
2
2023-11-15 09:19:27+00:00
24k
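The gold line for this record falls through to the Conv2d time-reduction branch. As a rough illustration of what these layers do (halve the frame rate mid-encoder, to be recovered later), here is a minimal stride-2 depthwise reduction; a simplified stand-in, not the repo's actual TimeReductionLayer2D:

```python
import torch
import torch.nn as nn

class SimpleTimeReduction(nn.Module):
    """Halve the frame rate with a stride-2 depthwise convolution."""

    def __init__(self, channels: int, kernel_size: int = 5, stride: int = 2):
        super().__init__()
        self.dw_conv = nn.Conv1d(channels, channels, kernel_size, stride=stride,
                                 padding=kernel_size // 2, groups=channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [batch, time, channels] -> Conv1d expects [batch, channels, time]
        return self.dw_conv(x.transpose(1, 2)).transpose(1, 2)

x = torch.randn(2, 100, 256)
print(SimpleTimeReduction(256)(x).shape)  # torch.Size([2, 50, 256])
```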
BobaZooba/xllm
src/xllm/cli/quantize.py
[ { "identifier": "Config", "path": "src/xllm/core/config.py", "snippet": "class Config:\n \"\"\"\n The `Config` class serves as a comprehensive configuration schema for managing various parameters required during\n the setup and execution of experiments relating to language models, such as train...
from typing import Type from transformers import HfArgumentParser from ..core.config import Config from ..quantization.quantizer import Quantizer from ..run.quantize import quantize from ..utils.cli import setup_cli
16,282
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def cli_run_quantize( config_cls: Type[Config] = Config, ) -> Quantizer: """ Provides a command-line interface (CLI) entry point for the quantization process of a pre-trained language model. This function allows users to execute model quantization from the command line, setting up the appropriate configuration through parsed arguments and invoking the `quantize` function. It also sets up the CLI environment, including logging configurations. Args: config_cls (Type[Config], defaults to `Config`): The configuration class used for parsing command-line arguments into a configuration object. This class should contain the parameters necessary for the quantization process such as model and tokenizer paths, and quantization specifications. Returns: Quantizer: An instance of the `Quantizer` class that has conducted the quantization process and stores the resulting quantized model. The function undergoes the following procedure: - Initializes an `HfArgumentParser` with `config_cls` for handling command-line arguments. - Parses the command-line arguments into an instance of the configuration object. - Sets up CLI interactions, which include logging outputs to a specified file (defaults to `./xllm_gptq_quantize.log`). - Executes the `quantize` function with the parsed configuration to perform model quantization. - Returns the `Quantizer` instance that now holds the quantized model and associated configurations. When this script is run as a main program (that is, `__name__ == "__main__"`), it will perform the following: - Parse CLI arguments into a configuration object using the provided `config_cls`. - Run the quantization process with logging to the file `xllm_gptq_quantize.log`. - Return the `Quantizer` instance with the quantized model ready for use or distribution. Example CLI usage: ```sh python cli_run_quantize.py --model_name_or_path my_model --gptq_bits 4 ``` Note: This function is designed to facilitate the simplification of the model quantization workflow through the CLI, intended for direct execution from the terminal or within scripting environments. """ parser = HfArgumentParser(config_cls) config = parser.parse_args_into_dataclasses()[0] setup_cli(config=config, logger_path="./xllm_gptq_quantize.log")
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def cli_run_quantize( config_cls: Type[Config] = Config, ) -> Quantizer: """ Provides a command-line interface (CLI) entry point for the quantization process of a pre-trained language model. This function allows users to execute model quantization from the command line, setting up the appropriate configuration through parsed arguments and invoking the `quantize` function. It also sets up the CLI environment, including logging configurations. Args: config_cls (Type[Config], defaults to `Config`): The configuration class used for parsing command-line arguments into a configuration object. This class should contain the parameters necessary for the quantization process such as model and tokenizer paths, and quantization specifications. Returns: Quantizer: An instance of the `Quantizer` class that has conducted the quantization process and stores the resulting quantized model. The function undergoes the following procedure: - Initializes an `HfArgumentParser` with `config_cls` for handling command-line arguments. - Parses the command-line arguments into an instance of the configuration object. - Sets up CLI interactions, which include logging outputs to a specified file (defaults to `./xllm_gptq_quantize.log`). - Executes the `quantize` function with the parsed configuration to perform model quantization. - Returns the `Quantizer` instance that now holds the quantized model and associated configurations. When this script is run as a main program (that is, `__name__ == "__main__"`), it will perform the following: - Parse CLI arguments into a configuration object using the provided `config_cls`. - Run the quantization process with logging to the file `xllm_gptq_quantize.log`. - Return the `Quantizer` instance with the quantized model ready for use or distribution. Example CLI usage: ```sh python cli_run_quantize.py --model_name_or_path my_model --gptq_bits 4 ``` Note: This function is designed to facilitate the simplification of the model quantization workflow through the CLI, intended for direct execution from the terminal or within scripting environments. """ parser = HfArgumentParser(config_cls) config = parser.parse_args_into_dataclasses()[0] setup_cli(config=config, logger_path="./xllm_gptq_quantize.log")
quantizer = quantize(config=config)
2
2023-11-10 17:55:03+00:00
24k
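This last record is a thin CLI wrapper around HfArgumentParser: a dataclass config is parsed from the command line and handed to a run function. The same parse-then-run pattern in isolation, using a hypothetical MyConfig rather than xllm's actual Config fields:

```python
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class MyConfig:
    model_name_or_path: str = field(default="my_model")
    gptq_bits: int = field(default=4)

def run(config: MyConfig) -> None:
    # stand-in for quantize(config=config)
    print(f"would quantize {config.model_name_or_path} to {config.gptq_bits} bits")

if __name__ == "__main__":
    parser = HfArgumentParser(MyConfig)
    config = parser.parse_args_into_dataclasses()[0]
    run(config)
```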