import numpy as np
import torch

def generate_matrix(size, scale=10):
    """Lower-triangular matrix with entries -((i + 1) * 0.05 + (j + 1) * 0.01),
    with the diagonal additionally multiplied by `scale`."""
    matrix = np.zeros((size, size))
    for i in range(size):
        for j in range(i + 1):
            matrix[i, j] = -((i + 1) * 0.05 + (j + 1) * 0.01)
        matrix[i, i] *= scale
    return matrix

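# Worked example of the helper above (values computed by hand, shown purely
# as an illustration): generate_matrix(3) yields
#   [[-0.60,  0.00,  0.00],
#    [-0.11, -1.20,  0.00],
#    [-0.16, -0.17, -1.80]]
# i.e. a lower-triangular matrix whose diagonal has been scaled by 10.
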
class LossSchedulerModel(torch.nn.Module):
    def __init__(self, wx, we):
        super().__init__()
        assert wx.dim() == 1 and we.dim() == 2
        assert wx.shape[0] == we.shape[0] == we.shape[1]
        self.register_parameter('wx', torch.nn.Parameter(wx))
        self.register_parameter('we', torch.nn.Parameter(we))

    def forward(self, t, xT, e_prev):
        assert len(e_prev) == t + 1  # one model output per step 0..t
        # Learned linear combination of the initial latent and all past outputs.
        out = xT * self.wx[t]
        for e, w in zip(e_prev, self.we[t]):
            out = out + e * w
        return out.to(xT.dtype)

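# Illustrative sketch, not part of the original file: a tiny 3-step model
# queried at step t=1. All shapes and values below are assumptions.
def _demo_loss_model():
    model = LossSchedulerModel(torch.randn(3), torch.randn(3, 3))
    xT = torch.randn(1, 4, 8, 8)                       # stand-in initial latent
    e_prev = [torch.randn_like(xT) for _ in range(2)]  # outputs of steps 0 and 1
    # Combines xT (weighted by wx[1]) with e_prev (weighted by we[1, :2]).
    return model(1, xT, e_prev)
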
class LossScheduler:
    def __init__(self, timesteps, model):
        self.timesteps = timesteps
        self.model = model
        self.init_noise_sigma = 1.0
        self.order = 1

    @staticmethod
    def load(path):
        timesteps, wx, we = torch.load(path, map_location='cpu')
        return LossScheduler(timesteps, LossSchedulerModel(wx, we))

    def save(self, path):
        torch.save((self.timesteps, self.model.wx, self.model.we), path)

    def set_timesteps(self, num_inference_steps, device='cuda'):
        # The schedule is fixed by the loaded parameters, so
        # num_inference_steps is ignored; this only resets per-run state.
        self.xT = None
        self.e_prev = []
        self.t_prev = -1
        self.model = self.model.to(device)
        self.timesteps = self.timesteps.to(device)

    def scale_model_input(self, sample, *args, **kwargs):
        return sample

    @torch.no_grad()
    def step(self, model_output, timestep, sample, *args, **kwargs):
        t = self.timesteps.tolist().index(timestep)
        assert self.t_prev == -1 or t == self.t_prev + 1  # steps must be visited in order
        if self.t_prev == -1:
            self.xT = sample  # first step: remember the initial noise latent
        self.e_prev.append(model_output)
        prev_sample = self.model(t, self.xT, self.e_prev)
        if t + 1 == len(self.timesteps):
            # Final step: reset state for the next sampling run.
            self.xT = None
            self.e_prev = []
            self.t_prev = -1
        else:
            self.t_prev = t
        return (prev_sample,)

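# Illustrative sketch, not part of the original file: a save/load round trip
# followed by one full sampling pass. The file name, timestep values, and
# latent shape are assumptions.
def _demo_loss_scheduler():
    timesteps = torch.tensor([801, 601, 401, 201, 1])
    model = LossSchedulerModel(torch.randn(5), torch.randn(5, 5))
    scheduler = LossScheduler(timesteps, model)
    scheduler.save('demo_loss_params.pth')
    scheduler = LossScheduler.load('demo_loss_params.pth')
    scheduler.set_timesteps(5, device='cpu')
    x = torch.randn(1, 4, 8, 8)
    for t in scheduler.timesteps:
        eps = torch.randn_like(x)         # stand-in for a denoiser (UNet) call
        x = scheduler.step(eps, t, x)[0]
    return x
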
class SchedulerWrapper:
    def __init__(self, scheduler, loss_params_path='loss_params.pth'):
        self.scheduler = scheduler
        # Per-timestep records of input latents, model outputs, and step outputs.
        self.catch_x, self.catch_e, self.catch_x_ = {}, {}, {}
        self.loss_scheduler = None
        self.loss_params_path = loss_params_path

    def set_timesteps(self, num_inference_steps, **kwargs):
        # Delegate to the loss scheduler once its parameters are loaded.
        active = self.scheduler if self.loss_scheduler is None else self.loss_scheduler
        result = active.set_timesteps(num_inference_steps, **kwargs)
        self.timesteps = active.timesteps
        self.init_noise_sigma = self.scheduler.init_noise_sigma
        self.order = self.scheduler.order
        return result

    def step(self, model_output, timestep, sample, **kwargs):
        if self.loss_scheduler is not None:
            return self.loss_scheduler.step(model_output, timestep, sample, **kwargs)
        # Recording phase: run the wrapped scheduler and cache the triple
        # (input latent, model output, step output) for this timestep.
        result = self.scheduler.step(model_output, timestep, sample, **kwargs)
        t = timestep.tolist()
        if t not in self.catch_x:
            self.catch_x[t], self.catch_e[t], self.catch_x_[t] = [], [], []
        self.catch_x[t].append(sample.clone().detach().cpu())
        self.catch_e[t].append(model_output.clone().detach().cpu())
        self.catch_x_[t].append(result[0].clone().detach().cpu())
        return result

    def scale_model_input(self, sample, timestep):
        return sample

    def add_noise(self, original_samples, noise, timesteps):
        return self.scheduler.add_noise(original_samples, noise, timesteps)

    def get_path(self):
        # Descending timesteps match the sampling order (high noise -> low noise).
        ts = sorted(self.catch_x, reverse=True)
        xs = [torch.cat(self.catch_x[t], dim=0) for t in ts]
        es = [torch.cat(self.catch_e[t], dim=0) for t in ts]
        # Append the final step's output so xs holds the full trajectory.
        xs.append(torch.cat(self.catch_x_[ts[-1]], dim=0))
        ts = torch.tensor(ts, dtype=torch.int32)
        return ts, torch.stack(xs), torch.stack(es)

    def load_loss_params(self):
        timesteps, wx, we = torch.load(self.loss_params_path, map_location='cpu')
        self.loss_model = LossSchedulerModel(wx, we)
        self.loss_scheduler = LossScheduler(timesteps, self.loss_model)

    def prepare_loss(self, num_accelerate_steps=15):
        # num_accelerate_steps is currently unused; the schedule and weights
        # come entirely from the parameter file.
        self.load_loss_params()
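

# Illustrative end-to-end sketch, not part of the original file: a stub stands
# in for the wrapped diffusers scheduler (in real use this would be e.g. a
# pipeline's DDIM scheduler); the toy update rule and shapes are assumptions.
class _StubScheduler:
    init_noise_sigma = 1.0
    order = 1

    def set_timesteps(self, num_inference_steps, **kwargs):
        self.timesteps = torch.arange(999, -1, -1000 // num_inference_steps)

    def step(self, model_output, timestep, sample, **kwargs):
        return (sample - 0.1 * model_output,)  # toy update in place of DDIM/Euler


def _demo_wrapper():
    wrapper = SchedulerWrapper(_StubScheduler())
    wrapper.set_timesteps(5)
    x = torch.randn(1, 4, 8, 8)
    for t in wrapper.timesteps:
        eps = torch.randn_like(x)       # stand-in for a denoiser (UNet) call
        x = wrapper.step(eps, t, x)[0]  # phase 1: triples are recorded
    # Recorded trajectory: ts (5,), xs (6, 1, 4, 8, 8), es (5, 1, 4, 8, 8);
    # xs/es would serve as training data for fitting wx and we. Once the
    # fitted parameters are on disk, wrapper.prepare_loss() switches all
    # subsequent stepping over to LossScheduler.
    return wrapper.get_path()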