# first_stage/main_parallel.py
import os
import logging
import torch
import torch.distributed as dist
import torch.optim as optim
import datetime
import torch.nn.functional as F
from contextlib import nullcontext
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler
from ml_collections.config_dict import ConfigDict
from rna_dataset import RNADataset, RNACollater_v2
from dmt import DMT
from torch.optim.lr_scheduler import CosineAnnealingLR
from collections import deque
from torch.cuda.amp import GradScaler
# CUDA_VISIBLE_DEVICES=0,1 torchrun --nproc_per_node=2 main_parallel.py
# CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --nproc_per_node=4 main_parallel.py
# CUDA_VISIBLE_DEVICES=4,5,6,7,8,9 torchrun --nnodes=1 --nproc_per_node=6 main_parallel.py --rdzv_backend=c10d --rdzv_endpoint=localhost:0
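# Note: the contrastive loss below sees an effective batch of
# world_size * batch_size samples per view, since embeddings are all-gathered
# across ranks before the InfoNCE computation.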
def info_nce_loss(z1, z2, temperature=0.05):
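    """
    NT-Xent / InfoNCE loss over two batches of paired embeddings.

    z1[i] and z2[i] are embeddings of two views of the same sample (positives);
    every other row of the concatenated (2B, D) batch is a negative. For each
    row i with positive j:

        loss_i = -sim(z_i, z_j) / T + logsumexp_{k != i} sim(z_i, z_k) / T

    i.e. the positive similarity stays in the denominator, as in SimCLR.
    """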
z1 = F.normalize(z1, dim=1)
z2 = F.normalize(z2, dim=1)
B = z1.size(0)
z = torch.cat([z1, z2], dim=0) # (2B, D)
sim = torch.matmul(z, z.T) / temperature # (2B, 2B)
mask = torch.eye(2 * B, device=z.device, dtype=torch.bool)
sim.masked_fill_(mask, -9e15)
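    # Positive index for each row: row i (a z1 sample) pairs with row B + i
    # (its z2 view), and vice versa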
pos_idx = torch.cat([torch.arange(B, 2 * B), torch.arange(0, B)]).to(z.device)
pos_sim = sim[torch.arange(2 * B), pos_idx]
    # Optionally mask the positives out of the denominator as well; the
    # standard NT-Xent/SimCLR denominator keeps them, which is what the
    # unmasked code below computes
# pos_mask = torch.zeros_like(sim, dtype=torch.bool)
# pos_mask[torch.arange(2 * B), pos_idx] = True
# sim.masked_fill_(pos_mask, -9e15)
loss = -pos_sim + torch.logsumexp(sim, dim=1)
return loss.mean()
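# info_nce_loss shape sketch (illustrative values; D matches hidden_dim below):
#   z1, z2 = torch.randn(8, 384), torch.randn(8, 384)
#   loss = info_nce_loss(z1, z2)  # scalar tensor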
class GatherLayer(torch.autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
This implementation does not cut the gradients as torch.distributed.all_gather does.
"""
@staticmethod
def forward(ctx, x):
output = [
torch.zeros_like(x) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
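        # grads[i] holds this rank's gradient w.r.t. the i-th gathered copy;
        # all_reduce sums the contributions from every rank, after which we
        # return the slice corresponding to this rank's original input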
all_gradients = torch.stack(grads)
torch.distributed.all_reduce(all_gradients)
return all_gradients[torch.distributed.get_rank()]
def all_gather_with_grad(tensors):
"""
Performs all_gather operation on the provided tensors.
Graph remains connected for backward grad computation.
"""
    world_size = torch.distributed.get_world_size()
    # No gathering needed in the single-process case
if world_size == 1:
return tensors
tensor_all = GatherLayer.apply(tensors)
return torch.cat(tensor_all, dim=0)
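# all_gather_with_grad example: with world_size ranks each holding a (B, D)
# tensor, every rank gets back a (world_size * B, D) tensor, and gradients
# flow back to the local shard (plain dist.all_gather would detach them).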
def train(configs):
    local_rank = int(os.environ["LOCAL_RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    rank = int(os.environ["RANK"])
# os.environ['MASTER_ADDR'] = 'localhost'
# os.environ['MASTER_PORT'] = '29500'
    # NCCL is the right backend for multi-GPU training here: gloo does not
    # support all_gather on CUDA tensors, which GatherLayer relies on
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
torch.cuda.set_device(local_rank)
date_str = datetime.datetime.now().strftime("%Y%m%d_%H%M")
if rank == 0:
total_batch_size = world_size * configs.batch_size
log_dir = f"/home/hui007/rna/first_stage/log/train_{date_str}_totalbatchsize{total_batch_size}"
os.makedirs(log_dir, exist_ok=True)
log_path = os.path.join(log_dir, f"train_{date_str}.log")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
        # Remove any existing handlers so repeated setup doesn't duplicate log lines
for handler in logger.handlers[:]:
logger.removeHandler(handler)
file_handler = logging.FileHandler(log_path, mode='a')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.info(f"Using DDP with total {world_size} GPUS...")
# z1_queue = deque(maxlen=configs.memory_size)
# z2_queue = deque(maxlen=configs.memory_size)
model = DMT(configs)
resume_path = configs.get('resume_path', None)
if resume_path is not None and os.path.exists(resume_path):
model.load_state_dict(torch.load(resume_path, map_location=f"cuda:{local_rank}"))
if rank == 0:
logging.info(f"Loaded checkpoint from {resume_path}")
device = torch.device(f'cuda:{local_rank}')
model = model.to(device)
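    # find_unused_parameters=True lets DDP tolerate parameters that receive no
    # gradient in a given forward pass (e.g. branches disabled by the config)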
ddp_model = DDP(model, device_ids=[local_rank], find_unused_parameters=True)
# if rank == 0:
# torch.save(ddp_model.module.state_dict(), f"/home/hui007/rna/first_stage/model_checkpoint/test.pth")
dataset = RNADataset('/home/hui007/rna/rna_repr/zhiyuan/train_data_final.npz', protenix_emb_path='/home/hui007/rna/first_stage/scaled_tiny_protenix_emb_1d')
sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank)
dataloader = DataLoader(dataset, batch_size=configs.batch_size, sampler=sampler, drop_last=True, collate_fn=RNACollater_v2())
# optimizer = optim.SGD(ddp_model.parameters(), lr=0.01)
optimizer = torch.optim.AdamW(ddp_model.parameters(), lr=1e-5, weight_decay=0.01)
scheduler = CosineAnnealingLR(optimizer, T_max=configs.epoch, eta_min=1e-6)
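    # Cosine decay from 1e-5 to 1e-6 across the full run; scheduler.step() is
    # called once per epoch at the end of the epoch loop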
global_step = 0
loss_accum = 0.0
contrast_loss_accum = 0.0
denoising_loss_accum = 0.0
count_accum = 0
    # GradScaler (dynamic loss scaling) is only needed for fp16; bf16 has
    # enough dynamic range to train without it
    scaler = GradScaler(enabled=(configs.dtype == "fp16"))
    amp_dtype = {
        "fp32": torch.float32,
        "bf16": torch.bfloat16,
        "fp16": torch.float16,
    }[configs.dtype]
for epoch in range(configs.epoch):
sampler.set_epoch(epoch)
ddp_model.train()
for g1, g2 in dataloader:
g1 = g1.to(device)
g2 = g2.to(device)
            amp_ctx = (
                torch.autocast(device_type="cuda", dtype=amp_dtype)
                if torch.cuda.is_available()
                else nullcontext()
            )
            optimizer.zero_grad()
            with amp_ctx:
z1, denoising_loss1 = ddp_model(g1)
z2, denoising_loss2 = ddp_model(g2)
z1_all = all_gather_with_grad(z1)
z2_all = all_gather_with_grad(z2)
# if len(msa_queue) > 0:
# msa_output_extended = msa_output + list(msa_queue)
# dmt_output_extended = dmt_output + list(dmt_queue)
# else:
# msa_output_extended = msa_output
# dmt_output_extended = dmt_output
                # Update the queues (detach to stop gradient propagation)
# for x in msa_output:
# msa_queue.append(x.detach())
# for x in dmt_output:
# dmt_queue.append(x.detach())
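                # Combined objective: NT-Xent over the globally gathered views,
                # weighted 0.5, plus the mean of the two per-view denoising losses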
contrast_loss = info_nce_loss(z1_all, z2_all, temperature=0.05)
denoising_loss = (denoising_loss1 + denoising_loss2) / 2
loss = 0.5 * contrast_loss + denoising_loss
# print(loss, loss.requires_grad, loss.grad_fn)
# loss.backward()
scaler.scale(loss).backward()
            # torch.nn.utils.clip_grad_norm_(ddp_model.parameters(), max_norm=1.0)  # gradient clipping; under fp16 call scaler.unscale_(optimizer) first
# optimizer.step()
scaler.step(optimizer)
scaler.update()
global_step += 1
if rank == 0:
print(f"Epoch [{epoch}/{configs.epoch}], Step [{global_step}], Loss: {loss.item():.4f}")
contrast_loss_accum += contrast_loss.item()
denoising_loss_accum += denoising_loss.item()
loss_accum += loss.item()
count_accum += 1
if global_step % 5 == 0:
avg_loss = loss_accum / count_accum
avg_denoising_loss = denoising_loss_accum / count_accum
avg_contrast_loss = contrast_loss_accum / count_accum
print(f"Epoch: {epoch}, Step: {global_step}, Avg Loss: {avg_loss:.4f}")
logging.info(f"Epoch: [{epoch}/{configs.epoch}], Step: {global_step}, avg_contrast_loss: {avg_contrast_loss:.4f}, avg_denoising_loss: {avg_denoising_loss:.4f}, Avg Loss: {avg_loss:.4f}")
loss_accum = 0.0
contrast_loss_accum = 0.0
denoising_loss_accum = 0.0
count_accum = 0
if rank == 0 and epoch % 100 == 0:
os.makedirs(f'/home/hui007/rna/first_stage/model_checkpoint/{date_str}', exist_ok=True)
torch.save(ddp_model.module.state_dict(), f"/home/hui007/rna/first_stage/model_checkpoint/{date_str}/epoch{epoch}_{date_str}.pth")
scheduler.step()
torch.cuda.empty_cache()
dist.destroy_process_group()
def main():
configs = {
# 'resume_path': '/home/hui007/rna/model_checkpoint/torchrun_model_epoch_15_20250610_1732.pth',
'resume_path': None,
'epoch': 10000,
'batch_size': 64, # per GPU
# 'memory_size': 16,
"dtype": "bf16", # default training dtype: bf16
'new_aa': True,
'sqrt_dis': False,
'hidden_dim': 384,
'e2n_ratio': 6,
'n_blocks': 12,
'in_res_node_features': 4,
'pos_mask_type': 'none',
'enable_llm': False,
'use_struc_emb': False,
'pos_dim': 72,
'mlp_ratio': 4,
'disable_dist': False,
'dist_mask_type': 'none',
'n_heads': 8,
'in_res_edge_features': 5,
'not_pair_update': False,
'dropout': 0.0,
'trans_ver': 'v3',
"use_protenix_emb": True
}
configs = ConfigDict(configs)
train(configs)
if __name__ == "__main__":
main()