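"""Overfit a single batch to sanity-check the HRDT finetuning loss.

If the model, data pipeline, and optimizer are wired up correctly, the
printed loss should fall steadily toward zero within a few dozen steps.

Example invocation (the script path is illustrative):

    python scripts/overfit_single_batch.py \
        --pretrained_backbone_path checkpoints/pretrain-0618/checkpoint-500000/pytorch_model.bin \
        --steps 50
"""
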
import argparse
import os
import sys
import time
from pathlib import Path

import torch
import yaml
from torch.utils.data import DataLoader

# Make the repository root importable when this script is run directly.
PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
    sys.path.insert(0, str(PROJECT_ROOT))

# Must be set before the LeRobot-backed dataset modules are imported.
os.environ.setdefault("LEROBOT_VIDEO_BACKEND", "pyav")

from models.hrdt_runner import HRDTRunner
from models.encoder.dinosiglip_vit import DinoSigLIPViTBackbone
from hrdt_datasets.dataset import VLAConsumerDataset, DataCollatorForVLAConsumerDataset


def main() -> None:
    parser = argparse.ArgumentParser(description="Overfit a single batch to sanity-check the loss.")
    parser.add_argument("--data_root", default="/hfm/data/pick_box")
    parser.add_argument("--config_path", default="configs/hrdt_finetune_lerobot.yaml")
    parser.add_argument("--pretrained_backbone_path", required=True)
    parser.add_argument("--vision_encoder", default="dino-siglip")
    parser.add_argument("--device", default="cuda:0")
    parser.add_argument("--steps", type=int, default=20)
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--batch_size", type=int, default=2)
    parser.add_argument("--num_workers", type=int, default=1)
    parser.add_argument("--use_precomp_lang_embed", action="store_true")
    args = parser.parse_args()

    with open(args.config_path, "r") as f:
        config = yaml.safe_load(f)

    device = torch.device(args.device)

    # Frozen vision backbone; features are extracted once, outside the training loop.
    vision_encoder = DinoSigLIPViTBackbone(
        vision_backbone_id=args.vision_encoder,
        image_resize_strategy=(
            "letterbox" if config["dataset"]["image_aspect_ratio"] == "pad" else "resize-naive"
        ),
        default_image_size=384,
    ).to(device)
    vision_encoder.eval()
    image_transform = vision_encoder.get_image_transform()

    dataset = VLAConsumerDataset(
        config=config,
        image_transform=image_transform,
        num_cameras=config["common"]["num_cameras"],
        image_aug=False,
        dataset_type="finetune",
        dataset_name="lerobot",
        dataset_root=args.data_root,
        use_precomp_lang_embed=args.use_precomp_lang_embed,
        upsample_rate=1,
    )
    collator = DataCollatorForVLAConsumerDataset(use_precomp_lang_embed=args.use_precomp_lang_embed)
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=collator,
        num_workers=args.num_workers,
        pin_memory=True,
        persistent_workers=args.num_workers > 0,
    )
    # Grab one batch and reuse it for every optimization step below.
    batch = next(iter(loader))

    # Fall back to a known local checkpoint if the given backbone path is missing.
    if not Path(args.pretrained_backbone_path).exists():
        alt_path = Path("./checkpoints/pretrain-0618/checkpoint-500000/pytorch_model.bin")
        if alt_path.exists():
            print(f"[WARN] Using fallback pretrained backbone at {alt_path}")
            args.pretrained_backbone_path = str(alt_path)
        else:
            raise FileNotFoundError(
                f"Pretrained backbone not found: {args.pretrained_backbone_path}"
            )

    hrdt = HRDTRunner(
        state_dim=config["common"]["state_dim"],
        action_dim=config["common"]["action_dim"],
        pred_horizon=config["common"]["action_chunk_size"],
        config=config["model"],
        act_pos_emb_config=[("state", 1), ("action", config["common"]["action_chunk_size"])],
        img_pos_emb_config=[
            (
                "image",
                (
                    config["common"]["img_history_size"],
                    config["common"]["num_cameras"],
                    -vision_encoder.num_patches,
                ),
            ),
        ],
        lang_pos_emb_config=[
            ("language", -config["dataset"]["tokenizer_max_length"]),
        ],
        max_img_len=config["common"]["img_history_size"]
        * config["common"]["num_cameras"]
        * vision_encoder.num_patches,
        max_lang_len=config["dataset"]["tokenizer_max_length"],
        training_mode="lang",
        mode="finetune",
        pretrained_backbone_path=args.pretrained_backbone_path,
        dtype=torch.float32,
    ).to(device)

    optimizer = torch.optim.AdamW(hrdt.parameters(), lr=args.lr)

    # Encode images once with the frozen backbone; the features are reused every step.
    images = batch["images"]
    with torch.no_grad():
        k = next(iter(images))
        batch_size, _, C, H, W = images[k].shape
        # Fold the camera/history dimension into the batch so the encoder
        # sees a plain image batch for each backbone key.
        for key in images:
            images[key] = images[key].to(device).view(-1, C, H, W)
        image_features = vision_encoder(images).detach()
        image_features = image_features.view(batch_size, -1, vision_encoder.embed_dim)

    states = batch["states"].to(device)
    actions = batch["actions"].to(device)
    lang_embeds = batch.get("lang_embeds")
    lang_attn_mask = batch.get("lang_attn_mask")
    if lang_embeds is not None:
        lang_embeds = lang_embeds.to(device)
    if lang_attn_mask is not None:
        lang_attn_mask = lang_attn_mask.to(device)

    # Repeatedly fit the same batch; the loss should trend toward zero if the
    # model, data pipeline, and optimizer are wired up correctly.
    for step in range(args.steps):
        t0 = time.time()
        loss_dict = hrdt.compute_loss(
            state_tokens=states,
            action_gt=actions,
            image_tokens=image_features,
            lang_tokens=lang_embeds,
            lang_attn_mask=lang_attn_mask,
        )
        loss = loss_dict["loss"]
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        dt = time.time() - t0
        print(f"step={step:03d} loss={loss.item():.6f} dt={dt:.3f}s")


if __name__ == "__main__":
    main()