dwko commited on
Commit
844a84b
·
verified ·
1 Parent(s): 026dcf0

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +15 -1
README.md CHANGED
@@ -14,20 +14,27 @@ import torch
14
  import numpy as np
15
 
16
  from alpamayo_r1.models.alpamayo_r1 import AlpamayoR1
 
17
  from alpamayo_r1.load_physical_aiavdataset import load_physical_aiavdataset
 
18
  from alpamayo_r1 import helper
19
 
20
 
21
  model_path = "Alpamayo-R1-10B-4bit"
 
22
  model = AlpamayoR1.from_pretrained(model_path, dtype=torch.bfloat16).to("cuda")
 
23
  processor = helper.get_processor(model.tokenizer)
24
 
25
 
26
  clip_id = "030c760c-ae38-49aa-9ad8-f5650a545d26"
 
27
  print(f"Loading dataset for clip_id: {clip_id}...")
 
28
  data = load_physical_aiavdataset(clip_id, t0_us=15_100_000, num_frames=1)
29
- print(f"{data}")
30
  print("Dataset loaded.")
 
31
  messages = helper.create_message(data["image_frames"].flatten(0, 1))
32
 
33
  inputs = processor.apply_chat_template(
@@ -48,7 +55,9 @@ model_inputs = {
48
  model_inputs = helper.to_device(model_inputs, "cuda")
49
 
50
  torch.cuda.manual_seed_all(42)
 
51
  with torch.autocast("cuda", dtype=torch.bfloat16):
 
52
  pred_xyz, pred_rot, extra = model.sample_trajectories_from_data_with_vlm_rollout(
53
  data=model_inputs,
54
  top_p=0.98,
@@ -62,10 +71,15 @@ with torch.autocast("cuda", dtype=torch.bfloat16):
62
  print("Chain-of-Causation (per trajectory):\n", extra["cot"][0])
63
 
64
  gt_xy = data["ego_future_xyz"].cpu()[0, 0, :, :2].T.numpy()
 
65
  pred_xy = pred_xyz.cpu().numpy()[0, 0, :, :, :2].transpose(0, 2, 1)
 
66
  diff = np.linalg.norm(pred_xy - gt_xy[None, ...], axis=1).mean(-1)
 
67
  min_ade = diff.min()
 
68
  print("minADE:", min_ade, "meters")
 
69
  print(
70
  "Note: VLA-reasoning models produce nondeterministic outputs due to trajectory sampling, "
71
  "hardware differences, etc. With num_traj_samples=1 (set for GPU memory compatibility), "
 
14
  import numpy as np
15
 
16
  from alpamayo_r1.models.alpamayo_r1 import AlpamayoR1
17
+
18
  from alpamayo_r1.load_physical_aiavdataset import load_physical_aiavdataset
19
+
20
  from alpamayo_r1 import helper
21
 
22
 
23
  model_path = "Alpamayo-R1-10B-4bit"
24
+
25
  model = AlpamayoR1.from_pretrained(model_path, dtype=torch.bfloat16).to("cuda")
26
+
27
  processor = helper.get_processor(model.tokenizer)
28
 
29
 
30
  clip_id = "030c760c-ae38-49aa-9ad8-f5650a545d26"
31
+
32
  print(f"Loading dataset for clip_id: {clip_id}...")
33
+
34
  data = load_physical_aiavdataset(clip_id, t0_us=15_100_000, num_frames=1)
35
+
36
  print("Dataset loaded.")
37
+
38
  messages = helper.create_message(data["image_frames"].flatten(0, 1))
39
 
40
  inputs = processor.apply_chat_template(
 
55
  model_inputs = helper.to_device(model_inputs, "cuda")
56
 
57
  torch.cuda.manual_seed_all(42)
58
+
59
  with torch.autocast("cuda", dtype=torch.bfloat16):
60
+
61
  pred_xyz, pred_rot, extra = model.sample_trajectories_from_data_with_vlm_rollout(
62
  data=model_inputs,
63
  top_p=0.98,
 
71
  print("Chain-of-Causation (per trajectory):\n", extra["cot"][0])
72
 
73
  gt_xy = data["ego_future_xyz"].cpu()[0, 0, :, :2].T.numpy()
74
+
75
  pred_xy = pred_xyz.cpu().numpy()[0, 0, :, :, :2].transpose(0, 2, 1)
76
+
77
  diff = np.linalg.norm(pred_xy - gt_xy[None, ...], axis=1).mean(-1)
78
+
79
  min_ade = diff.min()
80
+
81
  print("minADE:", min_ade, "meters")
82
+
83
  print(
84
  "Note: VLA-reasoning models produce nondeterministic outputs due to trajectory sampling, "
85
  "hardware differences, etc. With num_traj_samples=1 (set for GPU memory compatibility), "