File size: 4,692 Bytes
34df37c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 |
#!/usr/bin/env python3
import argparse
import json
import math
import re
from pathlib import Path

from safetensors import safe_open

from gr00t.configs.model.gr00t_n1d6 import Gr00tN1d6Config
# Matches parameter names inside the LLM decoder stack and captures the
# zero-based layer index, e.g. "...language_model.model.layers.12.mlp...".
LAYER_RE = re.compile(r"\.language_model\.model\.layers\.(\d+)\.")
def _tensor_numel(shape):
numel = 1
for dim in shape:
numel *= dim
return numel
def _layer_index(param_name):
    """Return the LLM layer number embedded in *param_name*, or None if absent."""
    found = LAYER_RE.search(param_name)
    return int(found.group(1)) if found else None
def _is_trainable(name, cfg, total_layers):
    """Decide whether parameter *name* is trainable under the tuning flags in *cfg*.

    Args:
        name: Fully qualified parameter name.
        cfg: Config object exposing the tune_* switches and tune_top_llm_layers.
        total_layers: Number of LLM layers present (used for top-k layer tuning).

    Returns:
        True if the parameter should receive gradients, False otherwise.
    """
    if name.startswith("action_head."):
        if name.startswith("action_head.model."):
            return cfg.tune_diffusion_model
        if name.startswith("action_head.vlln."):
            return cfg.tune_vlln
        projector_prefixes = (
            "action_head.state_encoder.",
            "action_head.action_encoder.",
            "action_head.action_decoder.",
            "action_head.position_embedding.",
            "action_head.mask_token",
        )
        if name.startswith(projector_prefixes):
            return cfg.tune_projector
        # Any other action-head parameter defaults to trainable.
        return True

    if not name.startswith("backbone."):
        # Parameters outside backbone/action head are trainable by default.
        return True

    if name.startswith(("backbone.model.vision_model.", "backbone.model.mlp1.")):
        return cfg.tune_visual

    if name.startswith("backbone.model.language_model."):
        if cfg.tune_llm:
            return True
        idx = _layer_index(name)
        if idx is None or cfg.tune_top_llm_layers <= 0:
            return False
        # Only the top `tune_top_llm_layers` layers are unfrozen.
        first_tunable = max(total_layers - cfg.tune_top_llm_layers, 0)
        return idx >= first_tunable

    # Remaining backbone parameters stay frozen.
    return False
def _load_shapes_from_safetensors(file_path, wanted_keys=None):
    """Read tensor shapes (metadata only, no tensor data) from a safetensors file.

    Args:
        file_path: Path to the .safetensors file.
        wanted_keys: Optional iterable restricting which tensors to inspect;
            keys absent from the file are silently skipped. None means all keys.

    Returns:
        Dict mapping tensor name -> shape tuple.
    """
    shapes = {}
    with safe_open(str(file_path), framework="pt", device="cpu") as f:
        # Hoist the key listing out of the loop: `key not in f.keys()`
        # re-enumerated the file's keys on every iteration; a set gives a
        # single enumeration plus O(1) membership tests.
        available = set(f.keys())
        keys = wanted_keys if wanted_keys is not None else f.keys()
        for key in keys:
            if key not in available:
                continue
            # get_slice exposes shape metadata without loading the full tensor.
            shapes[key] = tuple(f.get_slice(key).get_shape())
    return shapes
def _collect_shapes(model_dir):
    """Gather all tensor shapes from a sharded or single-file safetensors checkpoint.

    Prefers the sharded index (model.safetensors.index.json) when present,
    falling back to a single model.safetensors file.

    Raises:
        FileNotFoundError: If neither checkpoint layout exists in *model_dir*.
    """
    index_path = model_dir / "model.safetensors.index.json"
    safetensors_path = model_dir / "model.safetensors"
    shapes = {}
    if index_path.exists():
        with open(index_path, "r") as f:
            index = json.load(f)
        # Group keys by shard so each shard file is opened exactly once.
        keys_by_file = {}
        for key, fname in index.get("weight_map", {}).items():
            keys_by_file.setdefault(fname, []).append(key)
        for fname, keys in keys_by_file.items():
            shapes.update(_load_shapes_from_safetensors(model_dir / fname, keys))
    elif safetensors_path.exists():
        shapes.update(_load_shapes_from_safetensors(safetensors_path))
    else:
        raise FileNotFoundError(
            "No safetensors found. Expected model.safetensors.index.json or model.safetensors "
            f"in {model_dir}"
        )
    return shapes


def _prune_to_selected_layers(shapes, select_layer):
    """Drop LLM-layer params at index >= *select_layer*; None/<=0 keeps everything."""
    if select_layer is None or select_layer <= 0:
        return shapes
    return {
        name: shape
        for name, shape in shapes.items()
        if (idx := _layer_index(name)) is None or idx < select_layer
    }


def main():
    """CLI entry point: print total vs. trainable parameter counts for a checkpoint.

    Loads the Gr00tN1d6 config next to the checkpoint, reads tensor shapes
    from the safetensors metadata (without materializing weights), and applies
    the config's tuning flags to classify each parameter.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", required=True, help="Path to Gr00tN1d6 config.json")
    args = parser.parse_args()

    config_path = Path(args.config)
    with open(config_path, "r") as f:
        cfg_dict = json.load(f)
    cfg = Gr00tN1d6Config(**cfg_dict)

    shapes = _collect_shapes(config_path.parent)
    # If select_layer truncates the LLM, ignore layers beyond it.
    shapes = _prune_to_selected_layers(shapes, cfg.select_layer)

    # Infer the layer count from the highest surviving layer index.
    layer_indices = {idx for name in shapes if (idx := _layer_index(name)) is not None}
    total_layers = (max(layer_indices) + 1) if layer_indices else 0

    total = 0
    trainable = 0
    for name, shape in shapes.items():
        count = _tensor_numel(shape)
        total += count
        if _is_trainable(name, cfg, total_layers):
            trainable += count

    print(f"Total params: {total:,}")
    print(f"Trainable params: {trainable:,}")


if __name__ == "__main__":
    main()
|