# NOTE: the three lines below are Hugging Face web-page residue captured with
# the file, not code:
#   zhenyuzhao's picture
#   Add files using upload-large-folder tool
#   34df37c verified
#!/usr/bin/env python3
import argparse
import json
import math
import re
from pathlib import Path

from safetensors import safe_open

from gr00t.configs.model.gr00t_n1d6 import Gr00tN1d6Config
# Matches parameter names inside the LLM decoder stack; group 1 captures the
# zero-based layer index (e.g. "...language_model.model.layers.12.attn.weight").
LAYER_RE = re.compile(r"\.language_model\.model\.layers\.(\d+)\.")
def _tensor_numel(shape):
numel = 1
for dim in shape:
numel *= dim
return numel
def _layer_index(param_name):
    """Return the LLM layer index embedded in *param_name*, or None.

    A name that does not match ``LAYER_RE`` (i.e. is not part of the decoder
    layer stack) yields None.
    """
    found = LAYER_RE.search(param_name)
    return int(found.group(1)) if found else None
def _is_trainable(name, cfg, total_layers):
# Action head rules
if name.startswith("action_head."):
if name.startswith("action_head.model."):
return cfg.tune_diffusion_model
if name.startswith("action_head.vlln."):
return cfg.tune_vlln
if name.startswith("action_head.state_encoder."):
return cfg.tune_projector
if name.startswith("action_head.action_encoder."):
return cfg.tune_projector
if name.startswith("action_head.action_decoder."):
return cfg.tune_projector
if name.startswith("action_head.position_embedding."):
return cfg.tune_projector
if name.startswith("action_head.mask_token"):
return cfg.tune_projector
# Default to trainable for other action head params
return True
# Backbone rules
if name.startswith("backbone."):
if name.startswith("backbone.model.vision_model.") or name.startswith(
"backbone.model.mlp1."
):
return cfg.tune_visual
if name.startswith("backbone.model.language_model."):
if cfg.tune_llm:
return True
layer_idx = _layer_index(name)
if layer_idx is None:
return False
if cfg.tune_top_llm_layers <= 0:
return False
return layer_idx >= max(total_layers - cfg.tune_top_llm_layers, 0)
# Default: frozen if not clearly part of tunable submodules
return False
# Non-backbone/non-action_head params (if any)
return True
def _load_shapes_from_safetensors(file_path, wanted_keys=None):
    """Read tensor shapes from a .safetensors file without loading tensor data.

    Args:
        file_path: Path to the .safetensors file.
        wanted_keys: Optional iterable of tensor names to restrict the scan to;
            names absent from the file are silently skipped. When None, every
            tensor in the file is inspected.

    Returns:
        Dict mapping tensor name -> shape tuple.
    """
    shapes = {}
    with safe_open(str(file_path), framework="pt", device="cpu") as f:
        file_keys = f.keys()
        # Hoisted: the original re-queried f.keys() inside the loop for every
        # membership test; build the set once for O(1) lookups.
        available = set(file_keys)
        keys = wanted_keys if wanted_keys is not None else file_keys
        for key in keys:
            if key not in available:
                continue
            # get_slice exposes metadata only, so tensor bytes stay on disk.
            shapes[key] = tuple(f.get_slice(key).get_shape())
    return shapes
def main():
    """Report total and trainable parameter counts for a Gr00tN1d6 checkpoint.

    Reads the model config given by ``--config``, collects tensor shapes from
    either a sharded (``model.safetensors.index.json``) or single-file
    (``model.safetensors``) layout in the same directory, then applies the
    config's tuning flags to classify each parameter and prints both counts.

    Raises:
        FileNotFoundError: If neither safetensors layout is found next to the
            config file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", required=True, help="Path to Gr00tN1d6 config.json")
    args = parser.parse_args()

    config_path = Path(args.config)
    # Explicit encoding: JSON is UTF-8 by spec; don't depend on the locale default.
    with open(config_path, "r", encoding="utf-8") as f:
        cfg = Gr00tN1d6Config(**json.load(f))

    model_dir = config_path.parent
    index_path = model_dir / "model.safetensors.index.json"
    safetensors_path = model_dir / "model.safetensors"

    shapes = {}
    if index_path.exists():
        # Sharded checkpoint: group keys by shard so each file is opened once.
        with open(index_path, "r", encoding="utf-8") as f:
            weight_map = json.load(f).get("weight_map", {})
        keys_by_file = {}
        for key, fname in weight_map.items():
            keys_by_file.setdefault(fname, []).append(key)
        for fname, keys in keys_by_file.items():
            shapes.update(_load_shapes_from_safetensors(model_dir / fname, keys))
    elif safetensors_path.exists():
        shapes.update(_load_shapes_from_safetensors(safetensors_path))
    else:
        raise FileNotFoundError(
            "No safetensors found. Expected model.safetensors.index.json or model.safetensors "
            f"in {model_dir}"
        )

    # If select_layer truncates the LLM, ignore layers beyond it.
    if cfg.select_layer is not None and cfg.select_layer > 0:
        shapes = {
            name: shape
            for name, shape in shapes.items()
            if (idx := _layer_index(name)) is None or idx < cfg.select_layer
        }

    # Determine the layer count from the layer indices that remain.
    layer_indices = {idx for name in shapes if (idx := _layer_index(name)) is not None}
    total_layers = (max(layer_indices) + 1) if layer_indices else 0

    total = 0
    trainable = 0
    for name, shape in shapes.items():
        count = _tensor_numel(shape)
        total += count
        if _is_trainable(name, cfg, total_layers):
            trainable += count

    print(f"Total params: {total:,}")
    print(f"Trainable params: {trainable:,}")
# Entry point when executed as a standalone script.
if __name__ == "__main__":
    main()