sumanthpashuparthi commited on
Commit
bb4cf72
Β·
verified Β·
1 Parent(s): 1f904c9

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes. Β  See raw diff
Files changed (50) hide show
  1. convert_v20_v21.py +161 -0
  2. fix_indexing.py +224 -0
  3. scripts/__init__.py +0 -0
  4. scripts/compute_norm_stats.py +177 -0
  5. scripts/docker/compose.yml +29 -0
  6. scripts/docker/install_docker_ubuntu22.sh +37 -0
  7. scripts/docker/install_nvidia_container_toolkit.sh +17 -0
  8. scripts/docker/serve_policy.Dockerfile +38 -0
  9. scripts/serve_policy.py +122 -0
  10. scripts/train.py +280 -0
  11. scripts/train_pytorch.py +632 -0
  12. scripts/train_test.py +30 -0
  13. src/openpi/__init__.py +0 -0
  14. src/openpi/__pycache__/__init__.cpython-311.pyc +0 -0
  15. src/openpi/__pycache__/transforms.cpython-311.pyc +0 -0
  16. src/openpi/conftest.py +17 -0
  17. src/openpi/models/__init__.py +0 -0
  18. src/openpi/models/__pycache__/__init__.cpython-311.pyc +0 -0
  19. src/openpi/models/__pycache__/gemma.cpython-311.pyc +0 -0
  20. src/openpi/models/__pycache__/gemma_fast.cpython-311.pyc +0 -0
  21. src/openpi/models/__pycache__/lora.cpython-311.pyc +0 -0
  22. src/openpi/models/__pycache__/model.cpython-311.pyc +0 -0
  23. src/openpi/models/__pycache__/pi0.cpython-311.pyc +0 -0
  24. src/openpi/models/__pycache__/pi0_config.cpython-311.pyc +0 -0
  25. src/openpi/models/__pycache__/pi0_fast.cpython-311.pyc +0 -0
  26. src/openpi/models/__pycache__/siglip.cpython-311.pyc +0 -0
  27. src/openpi/models/__pycache__/tokenizer.cpython-311.pyc +0 -0
  28. src/openpi/models/gemma.py +459 -0
  29. src/openpi/models/gemma_fast.py +437 -0
  30. src/openpi/models/lora.py +148 -0
  31. src/openpi/models/lora_test.py +94 -0
  32. src/openpi/models/model.py +332 -0
  33. src/openpi/models/model_test.py +94 -0
  34. src/openpi/models/pi0.py +283 -0
  35. src/openpi/models/pi0_config.py +108 -0
  36. src/openpi/models/pi0_fast.py +313 -0
  37. src/openpi/models/pi0_test.py +46 -0
  38. src/openpi/models/siglip.py +373 -0
  39. src/openpi/models/tokenizer.py +371 -0
  40. src/openpi/models/tokenizer_test.py +27 -0
  41. src/openpi/models/utils/__pycache__/fsq_tokenizer.cpython-311.pyc +0 -0
  42. src/openpi/models/utils/fsq_tokenizer.py +472 -0
  43. src/openpi/models/vit.py +307 -0
  44. src/openpi/models_pytorch/__pycache__/gemma_pytorch.cpython-311.pyc +0 -0
  45. src/openpi/models_pytorch/__pycache__/pi0_pytorch.cpython-311.pyc +0 -0
  46. src/openpi/models_pytorch/__pycache__/preprocessing_pytorch.cpython-311.pyc +0 -0
  47. src/openpi/models_pytorch/gemma_pytorch.py +281 -0
  48. src/openpi/models_pytorch/pi0_pytorch.py +463 -0
  49. src/openpi/models_pytorch/preprocessing_pytorch.py +173 -0
  50. src/openpi/models_pytorch/transformers_replace/models/gemma/configuration_gemma.py +173 -0
convert_v20_v21.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Convert a local LeRobot dataset from v2.0 to v2.1, or generate episodes_stats.jsonl
for a dataset already marked as v2.1 but missing the file.

Usage:
    uv run convert_v20_v21.py --dataset-path /mnt/ugreen-nfs/jetson-raw-data/lerobot_merged
"""

import argparse
import json
import os
from pathlib import Path

# Set offline mode before importing to avoid HuggingFace Hub access
os.environ["HF_HUB_OFFLINE"] = "1"

from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.common.datasets.utils import EPISODES_STATS_PATH, STATS_PATH, load_stats, write_info
from lerobot.common.datasets.v21.convert_stats import check_aggregate_stats, convert_stats

# Monkey-patch get_safe_version to avoid HuggingFace Hub access for local datasets
from lerobot.common.datasets import utils as lerobot_utils

# Keep a reference to the original so non-local repo_ids still work.
_original_get_safe_version = lerobot_utils.get_safe_version
25
+
26
+ def _patched_get_safe_version(repo_id, revision):
27
+ """Patched version that returns revision directly for local datasets."""
28
+ if repo_id is None or repo_id.startswith("local"):
29
+ return revision
30
+ return _original_get_safe_version(repo_id, revision)
31
+
32
# Install the patch so LeRobotDataset version resolution stays fully local.
lerobot_utils.get_safe_version = _patched_get_safe_version

# LeRobot codebase version tags written to meta/info.json.
V20 = "v2.0"
V21 = "v2.1"
36
+
37
+
38
def convert_local_dataset(dataset_path: str, num_workers: int = 4) -> None:
    """Convert a local LeRobot dataset from v2.0 to v2.1, or generate missing episodes_stats.jsonl.

    Args:
        dataset_path: Path to the dataset directory (the one containing ``meta/``).
        num_workers: Worker count forwarded to ``convert_stats``.

    Raises:
        ValueError: If the dataset directory or ``meta/info.json`` is missing.
    """
    dataset_root = Path(dataset_path).resolve()

    if not dataset_root.exists():
        raise ValueError(f"Dataset path does not exist: {dataset_root}")

    # Read info.json to check current version
    info_path = dataset_root / "meta" / "info.json"
    if not info_path.exists():
        raise ValueError(f"info.json not found at {info_path}")

    with open(info_path, 'r') as f:
        info = json.load(f)

    current_version = info.get("codebase_version", "unknown")
    episodes_stats_path = dataset_root / "meta" / EPISODES_STATS_PATH

    # Check if episodes_stats.jsonl already exists
    if episodes_stats_path.exists() and current_version == V21:
        print(f"Dataset is already v2.1 with episodes_stats.jsonl. Nothing to do.")
        return

    # If v2.1 but missing episodes_stats.jsonl, we need to generate it.
    # Temporarily set version to v2.0 on disk so loading below succeeds,
    # and remember to restore it if conversion does not complete.
    needs_version_restore = False
    if current_version == V21 and not episodes_stats_path.exists():
        print(f"Dataset is marked as v2.1 but missing {EPISODES_STATS_PATH}.")
        print(f"Temporarily setting version to v2.0 to generate stats...")
        info["codebase_version"] = V20
        with open(info_path, 'w') as f:
            json.dump(info, f, indent=4)
        needs_version_restore = True

    # Load the dataset - the monkey-patch should allow this to work now
    # (repo_id "local" makes the patched get_safe_version return `revision` as-is).
    print("Loading dataset...")
    dataset = LeRobotDataset("local", root=dataset_root, revision=V20)

    # Check if it's actually v2.0 (or was temporarily set to v2.0)
    if dataset.meta.info.get("codebase_version") != V20:
        current_version = dataset.meta.info.get("codebase_version", "unknown")
        print(f"Warning: Dataset is version {current_version}, not {V20}. Skipping conversion.")
        if needs_version_restore:
            # Restore original version
            info["codebase_version"] = V21
            with open(info_path, 'w') as f:
                json.dump(info, f, indent=4)
        return

    print(f"Converting dataset from {V20} to {V21}...")
    print(f"Dataset path: {dataset_root}")

    # Remove existing episodes_stats.jsonl if it exists, so convert_stats
    # starts from a clean slate.
    episodes_stats_path = dataset_root / "meta" / EPISODES_STATS_PATH
    if episodes_stats_path.is_file():
        print(f"Removing existing {EPISODES_STATS_PATH}...")
        episodes_stats_path.unlink()

    # Convert stats
    print("Computing per-episode stats...")
    try:
        convert_stats(dataset, num_workers=num_workers)
    except Exception as e:
        print(f"Error during stats conversion: {e}")
        if needs_version_restore:
            # Restore original version on error so the on-disk metadata is
            # not left in the temporary v2.0 state.
            info["codebase_version"] = V21
            with open(info_path, 'w') as f:
                json.dump(info, f, indent=4)
        raise

    # Load reference stats and check consistency (if stats.json exists).
    # Failures here are reported but non-fatal.
    stats_path = dataset_root / "meta" / STATS_PATH
    if stats_path.is_file():
        print("Checking consistency with aggregate stats...")
        ref_stats = load_stats(dataset_root)
        if ref_stats is not None:
            try:
                check_aggregate_stats(dataset, ref_stats)
            except Exception as e:
                print(f"Warning: Consistency check failed: {e}")
        else:
            print("Warning: Could not load reference stats, skipping consistency check.")
    else:
        print("No stats.json found, skipping consistency check.")

    # Update codebase version in info.json
    print("Updating codebase version...")
    info["codebase_version"] = CODEBASE_VERSION
    write_info(info, dataset_root)

    # Delete old stats.json file if it exists (superseded by episodes_stats.jsonl in v2.1)
    stats_path = dataset_root / "meta" / STATS_PATH
    if stats_path.is_file():
        print(f"Removing deprecated {STATS_PATH}...")
        stats_path.unlink()

    print(f"βœ“ Successfully converted dataset to {V21}!")
    print(f" Updated: {dataset_root / 'meta' / 'info.json'}")
    if episodes_stats_path.exists():
        print(f" Created: {episodes_stats_path}")
140
+
141
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser(description="Convert a local LeRobot dataset from v2.0 to v2.1")
    parser.add_argument(
        "--dataset-path",
        required=True,
        type=str,
        help="Path to the local LeRobot dataset directory",
    )
    parser.add_argument(
        "--num-workers",
        default=4,
        type=int,
        help="Number of workers for parallelizing stats compute. Defaults to 4.",
    )
    cli_args = parser.parse_args()
    convert_local_dataset(cli_args.dataset_path, num_workers=cli_args.num_workers)
160
+
161
+
fix_indexing.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Script to fix indexing issues in lerobot_long dataset after some episodes were deleted.
4
+ Renames files to have contiguous indices and updates metadata.
5
+ """
6
+
7
+ import os
8
+ import json
9
+ import shutil
10
+ import pyarrow.parquet as pq
11
+ import pyarrow as pa
12
+
13
# Root of the (hard-coded) dataset this script repairs.
DATASET_PATH = "/mnt/ugreen-nfs/jetson-raw-data/lerobot_merged"
# Per-episode parquet files live here (LeRobot chunked layout).
DATA_DIR = os.path.join(DATASET_PATH, "data", "chunk-000")
# Per-episode mp4 files, one subdirectory per camera key.
VIDEOS_DIR = os.path.join(DATASET_PATH, "videos", "chunk-000")
# Metadata files: info.json, episodes.jsonl, tasks.jsonl.
META_DIR = os.path.join(DATASET_PATH, "meta")

# Camera streams recorded per episode; used to locate video files and to
# compute total_videos in info.json.
VIDEO_KEYS = [
    "observation.images.workspace",
    "observation.images.right_wrist",
    "observation.images.left_wrist",
    "observation.images.chest"
]
24
+
25
def get_existing_episode_indices():
    """Scan parquet files to find which episode indices actually exist.

    Only filenames of the exact form ``episode_<digits>.parquet`` are
    considered. The original parsed every ``*.parquet`` name with ``int()``,
    which crashed on stray files — notably ``temp_episode_*.parquet`` left
    behind by an interrupted run of ``rename_files_with_temp``.

    Returns:
        Sorted list of existing episode indices.
    """
    import re  # local import: keeps the module's top-level import block untouched

    episode_pattern = re.compile(r"episode_(\d+)\.parquet")
    indices = []
    for filename in os.listdir(DATA_DIR):
        match = episode_pattern.fullmatch(filename)
        if match is not None:
            indices.append(int(match.group(1)))
    return sorted(indices)
34
+
35
def create_index_mapping(old_indices):
    """Map each old episode index to its new contiguous 0-based position.

    Args:
        old_indices: Episode indices in the order they should be renumbered.

    Returns:
        Dict of ``old_index -> new_index``.
    """
    return dict(zip(old_indices, range(len(old_indices))))
38
+
39
def rename_files_with_temp(index_mapping):
    """
    Rename all files using a two-step process to avoid conflicts.
    First rename to temp names, then to final names.

    The two phases are required because a target name (e.g. episode_000003)
    may still be occupied by a not-yet-renamed source file; going through
    temp_episode_* names makes every final rename collision-free.

    Args:
        index_mapping: Dict of old episode index -> new contiguous index.
    """
    # Step 1: Rename all to temporary names
    print("Step 1: Renaming to temporary names...")

    # Rename parquet files to temp
    for old_idx in index_mapping.keys():
        old_name = f"episode_{old_idx:06d}.parquet"
        temp_name = f"temp_episode_{old_idx:06d}.parquet"
        old_path = os.path.join(DATA_DIR, old_name)
        temp_path = os.path.join(DATA_DIR, temp_name)
        # Guard: tolerate an episode that has no parquet on disk.
        if os.path.exists(old_path):
            os.rename(old_path, temp_path)

    # Rename video files to temp (one pass per camera stream)
    for video_key in VIDEO_KEYS:
        video_dir = os.path.join(VIDEOS_DIR, video_key)
        for old_idx in index_mapping.keys():
            old_name = f"episode_{old_idx:06d}.mp4"
            temp_name = f"temp_episode_{old_idx:06d}.mp4"
            old_path = os.path.join(video_dir, old_name)
            temp_path = os.path.join(video_dir, temp_name)
            if os.path.exists(old_path):
                os.rename(old_path, temp_path)

    # Step 2: Rename from temp to final names
    print("Step 2: Renaming to final contiguous names...")

    # Rename parquet files from temp to final
    for old_idx, new_idx in index_mapping.items():
        temp_name = f"temp_episode_{old_idx:06d}.parquet"
        new_name = f"episode_{new_idx:06d}.parquet"
        temp_path = os.path.join(DATA_DIR, temp_name)
        new_path = os.path.join(DATA_DIR, new_name)
        if os.path.exists(temp_path):
            os.rename(temp_path, new_path)
            print(f" Parquet: {old_idx} -> {new_idx}")

    # Rename video files from temp to final
    for video_key in VIDEO_KEYS:
        video_dir = os.path.join(VIDEOS_DIR, video_key)
        for old_idx, new_idx in index_mapping.items():
            temp_name = f"temp_episode_{old_idx:06d}.mp4"
            new_name = f"episode_{new_idx:06d}.mp4"
            temp_path = os.path.join(video_dir, temp_name)
            new_path = os.path.join(video_dir, new_name)
            if os.path.exists(temp_path):
                os.rename(temp_path, new_path)
90
+
91
def update_parquet_episode_indices(index_mapping):
    """Update episode_index column inside each parquet file.

    Must run AFTER rename_files_with_temp: files are looked up by their NEW
    name, and the episode_index column is rewritten to the new index.

    Args:
        index_mapping: Dict of old episode index -> new contiguous index.
    """
    print("Updating episode_index inside parquet files...")

    for old_idx, new_idx in index_mapping.items():
        if old_idx == new_idx:
            continue  # No change needed

        parquet_path = os.path.join(DATA_DIR, f"episode_{new_idx:06d}.parquet")
        if os.path.exists(parquet_path):
            # Read the parquet file
            table = pq.read_table(parquet_path)

            # Check if episode_index column exists
            if "episode_index" in table.column_names:
                # Create new episode_index array with the new index
                # (constant value per episode, one entry per frame)
                num_rows = table.num_rows
                new_episode_index = pa.array([new_idx] * num_rows, type=pa.int64())

                # Replace the column in place (same position in the schema)
                col_idx = table.column_names.index("episode_index")
                table = table.set_column(col_idx, "episode_index", new_episode_index)

                # Write back
                pq.write_table(table, parquet_path)
                print(f" Updated episode_index in episode_{new_idx:06d}.parquet: {old_idx} -> {new_idx}")
117
+
118
def get_episode_length_from_parquet(episode_idx):
    """Return the frame count of an episode, or 0 if its parquet file is absent."""
    parquet_path = os.path.join(DATA_DIR, f"episode_{episode_idx:06d}.parquet")
    if not os.path.exists(parquet_path):
        return 0
    return pq.read_table(parquet_path).num_rows
125
+
126
def regenerate_episodes_jsonl(num_episodes, task_description):
    """Rewrite meta/episodes.jsonl with contiguous indices and per-episode lengths.

    Args:
        num_episodes: Total number of episodes after re-indexing.
        task_description: Task string recorded for every episode.
    """
    print("Regenerating episodes.jsonl...")

    episodes_path = os.path.join(META_DIR, "episodes.jsonl")

    with open(episodes_path, 'w') as f:
        for idx in range(num_episodes):
            # One JSON object per line, as expected by the jsonl format.
            record = {
                "episode_index": idx,
                "length": get_episode_length_from_parquet(idx),
                "tasks": [task_description],
            }
            f.write(json.dumps(record) + "\n")

    print(f" Written {num_episodes} entries to episodes.jsonl")
143
+
144
def update_info_json(num_episodes):
    """Refresh the aggregate counters in meta/info.json after re-indexing.

    Args:
        num_episodes: Total number of episodes after re-indexing.
    """
    print("Updating info.json...")

    info_path = os.path.join(META_DIR, "info.json")

    # Read existing info
    with open(info_path, 'r') as f:
        info = json.load(f)

    # Calculate total frames across all (re-indexed) episodes.
    total_frames = sum(get_episode_length_from_parquet(idx) for idx in range(num_episodes))

    # Update fields
    info["total_episodes"] = num_episodes
    info["total_frames"] = total_frames
    info["total_videos"] = num_episodes * len(VIDEO_KEYS)
    info["splits"] = {"train": f"0:{num_episodes}"}

    # Write back
    with open(info_path, 'w') as f:
        json.dump(info, f, indent=4)

    print(f" total_episodes: {num_episodes}")
    print(f" total_frames: {total_frames}")
    print(f" total_videos: {num_episodes * len(VIDEO_KEYS)}")
    print(f" splits: 0:{num_episodes}")
173
+
174
def main():
    """Re-index the dataset: rename files, rewrite parquet columns, regenerate metadata."""
    print("=" * 60)
    print("Fixing lerobot_long dataset indexing")
    print("=" * 60)

    # Step 1: Find existing episodes
    # NOTE(review): min()/max() below raise ValueError if no parquet files are
    # found — assumes a non-empty dataset.
    print("\nScanning for existing episodes...")
    old_indices = get_existing_episode_indices()
    print(f"Found {len(old_indices)} episodes")
    print(f"Old indices range: {min(old_indices)} to {max(old_indices)}")

    # Find gaps (indices deleted from the original contiguous range)
    expected = set(range(min(old_indices), max(old_indices) + 1))
    actual = set(old_indices)
    missing = expected - actual
    if missing:
        print(f"Missing indices (gaps): {sorted(missing)}")

    # Step 2: Create mapping
    index_mapping = create_index_mapping(old_indices)
    print(f"\nWill remap to contiguous indices: 0 to {len(old_indices) - 1}")

    # Step 3: Rename files
    print("\nRenaming files...")
    rename_files_with_temp(index_mapping)

    # Step 4: Update parquet contents
    print("\nUpdating parquet file contents...")
    update_parquet_episode_indices(index_mapping)

    # Step 5: Get task description from existing tasks.jsonl
    # NOTE(review): only the first line is read — assumes a single-task
    # dataset; every episode will be tagged with that one task.
    tasks_path = os.path.join(META_DIR, "tasks.jsonl")
    with open(tasks_path, 'r') as f:
        task_data = json.loads(f.readline())
        task_description = task_data["task"]

    # Step 6: Regenerate episodes.jsonl
    print("\nRegenerating metadata...")
    regenerate_episodes_jsonl(len(old_indices), task_description)

    # Step 7: Update info.json
    update_info_json(len(old_indices))

    print("\n" + "=" * 60)
    print("Done! Dataset indexing has been fixed.")
    print(f"Total episodes: {len(old_indices)} (contiguous 0-{len(old_indices)-1})")
    print("=" * 60)

if __name__ == "__main__":
    main()
224
+
scripts/__init__.py ADDED
File without changes
scripts/compute_norm_stats.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compute normalization statistics for a config.
2
+
3
+ This script is used to compute the normalization statistics for a given config. It
4
+ will compute the mean and standard deviation of the data in the dataset and save it
5
+ to the config assets directory.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import pathlib
11
+
12
+ import numpy as np
13
+ import tqdm
14
+ import tyro
15
+ from datasets import load_dataset
16
+
17
+ import openpi.models.model as _model
18
+ import openpi.shared.normalize as normalize
19
+ import openpi.training.config as _config
20
+ import openpi.training.data_loader as _data_loader
21
+ import openpi.transforms as transforms
22
+
23
+
24
class RemoveStrings(transforms.DataTransformFn):
    """Drop string-valued entries from a sample dict.

    Strings are not supported by JAX and are not needed to compute norm
    stats, so they are filtered out before batching.
    """

    def __call__(self, x: dict) -> dict:
        return {k: v for k, v in x.items() if not np.issubdtype(np.asarray(v).dtype, np.str_)}
27
+
28
+
29
def create_torch_dataloader(
    data_config: _config.DataConfig,
    action_horizon: int,
    batch_size: int,
    model_config: _model.BaseModelConfig,
    num_workers: int,
    max_frames: int | None = None,
) -> tuple[_data_loader.TorchDataLoader, int]:
    """Build a torch data loader over the transformed dataset.

    Args:
        data_config: Data configuration; must have a ``repo_id``.
        action_horizon: Number of future actions per sample.
        batch_size: Batch size used for both loading and batch counting.
        model_config: Model config forwarded to dataset creation.
        num_workers: Torch DataLoader worker count.
        max_frames: If set and smaller than the dataset, only roughly this many
            frames are used (with shuffling, so the subsample is representative).

    Returns:
        Tuple of (data loader, number of batches it will yield).

    Raises:
        ValueError: If ``data_config.repo_id`` is unset.
    """
    if data_config.repo_id is None:
        raise ValueError("Data config must have a repo_id")
    dataset = _data_loader.create_torch_dataset(data_config, action_horizon, model_config)
    dataset = _data_loader.TransformedDataset(
        dataset,
        [
            *data_config.repack_transforms.inputs,
            *data_config.data_transforms.inputs,
            # Remove strings since they are not supported by JAX and are not needed to compute norm stats.
            RemoveStrings(),
        ],
    )
    if max_frames is not None and max_frames < len(dataset):
        num_batches = max_frames // batch_size
        # Shuffle so the capped subsample is drawn from the whole dataset.
        shuffle = True
    else:
        num_batches = len(dataset) // batch_size
        shuffle = False
    data_loader = _data_loader.TorchDataLoader(
        dataset,
        local_batch_size=batch_size,
        num_workers=num_workers,
        shuffle=shuffle,
        num_batches=num_batches,
    )
    return data_loader, num_batches
63
+
64
+
65
def create_rlds_dataloader(
    data_config: _config.DataConfig,
    action_horizon: int,
    batch_size: int,
    max_frames: int | None = None,
) -> tuple[_data_loader.RLDSDataLoader, int]:
    """Build an RLDS data loader (batching happens inside the dataset).

    Args:
        data_config: Data configuration with ``rlds_data_dir`` set by the caller.
        action_horizon: Number of future actions per sample.
        batch_size: Batch size used when creating the RLDS dataset.
        max_frames: If set and smaller than the dataset, caps the number of
            batches (no shuffling in this path).

    Returns:
        Tuple of (data loader, number of batches it will yield).
    """
    dataset = _data_loader.create_rlds_dataset(data_config, action_horizon, batch_size, shuffle=False)
    dataset = _data_loader.IterableTransformedDataset(
        dataset,
        [
            *data_config.repack_transforms.inputs,
            *data_config.data_transforms.inputs,
            # Remove strings since they are not supported by JAX and are not needed to compute norm stats.
            RemoveStrings(),
        ],
        is_batched=True,
    )
    if max_frames is not None and max_frames < len(dataset):
        num_batches = max_frames // batch_size
    else:
        # NOTE: this length is currently hard-coded for DROID.
        num_batches = len(dataset) // batch_size
    data_loader = _data_loader.RLDSDataLoader(
        dataset,
        num_batches=num_batches,
    )
    return data_loader, num_batches
92
+
93
+
94
def _iter_parquet_batches(
    dataset_root: pathlib.Path,
    *,
    batch_size: int,
    max_frames: int | None,
):
    """Iterate parquet data in batches without decoding videos.

    Expects a LeRobot-format dataset directory at `dataset_root` containing `data/`.
    Only reads `observation.state` and `action` columns.

    Yields:
        Dicts with "state" and "actions" numpy arrays per batch.

    Raises:
        FileNotFoundError: If ``dataset_root/data`` does not exist.
    """
    data_dir = dataset_root / "data"
    if not data_dir.exists():
        raise FileNotFoundError(f"Expected parquet directory at {data_dir}")

    ds = load_dataset("parquet", data_dir=str(data_dir), split="train")
    total = len(ds)
    limit = total if max_frames is None else min(total, max_frames)

    # Access only required columns to reduce work.
    # NOTE(review): the slice below still materializes all columns of the
    # rows; only these two are converted and yielded.
    state_col = "observation.state"
    action_col = "action"

    for start in range(0, limit, batch_size):
        end = min(limit, start + batch_size)
        batch = ds[start:end]
        # HF datasets returns lists; convert to numpy arrays.
        yield {
            "state": np.asarray(batch[state_col]),
            "actions": np.asarray(batch[action_col]),
        }
125
+
126
+
127
def main(config_name: str, max_frames: int | None = None, parquet_only: bool = False):
    """Compute normalization statistics for a config and save them to assets.

    Args:
        config_name: Name of the training config to load.
        max_frames: Optional cap on the number of frames used for the estimate.
        parquet_only: Read `observation.state`/`action` straight from parquet
            files, skipping video decoding (LeRobot datasets only).

    Raises:
        ValueError: If ``parquet_only`` is requested for an RLDS dataset, or the
            data config lacks ``repo_id``/``lerobot_root``.
    """
    config = _config.get_config(config_name)
    data_config = config.data.create(config.assets_dirs, config.model)

    if parquet_only:
        if data_config.rlds_data_dir is not None:
            raise ValueError("--parquet-only is only supported for LeRobot (parquet) datasets, not RLDS.")
        if data_config.repo_id is None:
            raise ValueError("Data config must have a repo_id")
        if data_config.lerobot_root is None:
            raise ValueError(
                "Data config must set `lerobot_root` for --parquet-only (path to the LeRobot dataset directory)."
            )
        # NOTE: In LeRobot, `root` is the dataset directory containing `meta/`, `data/`, and `videos/`.
        dataset_root = pathlib.Path(data_config.lerobot_root)
        data_loader = _iter_parquet_batches(
            dataset_root,
            batch_size=config.batch_size,
            max_frames=max_frames,
        )
        if max_frames is not None:
            # NOTE(review): may overestimate when max_frames exceeds the dataset
            # size — only used as the tqdm total, so this is cosmetic.
            num_batches = max_frames // config.batch_size
        else:
            # Derive total batches from parquet dataset length (requires reading dataset metadata once).
            num_examples = len(load_dataset("parquet", data_dir=str(dataset_root / "data"), split="train"))
            num_batches = num_examples // config.batch_size
    elif data_config.rlds_data_dir is not None:
        data_loader, num_batches = create_rlds_dataloader(
            data_config, config.model.action_horizon, config.batch_size, max_frames
        )
    else:
        data_loader, num_batches = create_torch_dataloader(
            data_config, config.model.action_horizon, config.batch_size, config.model, config.num_workers, max_frames
        )

    keys = ["state", "actions"]
    stats = {key: normalize.RunningStats() for key in keys}

    for batch in tqdm.tqdm(data_loader, total=num_batches, desc="Computing stats"):
        for key in keys:
            stats[key].update(np.asarray(batch[key]))

    # Fix: the original comprehension reused the name `stats` for its loop
    # variable, shadowing the dict it iterates — confusing to read and easy to
    # break. Use a distinct name for the per-key RunningStats object.
    norm_stats = {key: running.get_statistics() for key, running in stats.items()}

    output_path = config.assets_dirs / data_config.repo_id
    print(f"Writing stats to: {output_path}")
    normalize.save(output_path, norm_stats)


if __name__ == "__main__":
    tyro.cli(main)
scripts/docker/compose.yml ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Run with:
# docker compose -f scripts/docker/compose.yml up --build
services:
  openpi_server:
    image: openpi_server
    build:
      context: ../..
      dockerfile: scripts/docker/serve_policy.Dockerfile
    init: true
    tty: true
    network_mode: host
    # Populate configured openpi data home to /openpi_assets inside the container.
    # Populate aws credential inside the container.
    volumes:
      - $PWD:/app
      - ${OPENPI_DATA_HOME:-~/.cache/openpi}:/openpi_assets
    # SERVER_ARGS is forwarded from the host environment (consumed by the
    # Dockerfile's CMD when launching serve_policy.py).
    environment:
      - SERVER_ARGS
      - OPENPI_DATA_HOME=/openpi_assets
      - IS_DOCKER=true

    # Comment out this block if not running on a machine with GPUs.
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
scripts/docker/install_docker_ubuntu22.sh ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# Installs Docker Engine on Ubuntu 22.04 from Docker's official apt repository,
# adds the current user to the docker group, and enables the services at boot.

# Add Docker's official GPG key:
sudo apt-get update
sudo apt-get install -y ca-certificates curl
sudo install -m 0755 -d /etc/apt/keyrings
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc

# Add the repository to Apt sources:
echo \
    "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
  $(. /etc/os-release && echo "$VERSION_CODENAME") stable" |
    sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
sudo apt-get update

sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

# Add current user to the 'docker' group, which allows them to use docker commands (docker build, docker run, etc).
# See https://docs.docker.com/engine/install/linux-postinstall/
username=$(whoami)
sudo usermod -aG docker "$username"

# Configure docker to start automatically on system boot.
sudo systemctl enable docker.service
sudo systemctl enable containerd.service

# https://forums.docker.com/t/docker-credential-desktop-exe-executable-file-not-found-in-path-using-wsl2/100225/5
# Fix: the original `[ ~/.docker/config.json ]` tested a non-empty string and
# was therefore always true; `-f` runs sed only when the file actually exists.
if [ -f ~/.docker/config.json ]; then
    sed -i 's/credsStore/credStore/g' ~/.docker/config.json
fi

echo ""
echo "********************************************************************"
echo "**** Restart to allow Docker permission changes to take effect. ****"
echo "********************************************************************"
echo ""
scripts/docker/install_nvidia_container_toolkit.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# Installs the NVIDIA Container Toolkit, which allows Docker containers to access NVIDIA GPUs.
# NVIDIA's official documentation: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html

# Register NVIDIA's signing key and apt repository for the toolkit packages.
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg &&
    curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list |
    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' |
    sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

# NVIDIA's documentation omits 'sudo' in the following command, but it is required.
# This uncomments the 'experimental' lines in the repo list.
sudo sed -i -e '/experimental/ s/^#//g' /etc/apt/sources.list.d/nvidia-container-toolkit.list
sudo apt-get update
sudo apt-get install -y nvidia-container-toolkit

# Register the NVIDIA runtime with Docker, then restart the daemon to apply.
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
scripts/docker/serve_policy.Dockerfile ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dockerfile for serving a PI policy.
2
+ # Based on UV's instructions: https://docs.astral.sh/uv/guides/integration/docker/#developing-in-a-container
3
+
4
+ # Build the container:
5
+ # docker build . -t openpi_server -f scripts/docker/serve_policy.Dockerfile
6
+
7
+ # Run the container:
8
+ # docker run --rm -it --network=host -v .:/app --gpus=all openpi_server /bin/bash
9
+
10
+ FROM nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04@sha256:2d913b09e6be8387e1a10976933642c73c840c0b735f0bf3c28d97fc9bc422e0
11
+ COPY --from=ghcr.io/astral-sh/uv:0.5.1 /uv /uvx /bin/
12
+
13
+ WORKDIR /app
14
+
15
+ # Needed because LeRobot uses git-lfs.
16
+ RUN apt-get update && apt-get install -y git git-lfs linux-headers-generic build-essential clang
17
+
18
+ # Copy from the cache instead of linking since it's a mounted volume
19
+ ENV UV_LINK_MODE=copy
20
+
21
+ # Write the virtual environment outside of the project directory so it doesn't
22
+ # leak out of the container when we mount the application code.
23
+ ENV UV_PROJECT_ENVIRONMENT=/.venv
24
+
25
+ # Install the project's dependencies using the lockfile and settings
26
+ RUN uv venv --python 3.11.9 $UV_PROJECT_ENVIRONMENT
27
+ RUN --mount=type=cache,target=/root/.cache/uv \
28
+ --mount=type=bind,source=uv.lock,target=uv.lock \
29
+ --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
30
+ --mount=type=bind,source=packages/openpi-client/pyproject.toml,target=packages/openpi-client/pyproject.toml \
31
+ --mount=type=bind,source=packages/openpi-client/src,target=packages/openpi-client/src \
32
+ GIT_LFS_SKIP_SMUDGE=1 uv sync --frozen --no-install-project --no-dev
33
+
34
+ # Copy transformers_replace files while preserving directory structure
35
+ COPY src/openpi/models_pytorch/transformers_replace/ /tmp/transformers_replace/
36
+ RUN /.venv/bin/python -c "import transformers; print(transformers.__file__)" | xargs dirname | xargs -I{} cp -r /tmp/transformers_replace/* {} && rm -rf /tmp/transformers_replace
37
+
38
+ CMD /bin/bash -c "uv run scripts/serve_policy.py $SERVER_ARGS"
scripts/serve_policy.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ import enum
3
+ import logging
4
+ import socket
5
+
6
+ import tyro
7
+
8
+ from openpi.policies import policy as _policy
9
+ from openpi.policies import policy_config as _policy_config
10
+ from openpi.serving import websocket_policy_server
11
+ from openpi.training import config as _config
12
+
13
+
14
class EnvMode(enum.Enum):
    """Environments for which a default serving policy is registered."""

    ALOHA = "aloha"
    ALOHA_SIM = "aloha_sim"
    DROID = "droid"
    LIBERO = "libero"
21
+
22
+
23
@dataclasses.dataclass
class Checkpoint:
    """Pointer to a trained policy checkpoint.

    Identifies both the training config that produced the checkpoint and the
    directory (local or GCS) where its weights live.
    """

    # Training config name (e.g., "pi0_aloha_sim").
    config: str
    # Checkpoint directory (e.g., "checkpoints/pi0_aloha_sim/exp/10000").
    dir: str
31
+
32
+
33
@dataclasses.dataclass
class Default:
    """Marker type: serve the default policy for the chosen environment."""
36
+
37
+
38
@dataclasses.dataclass
class Args:
    """Command-line arguments for the serve_policy script."""

    # Target environment; only consulted when serving a default (non-checkpoint) policy.
    env: EnvMode = EnvMode.ALOHA_SIM

    # Fallback prompt: applied when the incoming data lacks a "prompt" key and the
    # model defines no default prompt of its own.
    default_prompt: str | None = None

    # TCP port the policy server listens on.
    port: int = 8000
    # If set, wrap the policy in a recorder that logs its behavior for debugging.
    record: bool = False

    # How to load the policy; defaults to the environment's default policy.
    policy: Checkpoint | Default = dataclasses.field(default_factory=Default)
56
+
57
+
58
# Per-environment default checkpoints, used when no explicit checkpoint is provided.
DEFAULT_CHECKPOINT: dict[EnvMode, Checkpoint] = {
    EnvMode.ALOHA: Checkpoint(config="pi05_aloha", dir="gs://openpi-assets/checkpoints/pi05_base"),
    EnvMode.ALOHA_SIM: Checkpoint(config="pi0_aloha_sim", dir="gs://openpi-assets/checkpoints/pi0_aloha_sim"),
    EnvMode.DROID: Checkpoint(config="pi05_droid", dir="gs://openpi-assets/checkpoints/pi05_droid"),
    EnvMode.LIBERO: Checkpoint(config="pi05_libero", dir="gs://openpi-assets/checkpoints/pi05_libero"),
}
77
+
78
+
79
def create_default_policy(env: EnvMode, *, default_prompt: str | None = None) -> _policy.Policy:
    """Build the stock policy for *env*.

    Raises ValueError if no default checkpoint is registered for *env*.
    """
    checkpoint = DEFAULT_CHECKPOINT.get(env)
    if checkpoint is None:
        raise ValueError(f"Unsupported environment mode: {env}")
    return _policy_config.create_trained_policy(
        _config.get_config(checkpoint.config), checkpoint.dir, default_prompt=default_prompt
    )
86
+
87
+
88
+ def create_policy(args: Args) -> _policy.Policy:
89
+ """Create a policy from the given arguments."""
90
+ match args.policy:
91
+ case Checkpoint():
92
+ return _policy_config.create_trained_policy(
93
+ _config.get_config(args.policy.config), args.policy.dir, default_prompt=args.default_prompt
94
+ )
95
+ case Default():
96
+ return create_default_policy(args.env, default_prompt=args.default_prompt)
97
+
98
+
99
def main(args: Args) -> None:
    """Create the requested policy and serve it over a websocket until killed."""
    policy = create_policy(args)
    metadata = policy.metadata

    if args.record:
        # Wrap the policy so every inference call is recorded for debugging.
        policy = _policy.PolicyRecorder(policy, "policy_records")

    host = socket.gethostname()
    ip_address = socket.gethostbyname(host)
    logging.info("Creating server (host: %s, ip: %s)", host, ip_address)

    server = websocket_policy_server.WebsocketPolicyServer(
        policy=policy,
        host="0.0.0.0",
        port=args.port,
        metadata=metadata,
    )
    server.serve_forever()
118
+
119
+
120
# Script entrypoint: configure logging, parse CLI args with tyro, start the server.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, force=True)
    main(tyro.cli(Args))
scripts/train.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ import functools
3
+ import logging
4
+ import platform
5
+ from typing import Any
6
+
7
+ import etils.epath as epath
8
+ import flax.nnx as nnx
9
+ from flax.training import common_utils
10
+ import flax.traverse_util as traverse_util
11
+ import jax
12
+ import jax.experimental
13
+ import jax.numpy as jnp
14
+ import numpy as np
15
+ import optax
16
+ import tqdm_loggable.auto as tqdm
17
+ import wandb
18
+
19
+ import openpi.models.model as _model
20
+ import openpi.shared.array_typing as at
21
+ import openpi.shared.nnx_utils as nnx_utils
22
+ import openpi.training.checkpoints as _checkpoints
23
+ import openpi.training.config as _config
24
+ import openpi.training.data_loader as _data_loader
25
+ import openpi.training.optimizer as _optimizer
26
+ import openpi.training.sharding as sharding
27
+ import openpi.training.utils as training_utils
28
+ import openpi.training.weight_loaders as _weight_loaders
29
+
30
+
31
def init_logging():
    """Custom logging format for better readability.

    Replaces the standard level names with single letters (INFO -> I, ...)
    and applies a timestamped formatter to the root logger. If the root
    logger has no handlers yet (``logging.basicConfig`` was never called),
    a StreamHandler is installed instead of indexing into an empty handler
    list, which would raise IndexError. This matches the behavior of the
    PyTorch trainer's ``init_logging``.
    """
    level_mapping = {"DEBUG": "D", "INFO": "I", "WARNING": "W", "ERROR": "E", "CRITICAL": "C"}

    class CustomFormatter(logging.Formatter):
        def format(self, record):
            # Shorten the level name before delegating to the base formatter.
            record.levelname = level_mapping.get(record.levelname, record.levelname)
            return super().format(record)

    formatter = CustomFormatter(
        fmt="%(asctime)s.%(msecs)03d [%(levelname)s] %(message)-80s (%(process)d:%(filename)s:%(lineno)s)",
        datefmt="%H:%M:%S",
    )

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        # No handler configured yet: install one rather than assuming handlers[0] exists.
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    else:
        logger.handlers[0].setFormatter(formatter)
48
+
49
+
50
def init_wandb(config: _config.TrainConfig, *, resuming: bool, log_code: bool = False, enabled: bool = True):
    """Start a wandb run (fresh or resumed) tied to the checkpoint directory.

    Raises FileNotFoundError if the checkpoint directory does not exist.
    """
    if not enabled:
        wandb.init(mode="disabled")
        return

    ckpt_dir = config.checkpoint_dir
    if not ckpt_dir.exists():
        raise FileNotFoundError(f"Checkpoint directory {ckpt_dir} does not exist.")

    wandb_id_file = ckpt_dir / "wandb_id.txt"
    if resuming:
        # Re-attach to the run that was recorded when the experiment was first created.
        wandb.init(id=wandb_id_file.read_text().strip(), resume="must", project=config.project_name)
    else:
        wandb.init(
            name=config.exp_name,
            config=dataclasses.asdict(config),
            project=config.project_name,
        )
        # Persist the run id so a later --resume can re-attach to this run.
        wandb_id_file.write_text(wandb.run.id)

    if log_code:
        wandb.run.log_code(epath.Path(__file__).parent.parent)
71
+
72
+
73
def _load_weights_and_validate(loader: _weight_loaders.WeightLoader, params_shape: at.Params) -> at.Params:
    """Load weights via *loader* and validate them against *params_shape*.

    Returns only the entries the loader actually filled in; placeholder
    ``jax.ShapeDtypeStruct`` entries are dropped.
    """
    loaded_params = loader.load(params_shape)
    at.check_pytree_equality(expected=params_shape, got=loaded_params, check_shapes=True, check_dtypes=True)

    # Entries still holding jax.ShapeDtypeStruct were never loaded; keep only real arrays.
    flat = traverse_util.flatten_dict(loaded_params)
    loaded_only = {key: value for key, value in flat.items() if not isinstance(value, jax.ShapeDtypeStruct)}
    return traverse_util.unflatten_dict(loaded_only)
82
+
83
+
84
@at.typecheck
def init_train_state(
    config: _config.TrainConfig, init_rng: at.KeyArrayLike, mesh: jax.sharding.Mesh, *, resume: bool
) -> tuple[training_utils.TrainState, Any]:
    """Build the initial TrainState and its FSDP sharding.

    When ``resume`` is True, only the *shape* of the train state (from
    ``jax.eval_shape``) and its sharding are returned; the actual values are
    expected to be restored from a checkpoint by the caller. Otherwise the
    state is materialized under ``jax.jit`` with pretrained weights merged in.
    """
    tx = _optimizer.create_optimizer(config.optimizer, config.lr_schedule, weight_decay_mask=None)

    def init(rng: at.KeyArrayLike, partial_params: at.Params | None = None) -> training_utils.TrainState:
        rng, model_rng = jax.random.split(rng)
        # initialize the model (and its parameters).
        model = config.model.create(model_rng)

        # Merge the partial params into the model.
        if partial_params is not None:
            graphdef, state = nnx.split(model)
            # This will produce an error if the partial params are not a subset of the state.
            state.replace_by_pure_dict(partial_params)
            model = nnx.merge(graphdef, state)

        params = nnx.state(model)
        # Convert frozen params to bfloat16.
        params = nnx_utils.state_map(params, config.freeze_filter, lambda p: p.replace(p.value.astype(jnp.bfloat16)))

        return training_utils.TrainState(
            step=0,
            params=params,
            model_def=nnx.graphdef(model),
            tx=tx,
            # Optimizer state only tracks the trainable subset of params.
            opt_state=tx.init(params.filter(config.trainable_filter)),
            ema_decay=config.ema_decay,
            ema_params=None if config.ema_decay is None else params,
        )

    # Abstract evaluation: get shapes/dtypes without allocating any arrays.
    train_state_shape = jax.eval_shape(init, init_rng)
    state_sharding = sharding.fsdp_sharding(train_state_shape, mesh, log=True)

    if resume:
        return train_state_shape, state_sharding

    partial_params = _load_weights_and_validate(config.weight_loader, train_state_shape.params.to_pure_dict())
    replicated_sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())

    # Initialize the train state and mix in the partial params.
    train_state = jax.jit(
        init,
        donate_argnums=(1,),  # donate the partial params buffer.
        in_shardings=replicated_sharding,
        out_shardings=state_sharding,
    )(init_rng, partial_params)

    return train_state, state_sharding
134
+
135
+
136
@at.typecheck
def train_step(
    config: _config.TrainConfig,
    rng: at.KeyArrayLike,
    state: training_utils.TrainState,
    batch: tuple[_model.Observation, _model.Actions],
) -> tuple[training_utils.TrainState, dict[str, at.Array]]:
    """Run one optimization step.

    Returns the updated train state and an info dict with "loss",
    "grad_norm" and "param_norm" (the latter over kernel-like params only).
    """
    model = nnx.merge(state.model_def, state.params)
    model.train()

    @at.typecheck
    def loss_fn(
        model: _model.BaseModel, rng: at.KeyArrayLike, observation: _model.Observation, actions: _model.Actions
    ):
        chunked_loss = model.compute_loss(rng, observation, actions, train=True)
        return jnp.mean(chunked_loss)

    # Fold the step count into the rng so each step draws a distinct key.
    train_rng = jax.random.fold_in(rng, state.step)
    observation, actions = batch

    # Filter out frozen params.
    diff_state = nnx.DiffState(0, config.trainable_filter)
    loss, grads = nnx.value_and_grad(loss_fn, argnums=diff_state)(model, train_rng, observation, actions)

    params = state.params.filter(config.trainable_filter)
    updates, new_opt_state = state.tx.update(grads, state.opt_state, params)
    new_params = optax.apply_updates(params, updates)

    # Update the model in place and return the new full state.
    nnx.update(model, new_params)
    new_params = nnx.state(model)

    new_state = dataclasses.replace(state, step=state.step + 1, params=new_params, opt_state=new_opt_state)
    if state.ema_decay is not None:
        # Exponential moving average over the full parameter tree.
        new_state = dataclasses.replace(
            new_state,
            ema_params=jax.tree.map(
                lambda old, new: state.ema_decay * old + (1 - state.ema_decay) * new, state.ema_params, new_params
            ),
        )

    # Filter out params that aren't kernels.
    kernel_params = nnx.state(
        model,
        nnx.All(
            nnx.Param,
            nnx.Not(nnx_utils.PathRegex(".*/(bias|scale|pos_embedding|input_embedding)")),
            lambda _, x: x.value.ndim > 1,
        ),
    )
    info = {
        "loss": loss,
        "grad_norm": optax.global_norm(grads),
        "param_norm": optax.global_norm(kernel_params),
    }
    return new_state, info
192
+
193
+
194
def main(config: _config.TrainConfig):
    """End-to-end JAX training: data loading, state init/restore, train loop, checkpointing."""
    init_logging()
    logging.info(f"Running on: {platform.node()}")

    if config.batch_size % jax.device_count() != 0:
        raise ValueError(
            f"Batch size {config.batch_size} must be divisible by the number of devices {jax.device_count()}."
        )

    # Persistent compilation cache to speed up repeated runs.
    jax.config.update("jax_compilation_cache_dir", str(epath.Path("~/.cache/jax").expanduser()))

    rng = jax.random.key(config.seed)
    train_rng, init_rng = jax.random.split(rng)

    mesh = sharding.make_mesh(config.fsdp_devices)
    data_sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec(sharding.DATA_AXIS))
    replicated_sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())

    checkpoint_manager, resuming = _checkpoints.initialize_checkpoint_dir(
        config.checkpoint_dir,
        keep_period=config.keep_period,
        overwrite=config.overwrite,
        resume=config.resume,
    )
    init_wandb(config, resuming=resuming, enabled=config.wandb_enabled)

    data_loader = _data_loader.create_data_loader(
        config,
        sharding=data_sharding,
        shuffle=True,
    )
    data_iter = iter(data_loader)
    batch = next(data_iter)
    logging.info(f"Initialized data loader:\n{training_utils.array_tree_to_info(batch)}")

    # Log images from first batch to sanity check.
    images_to_log = [
        wandb.Image(np.concatenate([np.array(img[i]) for img in batch[0].images.values()], axis=1))
        for i in range(min(5, len(next(iter(batch[0].images.values())))))
    ]
    wandb.log({"camera_views": images_to_log}, step=0)

    train_state, train_state_sharding = init_train_state(config, init_rng, mesh, resume=resuming)
    jax.block_until_ready(train_state)
    logging.info(f"Initialized train state:\n{training_utils.array_tree_to_info(train_state.params)}")

    if resuming:
        train_state = _checkpoints.restore_state(checkpoint_manager, train_state, data_loader)

    ptrain_step = jax.jit(
        functools.partial(train_step, config),
        in_shardings=(replicated_sharding, train_state_sharding, data_sharding),
        out_shardings=(train_state_sharding, replicated_sharding),
        donate_argnums=(1,),  # donate the previous train state's buffers.
    )

    start_step = int(train_state.step)
    pbar = tqdm.tqdm(
        range(start_step, config.num_train_steps),
        initial=start_step,
        total=config.num_train_steps,
        dynamic_ncols=True,
    )

    infos = []
    for step in pbar:
        with sharding.set_mesh(mesh):
            train_state, info = ptrain_step(train_rng, train_state, batch)
        infos.append(info)
        if step % config.log_interval == 0:
            # Average the metrics accumulated since the last log point.
            stacked_infos = common_utils.stack_forest(infos)
            reduced_info = jax.device_get(jax.tree.map(jnp.mean, stacked_infos))
            info_str = ", ".join(f"{k}={v:.4f}" for k, v in reduced_info.items())
            pbar.write(f"Step {step}: {info_str}")
            wandb.log(reduced_info, step=step)
            infos = []
        batch = next(data_iter)

        if (step % config.save_interval == 0 and step > start_step) or step == config.num_train_steps - 1:
            _checkpoints.save_state(checkpoint_manager, train_state, data_loader, step)

    logging.info("Waiting for checkpoint manager to finish")
    checkpoint_manager.wait_until_finished()
277
+
278
+
279
# Script entrypoint: parse the training config from the CLI and run training.
if __name__ == "__main__":
    main(_config.cli())
scripts/train_pytorch.py ADDED
@@ -0,0 +1,632 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ PyTorch training entrypoint for PI0/PI05 with multi-GPU and multi-node (DDP) support.
3
+ This script mirrors the behavior of the JAX trainer (`scripts/train.py`) but runs
4
+ entirely in PyTorch using the `PI0Pytorch` model and your existing config/data
5
+ pipeline from `src/openpi/training/config.py` and `src/openpi/training/data_loader.py`.
6
+
7
+ Usage
8
+ Single GPU:
9
+ python scripts/train_pytorch.py <config_name> --exp_name <run_name> --save_interval <interval>
10
+ Example:
11
+ python scripts/train_pytorch.py debug --exp_name pytorch_ddp_test
12
+ python scripts/train_pytorch.py debug --exp_name pytorch_ddp_test --resume # Resume from latest checkpoint
13
+ Multi-GPU (single node):
14
+ torchrun --standalone --nnodes=1 --nproc_per_node=<num_gpus> scripts/train_pytorch.py <config_name> --exp_name <run_name>
15
+ Example:
16
+ torchrun --standalone --nnodes=1 --nproc_per_node=2 scripts/train_pytorch.py pi0_aloha_sim --exp_name pytorch_ddp_test
17
+ torchrun --standalone --nnodes=1 --nproc_per_node=2 scripts/train_pytorch.py pi0_aloha_sim --exp_name pytorch_ddp_test --resume
18
+ Multi-Node Training:
19
+ torchrun \
20
+ --nnodes=<num_nodes> --nproc_per_node=<gpus_per_node> --node_rank=<rank_of_node> \
21
+ --master_addr=<master_ip> --master_port=<port> \
22
+ scripts/train_pytorch.py <config_name> --exp_name=<run_name> --save_interval <interval>
23
+
24
+ """
25
+
26
+ import dataclasses
27
+ import gc
28
+ import logging
29
+ import os
30
+ import platform
31
+ import shutil
32
+ import time
33
+
34
+ import jax
35
+ import numpy as np
36
+ import safetensors.torch
37
+ import torch
38
+ import torch.distributed as dist
39
+ import torch.nn.parallel
40
+ import tqdm
41
+ import wandb
42
+
43
+ import openpi.models.pi0_config
44
+ import openpi.models_pytorch.pi0_pytorch
45
+ import openpi.shared.normalize as _normalize
46
+ import openpi.training.config as _config
47
+ import openpi.training.data_loader as _data
48
+
49
+
50
def init_logging():
    """Install a compact, timestamped console format on the root logger."""
    short_names = {"DEBUG": "D", "INFO": "I", "WARNING": "W", "ERROR": "E", "CRITICAL": "C"}

    class _ShortLevelFormatter(logging.Formatter):
        def format(self, record):
            # Shorten the level name before delegating to the base formatter.
            record.levelname = short_names.get(record.levelname, record.levelname)
            return super().format(record)

    fmt = _ShortLevelFormatter(
        fmt="%(asctime)s.%(msecs)03d [%(levelname)s] %(message)-80s (%(process)d:%(filename)s:%(lineno)s)",
        datefmt="%H:%M:%S",
    )
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    if root.handlers:
        root.handlers[0].setFormatter(fmt)
    else:
        # No handler configured yet (basicConfig never ran): install one.
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(fmt)
        root.addHandler(stream_handler)
70
+
71
+
72
def init_wandb(config: _config.TrainConfig, *, resuming: bool, enabled: bool = True):
    """Initialize wandb logging.

    Raises FileNotFoundError if the checkpoint directory does not exist.
    """
    if not enabled:
        wandb.init(mode="disabled")
        return

    ckpt_dir = config.checkpoint_dir
    if not ckpt_dir.exists():
        raise FileNotFoundError(f"Checkpoint directory {ckpt_dir} does not exist.")

    run_id_path = ckpt_dir / "wandb_id.txt"
    if resuming:
        # Re-attach to the run that was recorded when the experiment was created.
        wandb.init(id=run_id_path.read_text().strip(), resume="must", project=config.project_name)
    else:
        wandb.init(
            name=config.exp_name,
            config=dataclasses.asdict(config),
            project=config.project_name,
        )
        # Persist the run id so a later --resume can re-attach to this run.
        run_id_path.write_text(wandb.run.id)
92
+
93
+
94
def setup_ddp():
    """Initialize torch.distributed when launched under torchrun and pick the local device.

    Returns a (use_ddp, local_rank, device) tuple. DDP is enabled only when
    the WORLD_SIZE environment variable (set by torchrun) is greater than 1.
    """
    world_size = int(os.environ.get("WORLD_SIZE", "1"))
    use_ddp = world_size > 1
    if use_ddp and not torch.distributed.is_initialized():
        # NCCL for GPU jobs, gloo as the CPU fallback.
        backend = "nccl" if torch.cuda.is_available() else "gloo"
        torch.distributed.init_process_group(backend=backend, init_method="env://")

    # Turn on verbose DDP debugging unless the caller configured it already.
    if os.environ.get("TORCH_DISTRIBUTED_DEBUG") is None:
        os.environ["TORCH_DISTRIBUTED_DEBUG"] = "INFO"

    local_rank = int(os.environ.get("LOCAL_RANK", os.environ.get("RANK", "0")))
    device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        torch.cuda.set_device(device)
    return use_ddp, local_rank, device
110
+
111
+
112
def cleanup_ddp():
    """Tear down the process group (barrier first); no-op when DDP was never initialized."""
    if not torch.distributed.is_initialized():
        return
    # Synchronize all ranks before destroying the group.
    torch.distributed.barrier()
    torch.distributed.destroy_process_group()
116
+
117
+
118
def set_seed(seed: int, local_rank: int):
    """Seed torch and numpy with a per-rank offset so each rank draws a distinct stream."""
    rank_seed = seed + local_rank
    torch.manual_seed(rank_seed)
    np.random.seed(rank_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(rank_seed)
123
+
124
+
125
def build_datasets(config: _config.TrainConfig):
    """Create the PyTorch-framework data loader and return it with its resolved data config."""
    loader = _data.create_data_loader(config, framework="pytorch", shuffle=True)
    return loader, loader.data_config()
129
+
130
+
131
def get_model_state_dict(model):
    """Return the model's state dict, unwrapping a DistributedDataParallel wrapper if present."""
    target = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
    return target.state_dict()
138
+
139
+
140
def get_model_parameters(model):
    """Return an iterator over model parameters, unwrapping a DDP wrapper if present."""
    target = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
    return target.parameters()
147
+
148
+
149
def save_checkpoint(model, optimizer, global_step, config, is_main, data_config):
    """Save a checkpoint with model state, optimizer state, and metadata.

    Only the main rank writes. The checkpoint is first written into a
    "tmp_<step>" directory and then renamed into place, so readers never
    observe a partially-written checkpoint. Saving happens only on
    `config.save_interval` boundaries or at the final training step.
    """
    if not is_main:
        return

    # Only save if it's time to save or if it's the final step
    if (global_step % config.save_interval == 0 and global_step > 0) or global_step == config.num_train_steps - 1:
        # Create temporary directory for atomic checkpoint saving
        final_ckpt_dir = config.checkpoint_dir / f"{global_step}"
        tmp_ckpt_dir = config.checkpoint_dir / f"tmp_{global_step}"

        # Remove any existing temp directory and create new one
        if tmp_ckpt_dir.exists():
            shutil.rmtree(tmp_ckpt_dir)
        tmp_ckpt_dir.mkdir(parents=True, exist_ok=True)

        # Save model state using safetensors (handle shared tensors)
        model_to_save = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
        safetensors.torch.save_model(model_to_save, tmp_ckpt_dir / "model.safetensors")

        # Save optimizer state using PyTorch format
        torch.save(optimizer.state_dict(), tmp_ckpt_dir / "optimizer.pt")

        # Save training metadata, including the config as a plain dict.
        metadata = {
            "global_step": global_step,
            "config": dataclasses.asdict(config),
            "timestamp": time.time(),
        }
        torch.save(metadata, tmp_ckpt_dir / "metadata.pt")

        # save norm stats alongside the checkpoint so inference can denormalize
        norm_stats = data_config.norm_stats
        if norm_stats is not None and data_config.asset_id is not None:
            _normalize.save(tmp_ckpt_dir / "assets" / data_config.asset_id, norm_stats)

        # Atomically move temp directory to final location
        if final_ckpt_dir.exists():
            shutil.rmtree(final_ckpt_dir)
        tmp_ckpt_dir.rename(final_ckpt_dir)

        logging.info(f"Saved checkpoint at step {global_step} -> {final_ckpt_dir}")

        # Log checkpoint to wandb
        if config.wandb_enabled:
            wandb.log({"checkpoint_step": global_step}, step=global_step)
195
+
196
+
197
def load_checkpoint(model, optimizer, checkpoint_dir, device):
    """Load the latest checkpoint into *model* and *optimizer*; return the saved global step.

    Picks the highest numeric subdirectory of *checkpoint_dir* (skipping
    in-progress "tmp_*" dirs). Memory is aggressively released between the
    model, optimizer, and metadata loads to keep peak GPU usage down; an OOM
    during loading is re-raised with remediation advice.

    Raises FileNotFoundError if no checkpoint (or a required component) is found.
    """
    checkpoint_steps = [
        int(d.name)
        for d in checkpoint_dir.iterdir()
        if d.is_dir() and d.name.isdigit() and not d.name.startswith("tmp_")
    ]

    if not checkpoint_steps:
        raise FileNotFoundError(f"No checkpoints found in {checkpoint_dir}")

    latest_step = max(checkpoint_steps)
    ckpt_dir = checkpoint_dir / f"{latest_step}"

    # Clear memory before loading checkpoints
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()
    log_memory_usage(device, latest_step, "before_loading_checkpoint")

    try:
        # Load model state with error handling
        logging.info("Loading model state...")
        safetensors_path = ckpt_dir / "model.safetensors"

        if safetensors_path.exists():
            # Unwrap DDP so parameter names match the saved (unwrapped) state.
            model_to_load = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
            safetensors.torch.load_model(model_to_load, safetensors_path, device=str(device))
            logging.info("Loaded model state from safetensors format")
        else:
            raise FileNotFoundError(f"No model checkpoint found at {ckpt_dir}")

        torch.cuda.empty_cache()
        gc.collect()
        log_memory_usage(device, latest_step, "after_loading_model")

        # Load optimizer state with error handling
        logging.info("Loading optimizer state...")
        optimizer_path = ckpt_dir / "optimizer.pt"

        if optimizer_path.exists():
            optimizer_state_dict = torch.load(optimizer_path, map_location=device, weights_only=False)
            logging.info("Loaded optimizer state from pt format")
        else:
            raise FileNotFoundError(f"No optimizer checkpoint found at {ckpt_dir}")

        optimizer.load_state_dict(optimizer_state_dict)
        # Drop the raw state dict immediately; load_state_dict made its own copy.
        del optimizer_state_dict
        torch.cuda.empty_cache()
        gc.collect()
        log_memory_usage(device, latest_step, "after_loading_optimizer")

        # Load metadata
        logging.info("Loading metadata...")
        metadata = torch.load(ckpt_dir / "metadata.pt", map_location=device, weights_only=False)
        # Fall back to the directory name if metadata lacks the step.
        global_step = metadata.get("global_step", latest_step)
        del metadata
        torch.cuda.empty_cache()
        gc.collect()
        log_memory_usage(device, latest_step, "after_loading_metadata")

        logging.info(f"Successfully loaded all checkpoint components from step {latest_step}")
        return global_step

    except RuntimeError as e:
        if "out of memory" in str(e):
            # Clear memory and provide detailed error message
            torch.cuda.empty_cache()
            gc.collect()
            logging.error(f"Out of memory error while loading checkpoint: {e!s}")
            log_memory_usage(device, latest_step, "after_oom_error")
            raise RuntimeError(
                "Out of memory while loading checkpoint. Try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True"
            ) from e
        raise
272
+
273
+
274
def get_latest_checkpoint_step(checkpoint_dir):
    """Return the highest completed checkpoint step in *checkpoint_dir*, or None.

    Only fully-written checkpoints count: directories whose name is purely
    numeric. In-progress "tmp_<step>" directories are ignored.
    """
    steps = []
    for entry in checkpoint_dir.iterdir():
        if entry.is_dir() and entry.name.isdigit() and not entry.name.startswith("tmp_"):
            steps.append(int(entry.name))
    return max(steps) if steps else None
282
+
283
+
284
def log_memory_usage(device, step, phase="unknown"):
    """Log allocated/reserved/peak CUDA memory for *device*; no-op without CUDA."""
    if not torch.cuda.is_available():
        return

    gib = 1e9
    allocated = torch.cuda.memory_allocated(device) / gib
    reserved = torch.cuda.memory_reserved(device) / gib
    # "free" here means reserved-but-unallocated bytes inside the caching allocator.
    free = (torch.cuda.memory_reserved(device) - torch.cuda.memory_allocated(device)) / gib

    stats = torch.cuda.memory_stats(device)
    peak_allocated = stats.get("allocated_bytes.all.peak", 0) / gib
    peak_reserved = stats.get("reserved_bytes.all.peak", 0) / gib

    ddp_info = ""
    if dist.is_initialized():
        ddp_info = f" | DDP: rank={dist.get_rank()}, world_size={dist.get_world_size()}"

    logging.info(
        f"Step {step} ({phase}): GPU memory - allocated: {allocated:.2f}GB, reserved: {reserved:.2f}GB, free: {free:.2f}GB, peak_allocated: {peak_allocated:.2f}GB, peak_reserved: {peak_reserved:.2f}GB{ddp_info}"
    )
307
+
308
+
309
+ def train_loop(config: _config.TrainConfig):
310
+ use_ddp, local_rank, device = setup_ddp()
311
+ is_main = (not use_ddp) or (dist.get_rank() == 0)
312
+ set_seed(config.seed, local_rank)
313
+
314
+ # Initialize checkpoint directory and wandb
315
+ resuming = False
316
+ if config.resume:
317
+ # Find checkpoint directory based on experiment name
318
+ exp_checkpoint_dir = config.checkpoint_dir
319
+ if exp_checkpoint_dir.exists():
320
+ # Use validation to find the latest working checkpoint
321
+ latest_step = get_latest_checkpoint_step(exp_checkpoint_dir)
322
+ if latest_step is not None:
323
+ resuming = True
324
+ logging.info(
325
+ f"Resuming from experiment checkpoint directory: {exp_checkpoint_dir} at step {latest_step}"
326
+ )
327
+ else:
328
+ raise FileNotFoundError(f"No valid checkpoints found in {exp_checkpoint_dir} for resume")
329
+ else:
330
+ raise FileNotFoundError(f"Experiment checkpoint directory {exp_checkpoint_dir} does not exist for resume")
331
+ elif config.overwrite and config.checkpoint_dir.exists():
332
+ shutil.rmtree(config.checkpoint_dir)
333
+ logging.info(f"Overwriting checkpoint directory: {config.checkpoint_dir}")
334
+
335
+ # Create checkpoint directory with experiment name
336
+ if not resuming:
337
+ # For new runs, create experiment-specific checkpoint directory
338
+ exp_checkpoint_dir = config.checkpoint_dir
339
+ exp_checkpoint_dir.mkdir(parents=True, exist_ok=True)
340
+ logging.info(f"Created experiment checkpoint directory: {exp_checkpoint_dir}")
341
+ else:
342
+ # For resume, checkpoint_dir is already set to the experiment directory
343
+ logging.info(f"Using existing experiment checkpoint directory: {config.checkpoint_dir}")
344
+
345
+ # Initialize wandb (only on main process)
346
+ if is_main:
347
+ init_wandb(config, resuming=resuming, enabled=config.wandb_enabled)
348
+
349
+ # Build data loader using the unified data loader
350
+ # Calculate effective batch size per GPU for DDP
351
+ # For N GPUs, each GPU should get batch_size/N samples, so total across all GPUs is batch_size
352
+ world_size = torch.distributed.get_world_size() if use_ddp else 1
353
+ effective_batch_size = config.batch_size // world_size
354
+ logging.info(
355
+ f"Using batch size per GPU: {effective_batch_size} (total batch size across {world_size} GPUs: {config.batch_size})"
356
+ )
357
+
358
+ # Pass the original batch size to data loader - it will handle DDP splitting internally
359
+ loader, data_config = build_datasets(config)
360
+
361
+ # Log sample images to wandb on first batch
362
+ if is_main and config.wandb_enabled and not resuming:
363
+ # Create a separate data loader for sample batch to avoid consuming the main loader
364
+ sample_data_loader = _data.create_data_loader(config, framework="pytorch", shuffle=False)
365
+ sample_batch = next(iter(sample_data_loader))
366
+ # Convert observation and actions to torch tensors
367
+ observation, actions = sample_batch
368
+ sample_batch = observation.to_dict()
369
+ sample_batch["actions"] = actions
370
+
371
+ # Create sample images for wandb
372
+ images_to_log = []
373
+ # Get batch size from the first image tensor
374
+ batch_size = next(iter(sample_batch["image"].values())).shape[0]
375
+ for i in range(min(5, batch_size)):
376
+ # Concatenate all camera views horizontally for this batch item
377
+ # Convert from NCHW to NHWC format for wandb
378
+ img_concatenated = torch.cat([img[i].permute(1, 2, 0) for img in sample_batch["image"].values()], axis=1)
379
+ img_concatenated = img_concatenated.cpu().numpy()
380
+ images_to_log.append(wandb.Image(img_concatenated))
381
+
382
+ wandb.log({"camera_views": images_to_log}, step=0)
383
+
384
+ # Clear sample batch from memory aggressively
385
+ del sample_batch, observation, actions, images_to_log, img_concatenated
386
+ del sample_data_loader # Also delete the sample data loader
387
+ gc.collect()
388
+ if torch.cuda.is_available():
389
+ torch.cuda.empty_cache()
390
+ logging.info("Cleared sample batch and data loader from memory")
391
+
392
+ # Build model
393
+ if not isinstance(config.model, openpi.models.pi0_config.Pi0Config):
394
+ # Convert dataclass to Pi0Config if needed
395
+ model_cfg = openpi.models.pi0_config.Pi0Config(
396
+ dtype=config.pytorch_training_precision,
397
+ action_dim=config.model.action_dim,
398
+ action_horizon=config.model.action_horizon,
399
+ max_token_len=config.model.max_token_len,
400
+ paligemma_variant=getattr(config.model, "paligemma_variant", "gemma_2b"),
401
+ action_expert_variant=getattr(config.model, "action_expert_variant", "gemma_300m"),
402
+ pi05=getattr(config.model, "pi05", False),
403
+ )
404
+ else:
405
+ model_cfg = config.model
406
+ # Update dtype to match pytorch_training_precision
407
+ object.__setattr__(model_cfg, "dtype", config.pytorch_training_precision)
408
+
409
+ model = openpi.models_pytorch.pi0_pytorch.PI0Pytorch(model_cfg).to(device)
410
+
411
+ if hasattr(model, "gradient_checkpointing_enable"):
412
+ enable_gradient_checkpointing = True
413
+ model.gradient_checkpointing_enable()
414
+ logging.info("Enabled gradient checkpointing for memory optimization")
415
+ else:
416
+ enable_gradient_checkpointing = False
417
+ logging.info("Gradient checkpointing is not supported for this model")
418
+
419
+ # Log initial memory usage after model creation
420
+ if is_main and torch.cuda.is_available():
421
+ log_memory_usage(device, 0, "after_model_creation")
422
+
423
+ # Enable memory optimizations for large-scale training
424
+ if world_size >= 8:
425
+ torch.backends.cudnn.benchmark = True
426
+ torch.backends.cuda.matmul.allow_tf32 = True
427
+ torch.backends.cudnn.allow_tf32 = True
428
+ # Set memory allocation configuration
429
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128,expandable_segments:True"
430
+ logging.info("Enabled memory optimizations for 8+ GPU training")
431
+
432
+ if use_ddp:
433
+ model = torch.nn.parallel.DistributedDataParallel(
434
+ model,
435
+ device_ids=[device.index] if device.type == "cuda" else None,
436
+ find_unused_parameters=True, # Disable for memory efficiency
437
+ gradient_as_bucket_view=True, # Enable for memory efficiency
438
+ static_graph=world_size >= 8, # Enable for 8+ GPUs
439
+ )
440
+
441
+ # Load weights from weight_loader if specified (for fine-tuning)
442
+ if config.pytorch_weight_path is not None:
443
+ logging.info(f"Loading weights from: {config.pytorch_weight_path}")
444
+
445
+ model_path = os.path.join(config.pytorch_weight_path, "model.safetensors")
446
+ safetensors.torch.load_model(
447
+ (model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model), model_path
448
+ )
449
+ logging.info(f"Loaded PyTorch weights from {config.pytorch_weight_path}")
450
+
451
+ # Optimizer + learning rate schedule from config
452
+ warmup_steps = config.lr_schedule.warmup_steps
453
+ peak_lr = config.lr_schedule.peak_lr
454
+ decay_steps = config.lr_schedule.decay_steps
455
+ end_lr = config.lr_schedule.decay_lr
456
+
457
+ # Create optimizer with config parameters
458
+ optim = torch.optim.AdamW(
459
+ model.parameters(),
460
+ lr=peak_lr,
461
+ betas=(config.optimizer.b1, config.optimizer.b2),
462
+ eps=config.optimizer.eps,
463
+ weight_decay=config.optimizer.weight_decay,
464
+ )
465
+
466
+ # Load checkpoint if resuming
467
+ global_step = 0
468
+ if resuming:
469
+ global_step = load_checkpoint(model, optim, config.checkpoint_dir, device)
470
+ logging.info(f"Resumed training from step {global_step}")
471
+
472
+ def lr_schedule(step: int):
473
+ if step < warmup_steps:
474
+ # Match JAX behavior: start from peak_lr / (warmup_steps + 1)
475
+ init_lr = peak_lr / (warmup_steps + 1)
476
+ return init_lr + (peak_lr - init_lr) * step / warmup_steps
477
+ # cosine decay
478
+ progress = min(1.0, (step - warmup_steps) / max(1, decay_steps - warmup_steps))
479
+ cos = 0.5 * (1 + np.cos(np.pi * progress))
480
+ return end_lr + (peak_lr - end_lr) * cos
481
+
482
+ model.train()
483
+ start_time = time.time()
484
+ infos = [] # Collect stats over log interval
485
+ if is_main:
486
+ logging.info(
487
+ f"Running on: {platform.node()} | world_size={torch.distributed.get_world_size() if use_ddp else 1}"
488
+ )
489
+ logging.info(
490
+ f"Training config: batch_size={config.batch_size}, effective_batch_size={effective_batch_size}, num_train_steps={config.num_train_steps}"
491
+ )
492
+ logging.info(f"Memory optimizations: gradient_checkpointing={enable_gradient_checkpointing}")
493
+ logging.info(
494
+ f"LR schedule: warmup={warmup_steps}, peak_lr={peak_lr:.2e}, decay_steps={decay_steps}, end_lr={end_lr:.2e}"
495
+ )
496
+ logging.info(
497
+ f"Optimizer: {type(config.optimizer).__name__}, weight_decay={config.optimizer.weight_decay}, clip_norm={config.optimizer.clip_gradient_norm}"
498
+ )
499
+ logging.info("EMA is not supported for PyTorch training")
500
+ logging.info(f"Training precision: {model_cfg.dtype}")
501
+
502
+ # Training loop - iterate until we reach num_train_steps
503
+ pbar = (
504
+ tqdm.tqdm(total=config.num_train_steps, initial=global_step, desc="Training", disable=not is_main)
505
+ if is_main
506
+ else None
507
+ )
508
+
509
+ while global_step < config.num_train_steps:
510
+ # Set epoch for distributed training
511
+ if use_ddp and hasattr(loader, "set_epoch"):
512
+ loader.set_epoch(global_step // len(loader))
513
+
514
+ for observation, actions in loader:
515
+ # Check if we've reached the target number of steps
516
+ if global_step >= config.num_train_steps:
517
+ break
518
+
519
+ # The unified data loader returns (observation, actions) tuple
520
+ observation = jax.tree.map(lambda x: x.to(device), observation) # noqa: PLW2901
521
+ actions = actions.to(torch.float32) # noqa: PLW2901
522
+ actions = actions.to(device) # noqa: PLW2901
523
+
524
+ # Update LR
525
+ for pg in optim.param_groups:
526
+ pg["lr"] = lr_schedule(global_step)
527
+
528
+ # Forward pass
529
+ losses = model(observation, actions)
530
+ # Ensure losses is a tensor and handle different return types
531
+ if isinstance(losses, list | tuple):
532
+ losses = torch.stack(losses)
533
+ elif not isinstance(losses, torch.Tensor):
534
+ losses = torch.tensor(losses, device=device, dtype=torch.float32)
535
+
536
+ loss = losses.mean()
537
+
538
+ # Backward pass
539
+ loss.backward()
540
+
541
+ # Log memory usage after backward pass
542
+ if global_step < 5 and is_main and torch.cuda.is_available():
543
+ log_memory_usage(device, global_step, "after_backward")
544
+
545
+ # Gradient clipping
546
+ grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=config.optimizer.clip_gradient_norm)
547
+
548
+ # Optimizer step
549
+ optim.step()
550
+ optim.zero_grad(set_to_none=True)
551
+
552
+ # Clear gradients more aggressively
553
+ for param in model.parameters():
554
+ if param.grad is not None:
555
+ param.grad.detach_()
556
+ param.grad = None
557
+
558
+ # Collect stats
559
+ if is_main:
560
+ infos.append(
561
+ {
562
+ "loss": loss.item(),
563
+ "learning_rate": optim.param_groups[0]["lr"],
564
+ "grad_norm": float(grad_norm) if isinstance(grad_norm, torch.Tensor) else grad_norm,
565
+ }
566
+ )
567
+
568
+ if is_main and (global_step % config.log_interval == 0):
569
+ elapsed = time.time() - start_time
570
+
571
+ # Average stats over log interval
572
+ avg_loss = sum(info["loss"] for info in infos) / len(infos)
573
+ avg_lr = sum(info["learning_rate"] for info in infos) / len(infos)
574
+
575
+ avg_grad_norm = None
576
+ if any("grad_norm" in info for info in infos):
577
+ vals = [
578
+ info["grad_norm"] for info in infos if "grad_norm" in info and info["grad_norm"] is not None
579
+ ]
580
+ if len(vals) > 0:
581
+ avg_grad_norm = sum(vals) / len(vals)
582
+ logging.info(
583
+ f"step={global_step} loss={avg_loss:.4f} lr={avg_lr:.2e} grad_norm={avg_grad_norm:.2f} time={elapsed:.1f}s"
584
+ if avg_grad_norm is not None
585
+ else f"step={global_step} loss={avg_loss:.4f} lr={avg_lr:.2e} time={elapsed:.1f}s"
586
+ )
587
+
588
+ # Log to wandb
589
+ if config.wandb_enabled and len(infos) > 0:
590
+ log_payload = {
591
+ "loss": avg_loss,
592
+ "learning_rate": avg_lr,
593
+ "step": global_step,
594
+ "time_per_step": elapsed / config.log_interval,
595
+ }
596
+ if avg_grad_norm is not None:
597
+ log_payload["grad_norm"] = avg_grad_norm
598
+ wandb.log(log_payload, step=global_step)
599
+
600
+ start_time = time.time()
601
+ infos = [] # Reset stats collection
602
+
603
+ global_step += 1
604
+ # Save checkpoint using the new mechanism
605
+ save_checkpoint(model, optim, global_step, config, is_main, data_config)
606
+
607
+ # Update progress bar
608
+ if pbar is not None:
609
+ pbar.update(1)
610
+ pbar.set_postfix(
611
+ {"loss": f"{loss.item():.4f}", "lr": f"{optim.param_groups[0]['lr']:.2e}", "step": global_step}
612
+ )
613
+
614
+ # Close progress bar
615
+ if pbar is not None:
616
+ pbar.close()
617
+
618
+ # Finish wandb run
619
+ if is_main and config.wandb_enabled:
620
+ wandb.finish()
621
+
622
+ cleanup_ddp()
623
+
624
+
625
def main():
    """CLI entry point: set up logging, parse the training config, and run the loop."""
    init_logging()
    train_loop(_config.cli())


if __name__ == "__main__":
    main()
scripts/train_test.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ import os
3
+ import pathlib
4
+
5
+ import pytest
6
+
7
+ os.environ["JAX_PLATFORMS"] = "cpu"
8
+
9
+ from openpi.training import config as _config
10
+
11
+ from . import train
12
+
13
+
14
@pytest.mark.parametrize("config_name", ["debug"])
def test_train(tmp_path: pathlib.Path, config_name: str):
    """Smoke-test a short training run, then resume it for two more steps."""
    overrides = {
        "batch_size": 2,
        "checkpoint_base_dir": str(tmp_path / "checkpoint"),
        "exp_name": "test",
        "overwrite": False,
        "resume": False,
        "num_train_steps": 2,
        "log_interval": 1,
    }
    config = dataclasses.replace(_config._CONFIGS_DICT[config_name], **overrides)  # noqa: SLF001
    train.main(config)

    # test resuming: continue the same experiment from its checkpoint to step 4.
    config = dataclasses.replace(config, resume=True, num_train_steps=4)
    train.main(config)
src/openpi/__init__.py ADDED
File without changes
src/openpi/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (171 Bytes). View file
 
src/openpi/__pycache__/transforms.cpython-311.pyc ADDED
Binary file (26.2 kB). View file
 
src/openpi/conftest.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import pynvml
4
+ import pytest
5
+
6
+
7
def set_jax_cpu_backend_if_no_gpu() -> None:
    """Force JAX onto the CPU backend when NVML reports no usable NVIDIA GPU.

    Must run before jax is imported by any test for the env var to take effect.
    """
    try:
        pynvml.nvmlInit()
        pynvml.nvmlShutdown()
    except pynvml.NVMLError:
        # No GPU found.
        os.environ["JAX_PLATFORMS"] = "cpu"
14
+
15
+
16
def pytest_configure(config: pytest.Config) -> None:
    """Pytest hook: choose the JAX backend once, before tests run."""
    set_jax_cpu_backend_if_no_gpu()
src/openpi/models/__init__.py ADDED
File without changes
src/openpi/models/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (178 Bytes). View file
 
src/openpi/models/__pycache__/gemma.cpython-311.pyc ADDED
Binary file (27.3 kB). View file
 
src/openpi/models/__pycache__/gemma_fast.cpython-311.pyc ADDED
Binary file (21.6 kB). View file
 
src/openpi/models/__pycache__/lora.cpython-311.pyc ADDED
Binary file (9.01 kB). View file
 
src/openpi/models/__pycache__/model.cpython-311.pyc ADDED
Binary file (17.8 kB). View file
 
src/openpi/models/__pycache__/pi0.cpython-311.pyc ADDED
Binary file (17.1 kB). View file
 
src/openpi/models/__pycache__/pi0_config.cpython-311.pyc ADDED
Binary file (5.8 kB). View file
 
src/openpi/models/__pycache__/pi0_fast.cpython-311.pyc ADDED
Binary file (19.2 kB). View file
 
src/openpi/models/__pycache__/siglip.cpython-311.pyc ADDED
Binary file (16.3 kB). View file
 
src/openpi/models/__pycache__/tokenizer.cpython-311.pyc ADDED
Binary file (23.6 kB). View file
 
src/openpi/models/gemma.py ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Big Vision Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Gemma adaptation for Pi, taken from big_vision.
16
+
17
+ We follow this einsum axis naming convention:
18
+ B: batch
19
+ T: query length
20
+ S: k/v length
21
+ N: num query heads
22
+ K: num k/v heads
23
+ G: num query heads per k/v head
24
+ H: head dim
25
+ D: d_model ("features")
26
+ """
27
+
28
+ from collections.abc import Sequence
29
+ import dataclasses
30
+ from typing import Literal, TypeAlias
31
+
32
+ import einops
33
+ import flax.linen as nn
34
+ import jax
35
+ import jax.numpy as jnp
36
+
37
+ import openpi.models.lora as lora
38
+ import openpi.shared.array_typing as at
39
+ import openpi.training.sharding as sharding
40
+
41
+ PALIGEMMA_VOCAB_SIZE = 257_152
42
+
43
+
44
@dataclasses.dataclass
class Config:
    """Architecture hyperparameters for one Gemma expert."""

    width: int  # d_model: residual-stream / embedding dimension ("D").
    depth: int  # number of transformer blocks.
    mlp_dim: int  # hidden dimension of the gated feed-forward layer.
    num_heads: int  # number of query heads ("N").
    num_kv_heads: int  # number of key/value heads ("K"); 1 => multi-query attention.
    head_dim: int  # per-head dimension ("H").
    # Optional LoRA adapters keyed by module name ("attn", "ffn"); empty dict => no LoRA.
    lora_configs: dict[str, lora.LoRAConfig] = dataclasses.field(default_factory=dict)
53
+
54
+
55
+ Variant = Literal["dummy", "gemma_300m", "gemma_300m_lora", "gemma_2b", "gemma_2b_lora"]
56
+
57
+
58
def get_config(variant: Variant) -> Config:
    """Returns config for specified gemma variant."""
    # All real variants share the same attention head geometry.
    common = {"num_heads": 8, "num_kv_heads": 1, "head_dim": 256}

    if variant == "dummy":
        return Config(width=64, depth=4, mlp_dim=128, num_heads=8, num_kv_heads=1, head_dim=16)

    if variant in ("gemma_300m", "gemma_300m_lora"):
        # 311M params
        kwargs = {"width": 1024, "depth": 18, "mlp_dim": 4096, **common}
        if variant == "gemma_300m_lora":
            kwargs["lora_configs"] = {
                "attn": lora.LoRAConfig(rank=32, alpha=32.0),
                "ffn": lora.LoRAConfig(rank=32, alpha=32.0),
            }
        return Config(**kwargs)

    if variant in ("gemma_2b", "gemma_2b_lora"):
        kwargs = {"width": 2048, "depth": 18, "mlp_dim": 16_384, **common}
        if variant == "gemma_2b_lora":
            kwargs["lora_configs"] = {
                "attn": lora.LoRAConfig(rank=16, alpha=16.0),
                "ffn": lora.LoRAConfig(rank=16, alpha=16.0),
            }
        return Config(**kwargs)

    raise ValueError(f"Unknown variant: {variant}")
110
+
111
+
112
@at.typecheck
class RMSNorm(nn.Module):
    """RMSNorm with an optional adaptive (conditioned) scale/shift/gate path."""

    @nn.compact
    def __call__(self, x, cond):
        """Normalize x along the last axis.

        Returns (normed, gate): gate is None on the regular path, and the
        conditioning-derived gate tensor on the adaptive path.
        """
        dtype = x.dtype  # original dtype, could be half-precision
        var = jnp.mean(jnp.square(x.astype(jnp.float32)), axis=-1, keepdims=True)  # compute variance in float32
        normed_inputs = jnp.asarray(x * jnp.reciprocal(jnp.sqrt(var + 1e-06)))  # compute normalization in float32
        if cond is None:
            # regular RMSNorm
            # NOTE(review): (x.shape[-1]) is an int, not a 1-tuple; the resulting
            # parameter still has shape (d,), but (x.shape[-1],) would be clearer.
            scale = self.param("scale", nn.initializers.zeros_init(), (x.shape[-1]))
            normed_inputs = normed_inputs * (
                1 + scale
            )  # scale by learned parameter in float32 (matches Flax implementation)
            return normed_inputs.astype(dtype), None  # return in original dtype

        # adaptive RMSNorm: a zero-initialized Dense maps cond to per-feature
        # (scale, shift, gate), so at init this reduces to identity scale,
        # zero shift, and zero gate.
        modulation = nn.Dense(x.shape[-1] * 3, kernel_init=nn.initializers.zeros, dtype=dtype)(cond)
        scale, shift, gate = jnp.split(modulation[:, None, :], 3, axis=-1)
        normed_inputs = normed_inputs * (1 + scale) + shift  # scale and shift in float32
        return normed_inputs.astype(dtype), gate
132
+
133
+
134
@at.typecheck
class Embedder(nn.Module):
    """Embedder module with tied input/output embeddings."""

    vocab_size: int  # number of tokens in the vocabulary.
    embed_dim: int  # embedding dimension (first expert's width).

    def setup(self):
        self.input_embedding_table = self.param(
            "input_embedding",
            nn.initializers.normal(),
            (self.vocab_size, self.embed_dim),
        )

    def encode(self, x):
        """Look up token embeddings and apply Gemma's sqrt(embed_dim) input scaling."""
        x = self.input_embedding_table[(x,)]
        x *= jnp.sqrt(self.embed_dim).astype(x.dtype)
        return x

    def decode(self, x):
        """Project hidden states back to vocabulary logits via the tied embedding table."""
        return jnp.dot(x, self.input_embedding_table.T)
155
+
156
+
157
@at.typecheck
class Attention(nn.Module):
    """Attention module shared across a sequence of experts.

    Each expert has its own q/k/v/out projections, but all experts attend over
    the concatenation of every expert's tokens, so head geometry must match.
    Entries of `xs` may be None to skip an expert entirely.
    """

    configs: Sequence[Config]

    @nn.compact
    def __call__(self, xs, positions, attn_mask, kv_cache):
        # all experts must share the same head dim, num heads, and num kv heads for self-attention to work
        assert all(config.head_dim == self.configs[0].head_dim for config in self.configs)
        assert all(config.num_heads == self.configs[0].num_heads for config in self.configs)
        assert all(config.num_kv_heads == self.configs[0].num_kv_heads for config in self.configs)

        dtype = next(x.dtype for x in xs if x is not None)  # original dtype, could be half-precision

        # Project each active expert's tokens to (q, k, v); fused qkv einsum when
        # not using multi-query attention, separate q and kv einsums otherwise.
        qkvs = []
        for i, (x, config) in enumerate(zip(xs, self.configs, strict=True)):
            if x is None:
                continue
            if config.num_kv_heads == config.num_heads:
                qkv_einsum = lora.Einsum(
                    shape=(3, config.num_heads, config.width, config.head_dim),
                    name=_name("qkv_einsum", i),
                    init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0, 1)),
                    lora_config=config.lora_configs.get("attn"),
                )
                qkvs.append(qkv_einsum("BSD,3KDH->3BSKH", x))
            else:
                q_einsum = lora.Einsum(
                    shape=(config.num_heads, config.width, config.head_dim),
                    name=_name("q_einsum", i),
                    init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)),
                    lora_config=config.lora_configs.get("attn"),
                )
                q = q_einsum("BTD,NDH->BTNH", x)
                kv_einsum = lora.Einsum(
                    shape=(2, config.num_kv_heads, config.width, config.head_dim),
                    name=_name("kv_einsum", i),
                    init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0, 1)),
                    lora_config=config.lora_configs.get("attn"),
                )
                k, v = kv_einsum("BSD,2KDH->2BSKH", x)
                qkvs.append((q, k, v))

        # Concatenate all experts' q/k/v along the token axis.
        q, k, v = (jnp.concatenate(y, axis=1) for y in zip(*qkvs, strict=True))

        q = _apply_rope(q, positions=positions)
        q *= self.configs[0].head_dim ** -0.5  # standard 1/sqrt(head_dim) scaling

        k = _apply_rope(k, positions=positions)

        # should still be half-precision here (if input was half-precision)
        assert q.dtype == k.dtype == v.dtype == dtype

        # Prepend cached keys/values (autoregressive decoding).
        if kv_cache is not None:
            cache_k, cache_v = kv_cache
            k = jnp.concatenate([cache_k, k], axis=1)
            v = jnp.concatenate([cache_v, v], axis=1)

        # Group query heads per kv head (G = N / K), accumulate logits in float32.
        q = einops.rearrange(q, "B T (K G) H -> B T K G H", K=self.configs[0].num_kv_heads)
        logits = jnp.einsum("BTKGH,BSKH->BKGTS", q, k, preferred_element_type=jnp.float32)

        if attn_mask.shape != (q.shape[0], 1, q.shape[1], k.shape[1]):
            raise ValueError(
                f"Attention mask with shape {attn_mask.shape} but shapes for q and k are: {q.shape} and {k.shape}"
            )

        # big_neg = jnp.finfo(logits.dtype).min
        big_neg = -2.3819763e38  # See gemma/modules.py
        masked_logits = jnp.where(attn_mask[:, :, None, :, :], logits, big_neg)

        # Softmax in float32, then cast attention weights back to the input dtype.
        probs = jax.nn.softmax(masked_logits, axis=-1).astype(dtype)

        encoded = jnp.einsum("BKGTS,BSKH->BTKGH", probs, v)
        encoded = einops.rearrange(encoded, "B T K G H -> B T (K G) H")

        # Split the attended sequence back into per-expert segments and apply each
        # expert's own output projection; inactive experts yield None.
        out = []
        start = 0
        for i, (x, config) in enumerate(zip(xs, self.configs, strict=True)):
            if x is not None:
                end = start + x.shape[1]
                out_einsum = lora.Einsum(
                    shape=(config.num_heads, config.head_dim, config.width),
                    name=_name("attn_vec_einsum", i),
                    init_fn=nn.initializers.lecun_normal(in_axis=(-3, -2), out_axis=-1),
                    lora_config=config.lora_configs.get("attn"),
                )
                out.append(out_einsum("BTNH,NHD->BTD", encoded[:, start:end]))
                start = end
            else:
                out.append(None)

        # Also return the (possibly cache-extended) keys/values for reuse.
        return out, (k, v)
250
+
251
+
252
@at.typecheck
class FeedForward(nn.Module):
    """Gated (GeGLU-style) feed forward module: gelu(x @ W0) * (x @ W1) @ W_out."""

    features: int  # input/output dimension (d_model).
    hidden_dim: int  # intermediate MLP dimension.

    @nn.compact
    def __call__(self, x):
        dtype = x.dtype  # original dtype, could be half-precision
        # Two stacked projections: index 0 is the gate branch, index 1 the value branch.
        w_gating = self.param(
            "gating_einsum",
            nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)),
            (2, self.features, self.hidden_dim),
        ).astype(dtype)
        ff_gate = jnp.dot(x, w_gating[0])
        gate_value = nn.gelu(ff_gate)

        ff1 = jnp.dot(x, w_gating[1])
        activations = gate_value * ff1

        # Down-projection back to the residual stream width.
        w_linear = self.param(
            "linear",
            nn.initializers.lecun_normal(in_axis=-2, out_axis=-1),
            (self.hidden_dim, self.features),
        ).astype(dtype)
        outputs = jnp.dot(activations, w_linear)
        assert outputs.dtype == dtype
        return outputs
281
+
282
+
283
@at.typecheck
class Block(nn.Module):
    """Transformer block over a mixture of experts.

    Pre-norm attention and MLP sublayers with (optionally gated) residual
    connections. `xs` holds one activation tensor per expert; None entries
    skip that expert for the whole block.
    """

    configs: tuple[Config, ...]

    dropout: float = 0.0
    dropout_bdims: tuple[int, ...] = ()

    @nn.compact
    def __call__(self, xs, kv_cache, positions, attn_mask, adarms_cond, deterministic=True):  # noqa: FBT002
        xs = sharding.activation_sharding_constraint(xs)
        # No-op passthrough when dropout is disabled.
        drop = nn.Dropout(self.dropout, self.dropout_bdims) if self.dropout else lambda x, _: x

        attn = Attention(configs=self.configs, name="attn")

        # --- Attention sublayer: pre-norm (plain or adaptive), attend, residual. ---
        pre_attn = []
        gates = []
        for i, x in enumerate(xs):
            if x is not None:
                x, gate = RMSNorm(name=_name("pre_attention_norm", i))(x, adarms_cond[i])  # noqa: PLW2901
            pre_attn.append(x)
            gates.append(gate if x is not None else None)

        pre_attn = sharding.activation_sharding_constraint(pre_attn)
        post_attn, kv_cache = attn(pre_attn, positions, attn_mask, kv_cache)
        post_attn = jax.tree.map(lambda x: drop(x, deterministic), post_attn)
        post_attn = sharding.activation_sharding_constraint(post_attn)
        # Residual add; the adaptive-norm gate (when present) scales the update.
        xs = [_gated_residual(x, y, gate) for x, y, gate in zip(xs, post_attn, gates, strict=True)]
        xs = sharding.activation_sharding_constraint(xs)

        # --- MLP sublayer: pre-norm, per-expert feed-forward, residual. ---
        out = []
        gates = []
        for i, (x, config) in enumerate(zip(xs, self.configs, strict=True)):
            if x is not None:
                x, gate = RMSNorm(name=_name("pre_ffw_norm", i))(x, adarms_cond[i])  # noqa: PLW2901
                x = lora.FeedForward(  # noqa: PLW2901
                    features=config.width,
                    hidden_dim=config.mlp_dim,
                    name=_name("mlp", i),
                    lora_config=config.lora_configs.get("ffn"),
                )(x)
            out.append(x)
            gates.append(gate if x is not None else None)

        out = sharding.activation_sharding_constraint(out)
        out = jax.tree.map(lambda x: drop(x, deterministic), out)
        xs = [_gated_residual(x, y, gate) for x, y, gate in zip(xs, out, gates, strict=True)]
        xs = sharding.activation_sharding_constraint(xs)

        return xs, kv_cache
334
+
335
+
336
+ KVCache: TypeAlias = tuple[at.Float[at.Array, "l b _t _k _h"], at.Float[at.Array, "l b _t _v _h"]]
337
+
338
+
339
@at.typecheck
class Module(nn.Module):
    """Transformer model, supporting a mixture of different weights for different tokens."""

    configs: Sequence[Config]  # list of configs, one for each expert
    embed_dtype: str  # dtype name used for activations throughout the stack.

    dropout: float = 0.0
    dropout_bdims: tuple[int, ...] = ()  # Every float is dropped independently.
    adarms: bool = False  # whether adaptive RMSNorm conditioning is used.

    def setup(self):
        # all experts must have the same depth
        assert all(config.depth == self.configs[0].depth for config in self.configs)

        self.embedder = Embedder(
            vocab_size=PALIGEMMA_VOCAB_SIZE,
            embed_dim=self.configs[0].width,  # embedder for first expert only
            name="embedder",
        )
        # Rematerialize (gradient-checkpoint) each block to trade compute for memory.
        block_cls = nn.remat(
            Block,
            prevent_cse=False,
            # NOTE(review): `deterministic` is the last positional arg of
            # Block.__call__; the original comment read "0=self, 6=deterministic"
            # while the value is (5,) — confirm the intended index convention
            # of nn.remat's static_argnums here.
            static_argnums=(5,),
            policy=jax.checkpoint_policies.nothing_saveable,
        )
        # Stack `depth` blocks via scan; params get a leading layer axis, while
        # positions/mask/adarms_cond/deterministic are broadcast to every layer.
        self.layers = nn.scan(
            block_cls,
            variable_axes={"params": 0},
            split_rngs={"params": True, "dropout": True},
            in_axes=(
                0,
                nn.broadcast,
                nn.broadcast,
                nn.broadcast,
                nn.broadcast,
            ),  # 0=kv_cache, 1=positions, 2=mask, 3=adarms_cond, 4=deterministic
            length=self.configs[0].depth,
        )(
            configs=self.configs,
            dropout=self.dropout,
            dropout_bdims=self.dropout_bdims,
        )
        # One final norm per expert.
        self.final_norms = [RMSNorm(name=_name("final_norm", i)) for i in range(len(self.configs))]

    @at.typecheck
    def embed(self, tokens: at.Int[at.Array, "b t"]) -> at.Float[at.Array, "b t d"]:
        """Embed tokens with the first expert's embedder, cast to embed_dtype."""
        return self.embedder.encode(tokens).astype(self.embed_dtype)

    @at.typecheck
    def __call__(
        self,
        # list of token arrays, one for each expert, or None if that expert should not be run
        embedded: Sequence[at.Float[at.Array, "b _t _d"] | None],
        positions: at.Int[at.Array, "b t"],
        mask: at.Bool[at.Array, "b t s"],
        adarms_cond: Sequence[at.Float[at.Array, "b _d"] | None] | None = None,
        *,
        kv_cache: KVCache | None = None,
        deterministic: bool = True,
    ) -> tuple[Sequence[at.Float[at.Array, "b _t _d"] | None], KVCache]:
        """Run the full block stack; returns per-expert final-normed outputs and the KV cache."""
        embedded = jax.tree.map(lambda e: e.astype(self.embed_dtype), embedded)
        mask = jnp.asarray(mask)[:, None, :, :]  # add a broadcast head axis: [b, 1, t, s]
        if adarms_cond is None:
            adarms_cond = [None] * len(self.configs)

        embedded, kv_cache = self.layers(embedded, kv_cache, positions, mask, adarms_cond, deterministic)

        assert all(e.dtype == jnp.dtype(self.embed_dtype) for e in embedded if e is not None)

        # Apply each expert's final RMSNorm (dropping the gate output), keep None slots.
        return [
            f(e, a)[0] if e is not None else e for f, e, a in zip(self.final_norms, embedded, adarms_cond, strict=True)
        ], kv_cache

    def init(self, use_adarms: Sequence[bool]):
        """Convenience method for initializing all parameters, necessary due to the quirks of linen."""
        self.embed(jnp.zeros((1, 1), dtype=jnp.int32))
        self(
            [jnp.zeros((1, 1, c.width)) for c in self.configs],
            jnp.zeros((1, len(self.configs)), dtype=jnp.int32),
            jnp.zeros((1, len(self.configs), len(self.configs)), dtype=bool),
            adarms_cond=[jnp.zeros((1, c.width)) if u else None for u, c in zip(use_adarms, self.configs, strict=True)],
        )
422
+
423
+
424
+ def _apply_rope(x, *, positions, max_wavelength=10_000):
425
+ """Applies RoPE positions [B, L] to x [B, L, H, D]."""
426
+ freq_exponents = (2.0 / x.shape[-1]) * jnp.arange(x.shape[-1] // 2, dtype=jnp.float32)
427
+ timescale = max_wavelength**freq_exponents
428
+ radians = positions[..., None] / timescale[None, None, :]
429
+ radians = radians[..., None, :]
430
+ assert radians.dtype == jnp.float32
431
+ # radians.shape = [...,L,1,d=D/2]
432
+ sin, cos = jnp.sin(radians), jnp.cos(radians)
433
+ x1, x2 = jnp.split(x, 2, axis=-1)
434
+ res = jnp.concatenate([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
435
+ assert res.dtype == jnp.float32
436
+ # The original bigvision impl allows RoPE to upcast to float32. It is then immediately downcast again to the cache
437
+ # dtype when in inference mode (but not in training mode). I don't think any of this was intentional. Based on the
438
+ # original DeepMind impl, as well as the widely-used transformers impl, it is ok to always downcast back to bfloat16
439
+ # here.
440
+ return res.astype(x.dtype)
441
+
442
+
443
+ def _name(name, i):
444
+ # we name layers like this because we want the first expert's weights to have no suffix (e.g., "attn"), so that they
445
+ # can be loaded seamlessly from the existing PaliGemma checkpoint. subsequent experts will have a suffix (e.g.,
446
+ # "attn_1") and their weights will be initialized from scratch. in practice, we only use two experts -- PaliGemma,
447
+ # and the action expert.
448
+ if i == 0:
449
+ return name
450
+ return f"{name}_{i}"
451
+
452
+
453
+ def _gated_residual(x, y, gate):
454
+ assert (x is None) == (y is None)
455
+ if x is None:
456
+ return None
457
+ if gate is None:
458
+ return x + y
459
+ return x + y * gate
src/openpi/models/gemma_fast.py ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Big Vision Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Gemma model implementation from big_vision/models/ppp/gemma.py (with small modifications for NNX compatibility)
17
+ Used for FAST autoregressive policies.
18
+ """
19
+
20
+ import dataclasses
21
+ from typing import Literal, TypeAlias
22
+
23
+ import einops
24
+ import flax.linen as nn
25
+ import jax
26
+ import jax.numpy as jnp
27
+ import ml_collections
28
+
29
+ import openpi.models.lora as lora
30
+ import openpi.shared.array_typing as at
31
+
32
+ Variant = Literal["gemma_2b", "gemma_2b_lora"]
33
+
34
+
35
def get_config(variant):
    """Returns config for specified gemma variant."""
    # Both variants share the same 2B backbone hyperparameters; the LoRA
    # variant only adds adapter configs on top.
    backbone = {
        "variant": variant,
        "width": 2048,
        "depth": 18,
        "mlp_dim": 16_384,
        "num_heads": 8,
        "num_kv_heads": 1,
        "head_dim": 256,
        "norm_eps": 1e-6,
        "vocab_size": 257_152,
        "scan": True,
        "remat_policy": "nothing_saveable",
    }
    if variant == "gemma_2b":
        return ml_collections.ConfigDict(backbone)
    if variant == "gemma_2b_lora":
        return ml_collections.ConfigDict(
            {
                **backbone,
                "lora_configs": {
                    "attn": lora.LoRAConfig(rank=16, alpha=16.0),
                    "ffn": lora.LoRAConfig(rank=16, alpha=16.0),
                },
            }
        )
    raise ValueError(f"Unknown variant: {variant}")
74
+
75
+
76
@at.typecheck
class Einsum(nn.Module):
    """Minimal einsum layer holding one weight tensor of `shape`."""

    # Shape of the single weight parameter "w".
    shape: tuple[int, ...]

    @nn.compact
    def __call__(self, eqn, x):
        # Cast the weight to the input dtype so half-precision inputs stay
        # half-precision through the contraction.
        dtype = x.dtype  # original dtype, could be half-precision
        w = self.param("w", nn.initializers.zeros_init(), self.shape).astype(dtype)
        return jnp.einsum(eqn, x, w)
85
+
86
+
87
@at.typecheck
class RMSNorm(nn.Module):
    """RMS layer norm with a learned (1 + scale) gain, as used by Gemma."""

    @nn.compact
    def __call__(self, x):
        dtype = x.dtype  # original dtype, could be half-precision
        # Gemma stores the gain as an offset from 1, so zero-init is identity.
        scale = self.param("scale", nn.initializers.zeros_init(), (x.shape[-1]))
        var = jnp.mean(jnp.square(x.astype(jnp.float32)), axis=-1, keepdims=True)  # compute variance in float32
        normed_inputs = jnp.asarray(x * jnp.reciprocal(jnp.sqrt(var + 1e-06)))  # compute normalization in float32
        normed_inputs = normed_inputs * (
            1 + scale
        )  # scale by learned parameter in float32 (matches Flax implementation)
        return normed_inputs.astype(dtype)  # return in original dtype
99
+
100
+
101
@at.typecheck
class Embedder(nn.Module):
    """Embedder module with a tied input/output embedding table."""

    # Vocabulary size (rows of the embedding table).
    vocab_size: int
    # Embedding dimension (columns of the embedding table).
    embed_dim: int

    def setup(self):
        self.input_embedding_table = self.param(
            "input_embedding",
            nn.initializers.zeros_init(),
            (self.vocab_size, self.embed_dim),
        )

    def encode(self, x):
        """Look up token embeddings, scaled by sqrt(embed_dim) (Gemma convention)."""
        x = self.input_embedding_table[(x,)]
        x *= jnp.sqrt(self.embed_dim).astype(x.dtype)
        return x

    def decode(self, x):
        """Project hidden states to vocab logits using the tied embedding table."""
        return jnp.dot(x, self.input_embedding_table.T)
122
+
123
+
124
@at.typecheck
class Attention(nn.Module):
    """Attention module with multi-query KV sharing and an in-layer KV cache."""

    # Number of query heads.
    num_heads: int
    # Number of key/value heads (1 => multi-query attention).
    num_kv_heads: int
    # Input/output feature size (model width).
    features: int
    # Per-head dimension.
    head_dim: int

    # Optional dtype for the KV cache entries; defaults to the k/v dtype.
    cache_dtype: str | None = None

    # Optional LoRA config applied to all projection einsums.
    lora_config: lora.LoRAConfig | None = None

    def setup(self):
        # When query and KV head counts match, fuse q/k/v into one einsum.
        if self.num_kv_heads == self.num_heads:
            self.qkv_einsum = lora.Einsum(
                shape=(3, self.num_heads, self.features, self.head_dim),
                name="qkv_einsum",
                init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0, 1)),
                lora_config=self.lora_config,
            )
        else:
            self.q_einsum = lora.Einsum(
                shape=(self.num_heads, self.features, self.head_dim),
                name="q_einsum",
                init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)),
                lora_config=self.lora_config,
            )
            self.kv_einsum = lora.Einsum(
                shape=(2, self.num_kv_heads, self.features, self.head_dim),
                name="kv_einsum",
                init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0, 1)),
                lora_config=self.lora_config,
            )
        self.attn_vec_einsum = lora.Einsum(
            shape=(self.num_heads, self.head_dim, self.features),
            name="attn_vec_einsum",
            init_fn=nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)),
            lora_config=self.lora_config,
        )

    def _init_cache(self, k, v, cache_size):
        """Initialize KV cache"""
        # Right-pad the prefill keys/values out to the full cache size and
        # record the per-example fill index.
        prefill_len = k.shape[1]
        pad_width = ((0, 0), (0, cache_size - prefill_len), (0, 0), (0, 0))
        cache_dtype = self.cache_dtype or k.dtype
        k_cache = jnp.pad(k.astype(cache_dtype), pad_width)
        v_cache = jnp.pad(v.astype(cache_dtype), pad_width)
        idx = jnp.zeros((k.shape[0],), dtype=jnp.int32) + prefill_len
        return idx, k_cache, v_cache

    def _update_cache(self, k, v, idx, k_cache, v_cache):
        """Update KV cache with new values"""
        assert k.shape[1] == 1, "Only support kv-cache updates of length 1"
        # NOTE(review): idx[0] is used for every batch element, which assumes
        # all examples share the same fill position -- confirm callers
        # guarantee equal prefill lengths across the batch.
        indices = (0, idx[0], 0, 0)
        cache_dtype = self.cache_dtype or k.dtype
        k_new = jax.lax.dynamic_update_slice(k_cache, k.astype(cache_dtype), indices)
        v_new = jax.lax.dynamic_update_slice(v_cache, v.astype(cache_dtype), indices)
        idx_new = idx + 1
        return idx_new, k_new, v_new

    @nn.compact
    def __call__(self, x, positions, attn_mask, kv_cache, decode, deterministic=True):  # noqa: FBT002
        """Run attention over `x`; returns (output [B, T, features], updated kv_cache)."""
        dtype = x.dtype  # original dtype, could be half-precision
        if self.num_kv_heads == self.num_heads:
            q, k, v = self.qkv_einsum("BSD,3KDH->3BSKH", x)
        else:
            q = self.q_einsum("BTD,NDH->BTNH", x)
            k, v = self.kv_einsum("BSD,2KDH->2BSKH", x)

        q = _apply_rope(q, positions=positions)  # promotes to float32
        q *= self.head_dim**-0.5

        k = _apply_rope(k, positions=positions)  # promotes to float32

        if kv_cache is None:
            # Prefill: the mask's last axis defines the total cache size.
            idx, k_cache, v_cache = self._init_cache(k, v, attn_mask.shape[-1])
        else:
            # Decode step: append this step's k/v at the current fill index.
            idx, k_cache, v_cache = kv_cache
            idx, k_cache, v_cache = self._update_cache(k, v, idx, k_cache, v_cache)

        k, v = k_cache, v_cache
        kv_cache = (idx, k_cache, v_cache)

        # Group query heads over the shared KV heads (grouped-query attention).
        q = einops.rearrange(q, "B T (K G) H -> B T K G H", K=self.num_kv_heads)
        logits = jnp.einsum("BTKGH,BSKH->BKGTS", q, k, preferred_element_type=jnp.float32)

        if attn_mask.shape != (q.shape[0], 1, q.shape[1], k.shape[1]):
            raise ValueError(
                f"Attention mask with shape {attn_mask.shape} but shapes for q and k are: {q.shape} and {k.shape}"
            )

        # big_neg = jnp.finfo(logits.dtype).min
        big_neg = -2.3819763e38  # See gemma/modules.py
        masked_logits = jnp.where(attn_mask[:, :, None, :, :], logits, big_neg)

        probs = jax.nn.softmax(masked_logits, axis=-1).astype(dtype)

        encoded = jnp.einsum("BKGTS,BSKH->BTKGH", probs, v)
        encoded = einops.rearrange(encoded, "B T K G H -> B T (K G) H")
        return self.attn_vec_einsum("BTNH,NHD->BTD", encoded), kv_cache
225
+
226
+
227
@at.typecheck
class Block(nn.Module):
    """Transformer block: pre-norm attention + pre-norm MLP, each with a residual."""

    # Number of query heads.
    num_heads: int
    # Number of key/value heads.
    num_kv_heads: int
    # Model width.
    embed_dim: int
    # Per-head dimension.
    head_dim: int
    # MLP hidden size.
    hidden_dim: int

    # Dropout rate (0 disables dropout entirely).
    dropout: float = 0.0
    # Batch dims over which dropout is broadcast.
    dropout_bdims: tuple[int, ...] = ()
    # Optional KV-cache dtype, forwarded to Attention.
    cache_dtype: str | None = None
    # Per-submodule LoRA configs, keyed by "attn" / "ffn".
    lora_configs: ml_collections.ConfigDict = dataclasses.field(default_factory=ml_collections.ConfigDict)

    def setup(self):
        self.pre_attention_norm = RMSNorm()
        self.attn = Attention(
            num_heads=self.num_heads,
            num_kv_heads=self.num_kv_heads,
            features=self.embed_dim,
            head_dim=self.head_dim,
            cache_dtype=self.cache_dtype,
            lora_config=self.lora_configs.get("attn"),
        )
        self.pre_ffw_norm = RMSNorm()
        self.mlp = lora.FeedForward(
            features=self.embed_dim, hidden_dim=self.hidden_dim, name="mlp", lora_config=self.lora_configs.get("ffn")
        )
        if self.dropout:
            self.drop = nn.Dropout(self.dropout, self.dropout_bdims)
        else:
            # No-op stand-in so callers can always pass (x, deterministic).
            self.drop = lambda x, _: x

    def __call__(self, x, kv_cache, positions, attn_mask, decode, deterministic=True):  # noqa: FBT002
        """Apply the block to `x`; returns (outputs, updated kv_cache)."""
        x = nn.with_logical_constraint(x, ("act_batch", "act_len", "act_emb"))
        inputs_normalized = self.pre_attention_norm(x)
        attn_output, kv_cache = self.attn(inputs_normalized, positions, attn_mask, kv_cache, decode, deterministic)
        attn_output = self.drop(attn_output, deterministic)
        attn_output += x
        residual = attn_output
        attn_output = self.pre_ffw_norm(attn_output)
        outputs = self.mlp(attn_output)
        outputs = self.drop(outputs, deterministic)
        outputs = residual + outputs
        return outputs, kv_cache
273
+
274
+
275
# KV cache entry: (per-example fill index, key cache, value cache).
KVCache: TypeAlias = tuple[at.Int[at.Array, " b"], at.Float[at.Array, "b _t _k _h"], at.Float[at.Array, "b _t _v _h"]]
276
+
277
+
278
@at.typecheck
class Module(nn.Module):
    """gemma model (decoder-only transformer used for FAST autoregressive policies)."""

    # Variant name (see `get_config`).
    variant: str

    # Model width (embedding dim).
    width: int
    # Number of transformer blocks.
    depth: int
    # MLP hidden size.
    mlp_dim: int
    # Number of query heads.
    num_heads: int
    # Number of key/value heads.
    num_kv_heads: int
    # Per-head dimension.
    head_dim: int
    # RMSNorm epsilon.
    norm_eps: float
    # Vocabulary size.
    vocab_size: int
    # Dtype activations are cast to after embedding.
    embed_dtype: str

    dropout: float = 0.0
    dropout_bdims: tuple[int, ...] = ()  # Every float is dropped independently.
    cache_dtype: str | None = None

    # Whether blocks are stacked with nn.scan (affects remat prevent_cse).
    scan: bool = False
    # jax.checkpoint policy name, or "none" to disable remat.
    remat_policy: str = "none"
    # Per-submodule LoRA configs, keyed by "attn" / "ffn".
    lora_configs: ml_collections.ConfigDict = dataclasses.field(default_factory=ml_collections.ConfigDict)

    @nn.compact
    def __call__(
        self,
        tokens=None,
        embedded_prefix=None,
        embed_only=False,  # noqa: FBT002
        pre_logits=None,
        positions=None,
        mask=None,
        decode=False,  # noqa: FBT002
        kv_cache=None,
        deterministic=True,  # noqa: FBT002
        return_prelogits=False,  # noqa: FBT002
    ):
        """Embed only, or complete forward pass.

        Args:
            tokens: Embedded, then appended to `embedded_prefix`. Can be None.
            embedded_prefix: Optional prefix that is already embedded.
            embed_only: Whether to compute embeddings only.
            pre_logits: If present computes logits from pre_logits and returns.
            positions: Optional `[B, T]` allows to specify the absolute position of
                the tokens.
            mask: Optional attention mask `[B, T, S]`.
            decode: Whether to use kv-cache. Caller must pass masks and positions.
            kv_cache: Optional KV cache threaded through the blocks.
            deterministic: Forwarded to all dropout layers.
            return_prelogits: Whether to return the pre-logits.

        Returns:
            If `pre_logits` is given, `(logits, out)`.
            If `embed_only=True`, the embeddings only.
            Otherwise `(logits_or_prelogits, kv_cache, out)`, where the first
            element is the pre-logits when `return_prelogits=True` and the
            logits otherwise.
        """
        out = {}

        embedder = Embedder(vocab_size=self.vocab_size, embed_dim=self.width, name="embedder")

        # Fast path: only project existing pre-logits to logits.
        if pre_logits is not None:
            x = out["pre_logits"] = pre_logits
            logits = out["logits"] = embedder.decode(x)
            return logits, out

        x = []
        if embedded_prefix is not None:
            x.append(embedded_prefix)
        if tokens is not None:
            x.append(embedder.encode(tokens))

        x = jnp.concatenate(x, axis=-2)
        x = x.astype(self.embed_dtype)
        batch_size, seq_len, width = x.shape

        if embed_only:
            return x

        if decode:
            assert positions is not None and mask is not None, (  # noqa: PT018
                "Must explicitly pass positions and mask for decoding."
            )

        if positions is None:
            positions = jnp.arange(seq_len).astype(jnp.int32)[None, :]
        assert positions.shape[1] == x.shape[1], (positions.shape, x.shape)

        if mask is None:
            mask = nn.attention.make_causal_mask(jnp.ones([batch_size, seq_len]))
        if mask.ndim == 3:
            mask = mask[:, None, :, :]
        # The mask's last axis may exceed seq_len during decoding; it defines
        # the KV cache size.
        cache_size = max(seq_len, mask.shape[-1])
        assert mask.shape == (batch_size, 1, seq_len, cache_size), mask.shape

        if self.remat_policy == "none":
            block_cls = Block
        else:
            block_cls = nn.remat(
                Block,
                prevent_cse=not self.scan,
                static_argnums=(5, 6),  # 0=self, 5=decode, 6=deterministic
                policy=getattr(jax.checkpoint_policies, self.remat_policy),
            )

        block_kw = {
            "num_heads": self.num_heads,
            "head_dim": self.head_dim,
            "num_kv_heads": self.num_kv_heads,
            "embed_dim": width,
            "hidden_dim": self.mlp_dim,
            "dropout": self.dropout,
            "dropout_bdims": self.dropout_bdims,
            "cache_dtype": self.cache_dtype,
            "lora_configs": self.lora_configs,
        }
        # All depth blocks are stacked into a single scanned module under the
        # "layers" scope; kv_cache is scanned over axis 0, the rest broadcast.
        layers = self.scope.push("layers")
        blocks = [
            nn.scan(
                block_cls,
                variable_axes={"params": 0},
                split_rngs={"params": True, "dropout": True},
                in_axes=(0, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast),  # 0=kv_cache, 1=positions, 2=mask
                length=self.depth,
            )(parent=layers, **block_kw)
        ]
        for block in blocks:
            x, kv_cache = block(x, kv_cache, positions, mask, decode, deterministic)

        assert x.dtype == jnp.dtype(self.embed_dtype)  # Sanity check.
        out["encoded"] = x

        x = RMSNorm(name="final_norm")(x)
        out["pre_logits"] = x
        if return_prelogits:
            return x, kv_cache, out

        x = embedder.decode(x)
        out["logits"] = x

        return x, kv_cache, out

    def init(self):
        """Convenience method for initializing all parameters, necessary due to the quirks of linen."""
        self(jnp.zeros((1, 1), dtype=jnp.int32))
423
+
424
+
425
+ def _apply_rope(x, *, positions, max_wavelength=10_000):
426
+ """Applies RoPE positions [B, L] to x [B, L, H, D]."""
427
+ freq_exponents = (2.0 / x.shape[-1]) * jnp.arange(x.shape[-1] // 2, dtype=jnp.float32)
428
+ timescale = max_wavelength**freq_exponents
429
+ radians = positions[..., None] / timescale[None, None, :]
430
+ radians = radians[..., None, :]
431
+ assert radians.dtype == jnp.float32
432
+ # radians.shape = [...,L,1,d=D/2]
433
+ sin, cos = jnp.sin(radians), jnp.cos(radians)
434
+ x1, x2 = jnp.split(x, 2, axis=-1)
435
+ res = jnp.concatenate([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
436
+ assert res.dtype == jnp.float32
437
+ return res
src/openpi/models/lora.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import re
3
+
4
+ import flax.linen as nn
5
+ import flax.struct as struct
6
+ import jax.numpy as jnp
7
+
8
+ import openpi.shared.array_typing as at
9
+
10
+
11
@struct.dataclass
class LoRAConfig:
    """Configuration for LoRA."""

    # LoRA rank.
    rank: int
    # LoRA scaling factor.
    alpha: float = 1.0
    # Initialization function for LoRA parameters.
    init_fn: nn.initializers.Initializer = nn.initializers.normal(stddev=0.01)
    # Enable rank-stabilized LoRA: https://arxiv.org/pdf/2312.03732
    rslora: bool = False
    # Axes in the weight to apply LoRA to. Should typically be the last two axes.
    axes: tuple[int, int] = (-2, -1)
    # Axis label which is used by LoRA in einsum equations. Must not be present in the original equation.
    label: str = "L"

    @property
    def scaling_value(self) -> float:
        """Multiplier applied to the low-rank update: alpha/sqrt(rank) under rsLoRA, else alpha/rank."""
        return self.alpha / math.sqrt(self.rank) if self.rslora else self.alpha / self.rank
31
+
32
+
33
class Einsum(nn.Module):
    """Einsum with LoRA support. Can be used as a drop-in replacement for the Gemma Einsum."""

    # Shape of the weight.
    shape: tuple[int, ...]
    # Initialization function for the weight.
    init_fn: nn.initializers.Initializer = nn.initializers.zeros
    # If not None, apply LoRA to the weight.
    lora_config: LoRAConfig | None = None

    def setup(self):
        self.w = self.param("w", self.init_fn, self.shape)

        if config := self.lora_config:
            # Setup LoRA parameters: "a" replaces the output axis with the rank,
            # "b" replaces the input axis with the rank.
            shape_a, shape_b = list(self.shape), list(self.shape)
            shape_a[config.axes[1]] = config.rank
            shape_b[config.axes[0]] = config.rank
            self.w_a = self.param("lora_a", config.init_fn, shape_a)
            self.w_b = self.param("lora_b", config.init_fn, shape_b)

    @nn.compact
    def __call__(self, eqn: str, x):
        """Contract `x` with the weight via `eqn`, adding the scaled LoRA delta if configured."""
        dtype = x.dtype  # original dtype, could be half-precision
        result = jnp.einsum(eqn, x, self.w.astype(dtype))

        if config := self.lora_config:
            eqn_a, eqn_b = self._make_lora_eqns(eqn)
            lora = jnp.einsum(eqn_a, x, self.w_a.astype(dtype))
            lora = jnp.einsum(eqn_b, lora, self.w_b.astype(dtype))
            result = result + lora * config.scaling_value

        return result

    def _make_lora_eqns(self, eqn: str) -> tuple[str, str]:
        """Derive the pair of einsum equations implementing the low-rank update.

        Raises:
            ValueError: if `eqn` already contains the configured rank label, or
                is not a simple two-operand einsum.
        """
        assert self.lora_config is not None
        label = self.lora_config.label
        # Fix: guard against the *configured* rank label colliding with `eqn`,
        # rather than the hard-coded "L" — with a custom label the old check
        # both rejected harmless equations and missed real collisions.
        if label in eqn:
            raise ValueError(f"{label} already in eqn: {eqn}")
        if not (m := re.match("(.*),(.*)->(.*)", eqn)):
            raise ValueError(f"Unsupported einsum eqn: {eqn}")
        lhs, rhs, out = m.groups()

        a_label, b_label = (rhs[x] for x in self.lora_config.axes)

        # First contraction: original input axes -> rank.
        a_rhs = rhs.replace(b_label, label)
        a_out = out.replace(b_label, label)
        eqn_a = f"{lhs},{a_rhs}->{a_out}"

        # Second contraction: rank -> original output axes.
        b_rhs = rhs.replace(a_label, label)
        eqn_b = f"{a_out},{b_rhs}->{out}"

        return eqn_a, eqn_b
86
+
87
+
88
class FeedForward(nn.Module):
    """Feed forward module (gated GELU MLP, as in Gemma), with optional LoRA."""

    # Input/output feature size.
    features: int
    # Hidden layer size.
    hidden_dim: int
    # If not None, apply LoRA to the weight.
    lora_config: LoRAConfig | None = None

    def setup(self):
        # w_gating[0] produces the gate, w_gating[1] the value branch.
        self.w_gating = self.param(
            "gating_einsum",
            nn.initializers.lecun_normal(in_axis=-2, out_axis=-1, batch_axis=(0,)),
            (2, self.features, self.hidden_dim),
        )
        self.w_linear = self.param(
            "linear",
            nn.initializers.lecun_normal(in_axis=-2, out_axis=-1),
            (self.hidden_dim, self.features),
        )
        self.w_gating_lora = None
        self.w_linear_lora = None
        if self.lora_config:
            # Setup LoRA parameters.
            # TODO: follow up with a simplified init_fn api.
            self.w_gating_lora = (
                self.param("gating_einsum_lora_a", self.lora_config.init_fn, (2, self.features, self.lora_config.rank)),
                self.param(
                    "gating_einsum_lora_b", self.lora_config.init_fn, (2, self.lora_config.rank, self.hidden_dim)
                ),
            )
            self.w_linear_lora = (
                self.param("linear_lora_a", self.lora_config.init_fn, (self.hidden_dim, self.lora_config.rank)),
                self.param("linear_lora_b", self.lora_config.init_fn, (self.lora_config.rank, self.features)),
            )

    @nn.compact
    def __call__(self, x):
        """Apply gelu(x @ W_gate) * (x @ W_value), then project back to `features`."""
        dtype = x.dtype  # original dtype, could be half-precision
        ff_gate = self._dot(
            x,
            self.w_gating[0],
            None if self.w_gating_lora is None else (self.w_gating_lora[0][0], self.w_gating_lora[1][0]),
        )
        gate_value = nn.gelu(ff_gate)

        ff1 = self._dot(
            x,
            self.w_gating[1],
            None if self.w_gating_lora is None else (self.w_gating_lora[0][1], self.w_gating_lora[1][1]),
        )
        activations = gate_value * ff1

        outputs = self._dot(activations, self.w_linear, self.w_linear_lora)
        assert outputs.dtype == dtype
        return outputs

    def _dot(self, x: at.Array, w: at.Array, lora_weights: tuple[at.Array, at.Array] | None) -> at.Array:
        """x @ w, plus the low-rank x @ a @ b correction when LoRA weights are given."""
        base = jnp.dot(x, w.astype(x.dtype))
        if lora_weights is None:
            return base
        return base + jnp.dot(jnp.dot(x, lora_weights[0].astype(x.dtype)), lora_weights[1].astype(x.dtype))
src/openpi/models/lora_test.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import flax.linen as nn
2
+ import jax
3
+ import jax.numpy as jnp
4
+
5
+ import openpi.models.lora as lora
6
+
7
+
8
def test_lora_einsum_params_shape():
    """LoRA params exist only when configured, with the rank substituted on the chosen axes."""
    shape = (3, 8, 32, 4)  # (3KDH)
    einsum = lora.Einsum(shape)
    lora0 = lora.Einsum(shape, lora_config=lora.LoRAConfig(rank=2))
    lora1 = lora.Einsum(shape, lora_config=lora.LoRAConfig(rank=2, axes=(1, 2)))

    key = jax.random.key(0)
    x = jax.random.normal(key, (8, 64, 32))  # (BSD)
    eqn = "BSD,3KDH->3BSKH"

    # Ensure that lora parameters are not initialized when LoRA is not used.
    params = einsum.init(key, eqn, x)
    assert "lora_a" not in params["params"]
    assert "lora_b" not in params["params"]

    # Check that default axes work.
    params_lora0 = lora0.init(key, eqn, x)
    assert params_lora0["params"]["lora_a"].shape == (3, 8, 32, 2)
    assert params_lora0["params"]["lora_b"].shape == (3, 8, 2, 4)

    # Check that user provided axes work.
    params_lora1 = lora1.init(key, eqn, x)
    assert params_lora1["params"]["lora_a"].shape == (3, 8, 2, 4)
    assert params_lora1["params"]["lora_b"].shape == (3, 2, 32, 4)
32
+
33
+
34
def test_lora_einsum_same_output():
    """Zero-initialized LoRA adds nothing, so outputs match the plain einsum."""
    shape = (3, 8, 32, 4)  # (3KDH)
    einsum = lora.Einsum(shape)
    einsum_lora = lora.Einsum(shape, lora_config=lora.LoRAConfig(rank=2, init_fn=nn.initializers.zeros))

    key = jax.random.key(0)
    x = jax.random.normal(key, (8, 64, 32))  # (BSD)
    eqn = "BSD,3KDH->3BSKH"

    params = einsum.init(key, eqn, x)
    output = einsum.apply(params, eqn, x)

    params_lora = einsum_lora.init(key, eqn, x)
    output_lora = einsum_lora.apply(params_lora, eqn, x)

    # Results are the same since the LoRA parameters are initialized to zeros.
    assert jnp.allclose(output, output_lora)
51
+
52
+
53
def test_lora_ffn_params_shape():
    """FeedForward creates base weights always, and rank-shaped LoRA weights only when configured."""
    ffn = lora.FeedForward(features=8, hidden_dim=32)
    ffn_lora = lora.FeedForward(
        features=8,
        hidden_dim=32,
        lora_config=lora.LoRAConfig(rank=2),
    )

    key = jax.random.key(0)
    x = jax.random.normal(key, (2, 8))

    params = ffn.init(key, x)
    assert params["params"]["gating_einsum"].shape == (2, 8, 32)
    assert params["params"]["linear"].shape == (32, 8)

    params_lora = ffn_lora.init(key, x)
    assert params_lora["params"]["gating_einsum"].shape == (2, 8, 32)
    assert params_lora["params"]["linear"].shape == (32, 8)
    assert params_lora["params"]["gating_einsum_lora_a"].shape == (2, 8, 2)
    assert params_lora["params"]["gating_einsum_lora_b"].shape == (2, 2, 32)
    assert params_lora["params"]["linear_lora_a"].shape == (32, 2)
    assert params_lora["params"]["linear_lora_b"].shape == (2, 8)
75
+
76
+
77
def test_lora_ffn_same_output():
    """Zero-initialized LoRA leaves the FeedForward output unchanged."""
    ffn = lora.FeedForward(features=8, hidden_dim=32)
    ffn_lora = lora.FeedForward(
        features=8,
        hidden_dim=32,
        lora_config=lora.LoRAConfig(rank=2, init_fn=nn.initializers.zeros),
    )

    key = jax.random.key(0)
    x = jax.random.normal(key, (2, 8))

    params = ffn.init(key, x)
    output = ffn.apply(params, x)

    params_lora = ffn_lora.init(key, x)
    output_lora = ffn_lora.apply(params_lora, x)

    assert jnp.allclose(output, output_lora)
src/openpi/models/model.py ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ from collections.abc import Sequence
3
+ import dataclasses
4
+ import enum
5
+ import logging
6
+ import pathlib
7
+ from typing import Generic, TypeVar
8
+
9
+ import augmax
10
+ from flax import nnx
11
+ from flax import struct
12
+ from flax import traverse_util
13
+ import jax
14
+ import jax.numpy as jnp
15
+ import numpy as np
16
+ import orbax.checkpoint as ocp
17
+ import safetensors
18
+ import torch
19
+
20
+ from openpi.models_pytorch import pi0_pytorch
21
+ from openpi.shared import image_tools
22
+ import openpi.shared.array_typing as at
23
+
24
+ logger = logging.getLogger("openpi")
25
+
26
+ # Type variable for array types (JAX arrays, PyTorch tensors, or numpy arrays)
27
+ ArrayT = TypeVar("ArrayT", bound=jax.Array | torch.Tensor | np.ndarray)
28
+
29
+
30
class ModelType(enum.Enum):
    """Supported model types.

    Values are the string identifiers used when selecting a model.
    """

    PI0 = "pi0"
    PI0_FAST = "pi0_fast"
    PI05 = "pi05"
36
+
37
+
38
+ # The model always expects these images
39
+ IMAGE_KEYS = (
40
+ "base_0_rgb",
41
+ "left_wrist_0_rgb",
42
+ "right_wrist_0_rgb",
43
+ )
44
+
45
+
46
+ # This may need change if we release a small model.
47
+ IMAGE_RESOLUTION = (224, 224)
48
+
49
+
50
+ # Data format
51
+ #
52
+ # Data transforms produce the model input as a nested dictionary which is later converted
53
+ # into `Obesrvation` and `Actions` objects. See below.
54
+ #
55
+ # In the dictory form, this data should look like:
56
+ # {
57
+ # # Observation data.
58
+ # "image": {
59
+ # "base_0_rgb": (float32|uint8)[*b, h, w, 3], # RGB image in [-1, 1] or [0, 255]
60
+ # ... # Additional camera views
61
+ # },
62
+ # "image_mask": {
63
+ # "base_0_rgb": bool[*b], # True if image is valid
64
+ # ... # Masks for additional views
65
+ # },
66
+ # "state": float32[*b, s], # Low-dimensional robot state
67
+ # "tokenized_prompt": int32[*b, l], # Optional, tokenized language prompt
68
+ # "tokenized_prompt_mask": bool[*b, l], # Optional, mask for tokenized prompt
69
+ # "token_ar_mask": int32[*b, l], # Optional, autoregressive mask for FAST model
70
+ # "token_loss_mask": bool[*b, l], # Optional, loss mask for FAST model
71
+ #
72
+ # # Actions data.
73
+ # "actions": float32[*b ah ad]
74
+ # }
75
+ # where:
76
+ # *b = batch dimensions
77
+ # h,w = image height/width
78
+ # s = state dimension
79
+ # l = sequence length
80
+ #
81
@at.typecheck
@struct.dataclass
class Observation(Generic[ArrayT]):
    """Holds observations, i.e., inputs to the model.

    See `Observation.from_dict` to see the expected dictionary form. This is the format
    that should be produced by the data transforms.
    """

    # Images, in [-1, 1] float32.
    images: dict[str, at.Float[ArrayT, "*b h w c"]]
    # Image masks, with same keys as images.
    image_masks: dict[str, at.Bool[ArrayT, "*b"]]
    # Low-dimensional robot state.
    state: at.Float[ArrayT, "*b s"]

    # Tokenized prompt.
    tokenized_prompt: at.Int[ArrayT, "*b l"] | None = None
    # Tokenized prompt mask.
    tokenized_prompt_mask: at.Bool[ArrayT, "*b l"] | None = None

    # pi0-fast model specific fields.

    # Token auto-regressive mask (for FAST autoregressive model).
    token_ar_mask: at.Int[ArrayT, "*b l"] | None = None
    # Token loss mask (for FAST autoregressive model).
    token_loss_mask: at.Bool[ArrayT, "*b l"] | None = None

    @classmethod
    def from_dict(cls, data: at.PyTree[ArrayT]) -> "Observation[ArrayT]":
        """This method defines the mapping between unstructured data (i.e., nested dict) to the structured Observation format."""
        # Ensure that tokenized_prompt and tokenized_prompt_mask are provided together.
        if ("tokenized_prompt" in data) != ("tokenized_prompt_mask" in data):
            raise ValueError("tokenized_prompt and tokenized_prompt_mask must be provided together.")
        # If images are uint8, convert them to [-1, 1] float32.
        # NOTE: mutates the incoming `data` dict's image entries in place.
        for key in data["image"]:
            if data["image"][key].dtype == np.uint8:
                data["image"][key] = data["image"][key].astype(np.float32) / 255.0 * 2.0 - 1.0
            elif hasattr(data["image"][key], "dtype") and data["image"][key].dtype == torch.uint8:
                # NOTE(review): the torch branch also permutes HWC -> CHW, unlike the
                # numpy branch above — presumably the PyTorch model path expects
                # channel-first input; confirm against the PyTorch consumer.
                data["image"][key] = data["image"][key].to(torch.float32).permute(0, 3, 1, 2) / 255.0 * 2.0 - 1.0
        return cls(
            images=data["image"],
            image_masks=data["image_mask"],
            state=data["state"],
            tokenized_prompt=data.get("tokenized_prompt"),
            tokenized_prompt_mask=data.get("tokenized_prompt_mask"),
            token_ar_mask=data.get("token_ar_mask"),
            token_loss_mask=data.get("token_loss_mask"),
        )

    def to_dict(self) -> at.PyTree[ArrayT]:
        """Convert the Observation to a nested dict."""
        # Rename dataclass fields back to the external "image"/"image_mask" keys.
        result = dataclasses.asdict(self)
        result["image"] = result.pop("images")
        result["image_mask"] = result.pop("image_masks")
        return result
137
+
138
+
139
+ # Defines the format of the actions. This field is included as "actions" inside the dictionary
140
+ # produced by the data transforms.
141
+ Actions = at.Float[ArrayT, "*b ah ad"]
142
+
143
+
144
def preprocess_observation(
    rng: at.KeyArrayLike | None,
    observation: Observation,
    *,
    train: bool = False,
    image_keys: Sequence[str] = IMAGE_KEYS,
    image_resolution: tuple[int, int] = IMAGE_RESOLUTION,
) -> Observation:
    """Preprocess the observations by performing image augmentations (if train=True), resizing (if necessary), and
    filling in a default image mask (if necessary).

    NOTE(review): `rng` must be non-None when train=True — it seeds the
    per-example augmentation keys below.
    """

    if not set(image_keys).issubset(observation.images):
        raise ValueError(f"images dict missing keys: expected {image_keys}, got {list(observation.images)}")

    batch_shape = observation.state.shape[:-1]

    out_images = {}
    for key in image_keys:
        image = observation.images[key]
        # Resize (with padding) only when the incoming resolution differs.
        if image.shape[1:3] != image_resolution:
            logger.info(f"Resizing image {key} from {image.shape[1:3]} to {image_resolution}")
            image = image_tools.resize_with_pad(image, *image_resolution)

        if train:
            # Convert from [-1, 1] to [0, 1] for augmax.
            image = image / 2.0 + 0.5

            transforms = []
            # Geometric augmentations (crop/resize/rotate) are applied to all
            # views except wrist cameras.
            if "wrist" not in key:
                height, width = image.shape[1:3]
                transforms += [
                    augmax.RandomCrop(int(width * 0.95), int(height * 0.95)),
                    augmax.Resize(width, height),
                    augmax.Rotate((-5, 5)),
                ]
            transforms += [
                augmax.ColorJitter(brightness=0.3, contrast=0.4, saturation=0.5),
            ]
            # One independent augmentation key per batch element.
            sub_rngs = jax.random.split(rng, image.shape[0])
            image = jax.vmap(augmax.Chain(*transforms))(sub_rngs, image)

            # Back to [-1, 1].
            image = image * 2.0 - 1.0

        out_images[key] = image

    # obtain mask
    out_masks = {}
    for key in out_images:
        if key not in observation.image_masks:
            # do not mask by default
            out_masks[key] = jnp.ones(batch_shape, dtype=jnp.bool)
        else:
            out_masks[key] = jnp.asarray(observation.image_masks[key])

    return Observation(
        images=out_images,
        image_masks=out_masks,
        state=observation.state,
        tokenized_prompt=observation.tokenized_prompt,
        tokenized_prompt_mask=observation.tokenized_prompt_mask,
        token_ar_mask=observation.token_ar_mask,
        token_loss_mask=observation.token_loss_mask,
    )
209
+
210
+
211
@dataclasses.dataclass(frozen=True)
class BaseModelConfig(abc.ABC):
    """Configuration shared by all models. Specific models should inherit from this class, and implement the `create`
    method to create the corresponding model.
    """

    # Action space dimension.
    action_dim: int
    # Action sequence length.
    action_horizon: int
    # Tokenized prompt maximum length.
    max_token_len: int

    @property
    @abc.abstractmethod
    def model_type(self) -> ModelType:
        """The model type."""

    @abc.abstractmethod
    def create(self, rng: at.KeyArrayLike) -> "BaseModel":
        """Create a new model, initializing parameters."""

    def load(self, params: at.Params, *, remove_extra_params: bool = True) -> "BaseModel":
        """Create a model with the given parameters.

        Args:
            params: Pure-dict parameter PyTree to load into the model.
            remove_extra_params: If True, drop entries of `params` that do not
                correspond to any parameter of this model before validation.
        """
        # eval_shape avoids materializing real parameters; only the structure is needed here.
        model = nnx.eval_shape(self.create, jax.random.key(0))
        graphdef, state = nnx.split(model)
        if remove_extra_params:
            # Keep only the subtree of `params` that intersects with the model's own params.
            params = ocp.transform_utils.intersect_trees(state.to_pure_dict(), params)
        # Shapes must match exactly; dtypes are allowed to differ (e.g. bf16 vs f32 checkpoints).
        at.check_pytree_equality(expected=state.to_pure_dict(), got=params, check_shapes=True, check_dtypes=False)
        state.replace_by_pure_dict(params)
        return nnx.merge(graphdef, state)

    def load_pytorch(self, train_config, weight_path: str):
        """Create the PyTorch variant of the model and load weights from a safetensors file.

        NOTE(review): `train_config` is presumably a training config whose `.model`
        field matches `pi0_pytorch.PI0Pytorch` — confirm against callers.
        """
        logger.info(f"train_config: {train_config}")
        model = pi0_pytorch.PI0Pytorch(config=train_config.model)
        safetensors.torch.load_model(model, weight_path)
        return model

    @abc.abstractmethod
    def inputs_spec(self, *, batch_size: int = 1) -> tuple[Observation, Actions]:
        """Returns the input specification for the model. Values are jax.ShapeDtypeStruct."""

    def fake_obs(self, batch_size: int = 1) -> Observation:
        """Create an all-ones Observation matching `inputs_spec` (useful for init and tests)."""
        observation_spec, _ = self.inputs_spec(batch_size=batch_size)
        return jax.tree.map(lambda x: jnp.ones(x.shape, x.dtype), observation_spec)

    def fake_act(self, batch_size: int = 1) -> Actions:
        """Create an all-ones Actions array matching `inputs_spec` (useful for init and tests)."""
        _, action_spec = self.inputs_spec(batch_size=batch_size)
        return jax.tree.map(lambda x: jnp.ones(x.shape, x.dtype), action_spec)
260
+
261
+
262
@dataclasses.dataclass
class BaseModel(nnx.Module, abc.ABC):
    """Base class for all model implementations. Specific models should inherit from this class. They should call
    super().__init__() to initialize the shared attributes (action_dim, action_horizon, and max_token_len).
    """

    # Action space dimension.
    action_dim: int
    # Action sequence length.
    action_horizon: int
    # Tokenized prompt maximum length.
    max_token_len: int

    # Returns the per-example, per-action-step loss with shape (*batch, action_horizon).
    @abc.abstractmethod
    def compute_loss(
        self,
        rng: at.KeyArrayLike,
        observation: Observation,
        actions: Actions,
        *,
        train: bool = False,
    ) -> at.Float[at.Array, "*b ah"]: ...

    # Samples an action chunk for the given observation; kwargs are model-specific
    # (e.g. number of integration steps).
    @abc.abstractmethod
    def sample_actions(self, rng: at.KeyArrayLike, observation: Observation, **kwargs) -> Actions: ...
284
+
285
+
286
def restore_params(
    params_path: pathlib.Path | str,
    *,
    restore_type: type[np.ndarray] | type[jax.Array] = jax.Array,
    dtype: jnp.dtype | None = None,
    sharding: jax.sharding.Sharding | None = None,
) -> at.Params:
    """Restores unstructured params PyTree from a checkpoint.

    This works with checkpoints saved with `save_state` during openpi training (see `training/checkpoints.py`) as
    well as pre-trained checkpoints released for openpi.

    Args:
        params_path: The local path to the checkpoint directory.
        restore_type: The type to restore the params as. Can be set to `np.ndarray` to load the params as a numpy array.
        dtype: The dtype to restore all params as. If not provided, will use the original dtype from the checkpoint.
        sharding: The sharding to use for the params. If not provided, the params will be replicated across all devices.

    Returns:
        The restored params.
    """
    # GCS paths ("gs://...") must not be resolved as local filesystem paths.
    params_path = pathlib.Path(params_path).resolve() if not str(params_path).startswith("gs://") else params_path

    if restore_type is jax.Array and sharding is None:
        # Default sharding: fully replicated across all local devices.
        mesh = jax.sharding.Mesh(jax.devices(), ("x",))
        sharding = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())

    with ocp.PyTreeCheckpointer() as ckptr:
        # Restore only the "params" subtree of the checkpoint, ignoring e.g. optimizer state.
        metadata = ckptr.metadata(params_path)
        item = {"params": metadata["params"]}

        params = ckptr.restore(
            params_path,
            ocp.args.PyTreeRestore(
                item=item,
                restore_args=jax.tree.map(
                    lambda _: ocp.ArrayRestoreArgs(sharding=sharding, restore_type=restore_type, dtype=dtype), item
                ),
            ),
        )["params"]

    # If the params were saved with `save_state` during openpi training, every key path will end with "value", which is
    # added by `nnx.State`. We remove the "value" suffix here and always return what NNX calls a "pure dict".
    flat_params = traverse_util.flatten_dict(params)
    if all(kp[-1] == "value" for kp in flat_params):
        flat_params = {kp[:-1]: v for kp, v in flat_params.items()}
    return traverse_util.unflatten_dict(flat_params)
src/openpi/models/model_test.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flax import nnx
2
+ import jax
3
+ import pytest
4
+
5
+ from openpi.models import model as _model
6
+ from openpi.models import pi0_config
7
+ from openpi.models import pi0_fast
8
+ from openpi.shared import download
9
+ from openpi.shared import nnx_utils
10
+
11
+
12
def test_pi0_model():
    """Smoke test: Pi0 loss and sampled actions have the expected shapes."""
    rng = jax.random.key(0)
    config = pi0_config.Pi0Config()
    model = config.create(rng)

    n = 2
    obs = config.fake_obs(n)
    act = config.fake_act(n)

    loss = nnx_utils.module_jit(model.compute_loss)(rng, obs, act)
    assert loss.shape == (n, config.action_horizon)

    sampled = nnx_utils.module_jit(model.sample_actions)(rng, obs, num_steps=10)
    assert sampled.shape == (n, model.action_horizon, model.action_dim)
25
+
26
+
27
def test_pi0_lora_model():
    """Smoke test: the LoRA PaliGemma variant of Pi0 runs loss and sampling."""
    rng = jax.random.key(0)
    config = pi0_config.Pi0Config(paligemma_variant="gemma_2b_lora")
    model = config.create(rng)

    n = 2
    obs = config.fake_obs(n)
    act = config.fake_act(n)

    loss = nnx_utils.module_jit(model.compute_loss)(rng, obs, act)
    assert loss.shape == (n, config.action_horizon)

    sampled = nnx_utils.module_jit(model.sample_actions)(rng, obs, num_steps=10)
    assert sampled.shape == (n, model.action_horizon, model.action_dim)
40
+
41
+
42
def test_pi0_fast_model():
    """Smoke test: Pi0-FAST produces a per-example loss and 256 action tokens."""
    rng = jax.random.key(0)
    config = pi0_fast.Pi0FASTConfig()
    model = config.create(rng)

    n = 2
    obs = config.fake_obs(n)
    act = config.fake_act(n)

    loss = nnx_utils.module_jit(model.compute_loss)(rng, obs, act)
    assert loss.shape == (n,)

    sampled = nnx_utils.module_jit(model.sample_actions)(rng, obs)
    assert sampled.shape == (n, 256)
55
+
56
+
57
def test_pi0_fast_lora_model():
    """Smoke test: LoRA Pi0-FAST runs and actually contains LoRA parameters."""
    rng = jax.random.key(0)
    config = pi0_fast.Pi0FASTConfig(paligemma_variant="gemma_2b_lora")
    model = config.create(rng)

    n = 2
    obs = config.fake_obs(n)
    act = config.fake_act(n)

    loss = nnx_utils.module_jit(model.compute_loss)(rng, obs, act)
    assert loss.shape == (n,)

    sampled = nnx_utils.module_jit(model.sample_actions)(rng, obs)
    assert sampled.shape == (n, 256)

    # The LoRA variant must expose at least one parameter whose path contains "lora".
    state = nnx.state(model)
    lora_entries = list(state.filter(nnx_utils.PathRegex(".*lora.*")))
    assert len(lora_entries) > 0
76
+
77
+
78
@pytest.mark.manual
def test_model_restore():
    """Manual test: restore the released pi0_base checkpoint and run loss/sampling."""
    rng = jax.random.key(0)
    config = pi0_config.Pi0Config()

    n = 2
    obs = config.fake_obs(n)
    act = config.fake_act(n)

    checkpoint_path = download.maybe_download("gs://openpi-assets/checkpoints/pi0_base/params")
    model = config.load(_model.restore_params(checkpoint_path))

    loss = model.compute_loss(rng, obs, act)
    assert loss.shape == (n, config.action_horizon)

    sampled = model.sample_actions(rng, obs, num_steps=10)
    assert sampled.shape == (n, model.action_horizon, model.action_dim)
src/openpi/models/pi0.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+
3
+ import einops
4
+ import flax.nnx as nnx
5
+ import flax.nnx.bridge as nnx_bridge
6
+ import jax
7
+ import jax.numpy as jnp
8
+ from typing_extensions import override
9
+
10
+ from openpi.models import model as _model
11
+ from openpi.models import pi0_config
12
+ import openpi.models.gemma as _gemma
13
+ import openpi.models.siglip as _siglip
14
+ from openpi.shared import array_typing as at
15
+
16
+ logger = logging.getLogger("openpi")
17
+
18
+
19
def make_attn_mask(input_mask, mask_ar):
    """Adapted from big_vision.

    Builds a boolean attention mask of shape [B, N, N] from a validity mask and
    an autoregressive block mask. A query token may attend to a key token iff the
    key's cumulative `mask_ar` count does not exceed the query's, and both tokens
    are valid inputs. Example `mask_ar` patterns:

      [[1 1 1 1 1 1]]: pure causal attention.

      [[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between
        themselves and the last 3 tokens have a causal attention. The first
        entry could also be a 1 without changing behaviour.

      [[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a
        block can attend all previous blocks and all tokens on the same block.

    Args:
        input_mask: bool[B, N] true if its part of the input, false if padding.
        mask_ar: bool[?B, N] mask that's true where previous tokens cannot depend on
            it and false where it shares the same attention mask as the previous token.
    """
    ar_flags = jnp.broadcast_to(mask_ar, input_mask.shape)
    # Tokens sharing the same cumulative count belong to the same attention block.
    block_id = jnp.cumsum(ar_flags, axis=1)
    # Query i may attend key j when block_id[j] <= block_id[i].
    causal_ok = block_id[:, None, :] <= block_id[:, :, None]
    # Both query and key must be real (non-padding) tokens.
    both_valid = input_mask[:, None, :] * input_mask[:, :, None]
    return jnp.logical_and(causal_ok, both_valid)
45
+
46
+
47
@at.typecheck
def posemb_sincos(
    pos: at.Real[at.Array, " b"], embedding_dim: int, min_period: float, max_period: float
) -> at.Float[at.Array, "b {embedding_dim}"]:
    """Computes sine-cosine positional embedding vectors for scalar positions.

    Half of the output channels are sines and half are cosines, with periods
    geometrically spaced between `min_period` and `max_period`.
    """
    if embedding_dim % 2 != 0:
        raise ValueError(f"embedding_dim ({embedding_dim}) must be divisible by 2")

    half_dim = embedding_dim // 2
    fraction = jnp.linspace(0.0, 1.0, half_dim)
    # Geometric interpolation of periods: min_period at fraction 0, max_period at 1.
    period = min_period * (max_period / min_period) ** fraction
    phase = jnp.einsum(
        "i,j->ij",
        pos,
        1.0 / period * 2 * jnp.pi,
        precision=jax.lax.Precision.HIGHEST,
    )
    return jnp.concatenate([jnp.sin(phase), jnp.cos(phase)], axis=-1)
64
+
65
+
66
class Pi0(_model.BaseModel):
    """Flow-matching action model on top of a PaliGemma backbone plus a smaller action expert.

    The prefix (images + language tokens) goes through the PaliGemma weights; the
    suffix (state/action tokens) goes through the action-expert weights. With
    `config.pi05`, the flow-matching timestep is injected via adaRMSNorm instead
    of being mixed into the action tokens by an MLP.
    """

    def __init__(self, config: pi0_config.Pi0Config, rngs: nnx.Rngs):
        super().__init__(config.action_dim, config.action_horizon, config.max_token_len)
        self.pi05 = config.pi05
        paligemma_config = _gemma.get_config(config.paligemma_variant)
        action_expert_config = _gemma.get_config(config.action_expert_variant)
        # TODO: rewrite gemma in NNX. For now, use bridge.
        llm = nnx_bridge.ToNNX(
            _gemma.Module(
                configs=[paligemma_config, action_expert_config],
                embed_dtype=config.dtype,
                adarms=config.pi05,
            )
        )
        # Only the action expert (second config) uses adaRMS, and only for pi05.
        llm.lazy_init(rngs=rngs, method="init", use_adarms=[False, True] if config.pi05 else [False, False])
        img = nnx_bridge.ToNNX(
            _siglip.Module(
                num_classes=paligemma_config.width,
                variant="So400m/14",
                pool_type="none",
                scan=True,
                dtype_mm=config.dtype,
            )
        )
        # Initialize the image tower with a fake observation to fix input shapes.
        img.lazy_init(next(iter(config.fake_obs().images.values())), train=False, rngs=rngs)
        self.PaliGemma = nnx.Dict(llm=llm, img=img)
        self.action_in_proj = nnx.Linear(config.action_dim, action_expert_config.width, rngs=rngs)
        if config.pi05:
            # pi05: timestep is embedded by an MLP and injected via adaRMS conditioning.
            self.time_mlp_in = nnx.Linear(action_expert_config.width, action_expert_config.width, rngs=rngs)
            self.time_mlp_out = nnx.Linear(action_expert_config.width, action_expert_config.width, rngs=rngs)
        else:
            # pi0: continuous state token plus an MLP mixing action and time embeddings.
            self.state_proj = nnx.Linear(config.action_dim, action_expert_config.width, rngs=rngs)
            self.action_time_mlp_in = nnx.Linear(2 * action_expert_config.width, action_expert_config.width, rngs=rngs)
            self.action_time_mlp_out = nnx.Linear(action_expert_config.width, action_expert_config.width, rngs=rngs)
        self.action_out_proj = nnx.Linear(action_expert_config.width, config.action_dim, rngs=rngs)

        # This attribute gets automatically set by model.train() and model.eval().
        self.deterministic = True

    @at.typecheck
    def embed_prefix(
        self, obs: _model.Observation
    ) -> tuple[at.Float[at.Array, "b s emb"], at.Bool[at.Array, "b s"], at.Bool[at.Array, " s"]]:
        """Embed images and tokenized prompt into the prefix token sequence.

        Returns (tokens, input_mask, ar_mask); ar_mask is all-False, so the whole
        prefix attends bidirectionally within itself.
        """
        input_mask = []
        ar_mask = []
        tokens = []
        # embed images
        for name in obs.images:
            image_tokens, _ = self.PaliGemma.img(obs.images[name], train=False)

            tokens.append(image_tokens)
            # Broadcast the per-image validity bit over all of its patch tokens.
            input_mask.append(
                einops.repeat(
                    obs.image_masks[name],
                    "b -> b s",
                    s=image_tokens.shape[1],
                )
            )
            # image tokens attend to each other
            ar_mask += [False] * image_tokens.shape[1]

        # add language (aka tokenized inputs)
        if obs.tokenized_prompt is not None:
            tokenized_inputs = self.PaliGemma.llm(obs.tokenized_prompt, method="embed")
            tokens.append(tokenized_inputs)
            input_mask.append(obs.tokenized_prompt_mask)
            # full attention between image and language inputs
            ar_mask += [False] * tokenized_inputs.shape[1]
        tokens = jnp.concatenate(tokens, axis=1)
        input_mask = jnp.concatenate(input_mask, axis=1)
        ar_mask = jnp.array(ar_mask)
        return tokens, input_mask, ar_mask

    @at.typecheck
    def embed_suffix(
        self, obs: _model.Observation, noisy_actions: _model.Actions, timestep: at.Float[at.Array, " b"]
    ) -> tuple[
        at.Float[at.Array, "b s emb"],
        at.Bool[at.Array, "b s"],
        at.Bool[at.Array, " s"],
        at.Float[at.Array, "b emb"] | None,
    ]:
        """Embed state (pi0 only), noisy actions, and timestep into the suffix sequence.

        Returns (tokens, input_mask, ar_mask, adarms_cond); adarms_cond is the
        time embedding for pi05, or None for pi0.
        """
        input_mask = []
        ar_mask = []
        tokens = []
        if not self.pi05:
            # add a single state token
            state_token = self.state_proj(obs.state)[:, None, :]
            tokens.append(state_token)
            input_mask.append(jnp.ones((obs.state.shape[0], 1), dtype=jnp.bool_))
            # image/language inputs do not attend to state or actions
            ar_mask += [True]

        action_tokens = self.action_in_proj(noisy_actions)
        # embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
        time_emb = posemb_sincos(timestep, self.action_in_proj.out_features, min_period=4e-3, max_period=4.0)
        if self.pi05:
            # time MLP (for adaRMS)
            time_emb = self.time_mlp_in(time_emb)
            time_emb = nnx.swish(time_emb)
            time_emb = self.time_mlp_out(time_emb)
            time_emb = nnx.swish(time_emb)
            action_expert_tokens = action_tokens
            adarms_cond = time_emb
        else:
            # mix timestep + action information using an MLP (no adaRMS)
            time_tokens = einops.repeat(time_emb, "b emb -> b s emb", s=self.action_horizon)
            action_time_tokens = jnp.concatenate([action_tokens, time_tokens], axis=-1)
            action_time_tokens = self.action_time_mlp_in(action_time_tokens)
            action_time_tokens = nnx.swish(action_time_tokens)
            action_time_tokens = self.action_time_mlp_out(action_time_tokens)
            action_expert_tokens = action_time_tokens
            adarms_cond = None
        tokens.append(action_expert_tokens)
        input_mask.append(jnp.ones(action_expert_tokens.shape[:2], dtype=jnp.bool_))
        # image/language/state inputs do not attend to action tokens
        ar_mask += [True] + ([False] * (self.action_horizon - 1))
        tokens = jnp.concatenate(tokens, axis=1)
        input_mask = jnp.concatenate(input_mask, axis=1)
        ar_mask = jnp.array(ar_mask)
        return tokens, input_mask, ar_mask, adarms_cond

    @override
    def compute_loss(
        self, rng: at.KeyArrayLike, observation: _model.Observation, actions: _model.Actions, *, train: bool = False
    ) -> at.Float[at.Array, "*b ah"]:
        """Flow-matching MSE loss between the predicted and target velocity, per action step."""
        preprocess_rng, noise_rng, time_rng = jax.random.split(rng, 3)
        observation = _model.preprocess_observation(
            preprocess_rng, observation, train=train, image_keys=list(observation.images.keys())
        )

        batch_shape = actions.shape[:-2]
        noise = jax.random.normal(noise_rng, actions.shape)
        # Timestep sampled from a Beta(1.5, 1) scaled into (0, 1] to avoid t=0 exactly.
        time = jax.random.beta(time_rng, 1.5, 1, batch_shape) * 0.999 + 0.001
        time_expanded = time[..., None, None]
        # Linear interpolation between clean actions (t=0) and noise (t=1).
        x_t = time_expanded * noise + (1 - time_expanded) * actions
        # Target velocity of the probability flow.
        u_t = noise - actions

        # one big forward pass of prefix + suffix at once
        prefix_tokens, prefix_mask, prefix_ar_mask = self.embed_prefix(observation)
        suffix_tokens, suffix_mask, suffix_ar_mask, adarms_cond = self.embed_suffix(observation, x_t, time)
        input_mask = jnp.concatenate([prefix_mask, suffix_mask], axis=1)
        ar_mask = jnp.concatenate([prefix_ar_mask, suffix_ar_mask], axis=0)
        attn_mask = make_attn_mask(input_mask, ar_mask)
        positions = jnp.cumsum(input_mask, axis=1) - 1
        (prefix_out, suffix_out), _ = self.PaliGemma.llm(
            [prefix_tokens, suffix_tokens], mask=attn_mask, positions=positions, adarms_cond=[None, adarms_cond]
        )
        # The last action_horizon suffix outputs correspond to the action tokens.
        v_t = self.action_out_proj(suffix_out[:, -self.action_horizon :])

        return jnp.mean(jnp.square(v_t - u_t), axis=-1)

    @override
    def sample_actions(
        self,
        rng: at.KeyArrayLike,
        observation: _model.Observation,
        *,
        num_steps: int | at.Int[at.Array, ""] = 10,
        noise: at.Float[at.Array, "b ah ad"] | None = None,
    ) -> _model.Actions:
        """Sample an action chunk by integrating the learned flow from noise (t=1) to data (t=0).

        Args:
            rng: PRNG key used to draw the initial noise (ignored if `noise` is given).
            observation: The (unprocessed) observation to condition on.
            num_steps: Number of Euler integration steps.
            noise: Optional fixed initial noise of shape (b, action_horizon, action_dim).
        """
        observation = _model.preprocess_observation(
            None, observation, train=False, image_keys=list(observation.images.keys())
        )
        # note that we use the convention more common in diffusion literature, where t=1 is noise and t=0 is the target
        # distribution. yes, this is the opposite of the pi0 paper, and I'm sorry.
        dt = -1.0 / num_steps
        batch_size = observation.state.shape[0]
        if noise is None:
            noise = jax.random.normal(rng, (batch_size, self.action_horizon, self.action_dim))

        # first fill KV cache with a forward pass of the prefix
        prefix_tokens, prefix_mask, prefix_ar_mask = self.embed_prefix(observation)
        prefix_attn_mask = make_attn_mask(prefix_mask, prefix_ar_mask)
        positions = jnp.cumsum(prefix_mask, axis=1) - 1
        _, kv_cache = self.PaliGemma.llm([prefix_tokens, None], mask=prefix_attn_mask, positions=positions)

        def step(carry):
            # One Euler step of the ODE integration; reuses the prefix KV cache.
            x_t, time = carry
            suffix_tokens, suffix_mask, suffix_ar_mask, adarms_cond = self.embed_suffix(
                observation, x_t, jnp.broadcast_to(time, batch_size)
            )
            # `suffix_attn_mask` is shape (b, suffix_len, suffix_len) indicating how the suffix tokens can attend to each
            # other
            suffix_attn_mask = make_attn_mask(suffix_mask, suffix_ar_mask)
            # `prefix_attn_mask` is shape (b, suffix_len, prefix_len) indicating how the suffix tokens can attend to the
            # prefix tokens
            prefix_attn_mask = einops.repeat(prefix_mask, "b p -> b s p", s=suffix_tokens.shape[1])
            # `combined_mask` is shape (b, suffix_len, prefix_len + suffix_len) indicating how the suffix tokens (which
            # generate the queries) can attend to the full prefix + suffix sequence (which generates the keys and values)
            full_attn_mask = jnp.concatenate([prefix_attn_mask, suffix_attn_mask], axis=-1)
            assert full_attn_mask.shape == (
                batch_size,
                suffix_tokens.shape[1],
                prefix_tokens.shape[1] + suffix_tokens.shape[1],
            )
            # `positions` is shape (b, suffix_len) indicating the positions of the suffix tokens
            positions = jnp.sum(prefix_mask, axis=-1)[:, None] + jnp.cumsum(suffix_mask, axis=-1) - 1

            (prefix_out, suffix_out), _ = self.PaliGemma.llm(
                [None, suffix_tokens],
                mask=full_attn_mask,
                positions=positions,
                kv_cache=kv_cache,
                adarms_cond=[None, adarms_cond],
            )
            assert prefix_out is None
            v_t = self.action_out_proj(suffix_out[:, -self.action_horizon :])

            return x_t + dt * v_t, time + dt

        def cond(carry):
            x_t, time = carry
            # robust to floating-point error
            return time >= -dt / 2

        x_0, _ = jax.lax.while_loop(cond, step, (noise, 1.0))
        return x_0
+ return x_0
src/openpi/models/pi0_config.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from typing import TYPE_CHECKING
3
+
4
+ import flax.nnx as nnx
5
+ import jax
6
+ import jax.numpy as jnp
7
+ from typing_extensions import override
8
+
9
+ from openpi.models import model as _model
10
+ import openpi.models.gemma as _gemma
11
+ from openpi.shared import array_typing as at
12
+ import openpi.shared.nnx_utils as nnx_utils
13
+
14
+ if TYPE_CHECKING:
15
+ from openpi.models.pi0 import Pi0
16
+
17
+
18
@dataclasses.dataclass(frozen=True)
class Pi0Config(_model.BaseModelConfig):
    """Configuration for the Pi0 (and Pi05) flow-matching model."""

    # Computation dtype for the backbone and image tower.
    dtype: str = "bfloat16"
    # Gemma variant used for the PaliGemma (vision-language) backbone.
    paligemma_variant: _gemma.Variant = "gemma_2b"
    # Gemma variant used for the action expert.
    action_expert_variant: _gemma.Variant = "gemma_300m"

    # Set the model specific defaults.
    action_dim: int = 32
    action_horizon: int = 50
    # None means "derive from pi05 in __post_init__" (200 for pi05, 48 for pi0).
    max_token_len: int = None  # type: ignore
    # Pi05 has two differences from Pi0:
    # - the state input is part of the discrete language tokens rather than a continuous input that is part of the suffix
    # - the action expert uses adaRMSNorm to inject the flow matching timestep
    pi05: bool = False
    # This config option is not used directly by the model, but it is read by the ModelTransformFactory.
    # None means "default to the value of pi05".
    discrete_state_input: bool = None  # type: ignore

    def __post_init__(self):
        # object.__setattr__ is required because the dataclass is frozen.
        if self.max_token_len is None:
            object.__setattr__(self, "max_token_len", 200 if self.pi05 else 48)
        if self.discrete_state_input is None:
            object.__setattr__(self, "discrete_state_input", self.pi05)

    @property
    @override
    def model_type(self) -> _model.ModelType:
        """The model type (PI05 when pi05 is set, else PI0)."""
        if self.pi05:
            return _model.ModelType.PI05
        return _model.ModelType.PI0

    @override
    def create(self, rng: at.KeyArrayLike) -> "Pi0":
        """Create a new Pi0 model with freshly initialized parameters."""
        # Imported here to avoid a circular import (pi0.py imports this module).
        from openpi.models.pi0 import Pi0

        return Pi0(self, rngs=nnx.Rngs(rng))

    @override
    def inputs_spec(self, *, batch_size: int = 1) -> tuple[_model.Observation, _model.Actions]:
        """Return ShapeDtypeStruct specs for observation and action inputs."""
        image_spec = jax.ShapeDtypeStruct([batch_size, *_model.IMAGE_RESOLUTION, 3], jnp.float32)
        image_mask_spec = jax.ShapeDtypeStruct([batch_size], jnp.bool_)

        with at.disable_typechecking():
            observation_spec = _model.Observation(
                images={
                    "base_0_rgb": image_spec,
                    "left_wrist_0_rgb": image_spec,
                    "right_wrist_0_rgb": image_spec,
                },
                image_masks={
                    "base_0_rgb": image_mask_spec,
                    "left_wrist_0_rgb": image_mask_spec,
                    "right_wrist_0_rgb": image_mask_spec,
                },
                state=jax.ShapeDtypeStruct([batch_size, self.action_dim], jnp.float32),
                tokenized_prompt=jax.ShapeDtypeStruct([batch_size, self.max_token_len], jnp.int32),
                tokenized_prompt_mask=jax.ShapeDtypeStruct([batch_size, self.max_token_len], bool),
            )
        action_spec = jax.ShapeDtypeStruct([batch_size, self.action_horizon, self.action_dim], jnp.float32)

        return observation_spec, action_spec

    def get_freeze_filter(self) -> nnx.filterlib.Filter:
        """Returns the freeze filter based on the model config.

        When a LoRA variant is used, the corresponding base weights are frozen
        while LoRA params (paths containing "lora") stay trainable.
        """
        filters = []
        has_lora = False
        # All LLM params (backbone + action expert share the ".*llm.*" prefix).
        gemma_params_filter = nnx_utils.PathRegex(".*llm.*")
        # Action-expert params are distinguished by the "_1" suffix in their path.
        action_expert_params_filter = nnx_utils.PathRegex(".*llm.*_1.*")
        if "lora" in self.paligemma_variant:
            filters.append(
                gemma_params_filter,
            )
            if "lora" not in self.action_expert_variant:
                # If only freeze gemma params, exclude action expert params.
                filters.append(
                    nnx.Not(action_expert_params_filter),
                )
            has_lora = True
        elif "lora" in self.action_expert_variant:
            filters.append(
                action_expert_params_filter,
            )
            has_lora = True

        if has_lora:
            # If any lora is used, exclude all lora params.
            filters.append(
                nnx.Not(nnx_utils.PathRegex(".*lora.*")),
            )
        if not filters:
            return nnx.Nothing
        return nnx.All(*filters)
src/openpi/models/pi0_fast.py ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ import logging
3
+ from typing import Any
4
+
5
+ import einops
6
+ import flax.nnx as nnx
7
+ import flax.nnx.bridge as nnx_bridge
8
+ import jax
9
+ import jax.numpy as jnp
10
+ from typing_extensions import override
11
+
12
+ from openpi.models import model as _model
13
+ import openpi.models.gemma_fast as _gemma
14
+ import openpi.models.siglip as _siglip
15
+ from openpi.shared import array_typing as at
16
+ import openpi.shared.nnx_utils as nnx_utils
17
+
18
+ logger = logging.getLogger("openpi")
19
+
20
+ PALIGEMMA_EOS_TOKEN = 1
21
+
22
+
23
def make_attn_mask(input_mask, mask_ar):
    """Adapted from big_vision.

    Produces a bool[B, N, N] attention mask: token i may attend token j exactly
    when j's cumulative `mask_ar` count is <= i's and both are valid (non-padding)
    inputs. Example `mask_ar` patterns:

      [[1 1 1 1 1 1]]: pure causal attention.

      [[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between
        themselves and the last 3 tokens have a causal attention. The first
        entry could also be a 1 without changing behaviour.

      [[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a
        block can attend all previous blocks and all tokens on the same block.

    Args:
        input_mask: bool[B, N] true if its part of the input, false if padding.
        mask_ar: bool[?B, N] mask that's true where previous tokens cannot depend on
            it and false where it shares the same attention mask as the previous token.
    """
    broadcast_ar = jnp.broadcast_to(mask_ar, input_mask.shape)
    group = jnp.cumsum(broadcast_ar, axis=1)
    key_group = group[:, None, :]
    query_group = group[:, :, None]
    # Attention is allowed from later (or same) groups back to earlier groups,
    # restricted to pairs where both sides are real input tokens.
    allowed = key_group <= query_group
    pair_valid = input_mask[:, None, :] * input_mask[:, :, None]
    return jnp.logical_and(allowed, pair_valid)
49
+
50
+
51
@jax.vmap
def left_to_right_align(x, input_mask, attn_mask):
    """Converts input from left-align to right-aligned."""
    # Due to vmap, this is operating on a single example (not batch level).
    assert x.ndim == 2
    assert input_mask.ndim == 1
    assert attn_mask.ndim == 2
    assert x.shape[0] == input_mask.shape[0]
    assert attn_mask.shape[0] == attn_mask.shape[1], attn_mask.shape
    # Number of valid tokens = index of the last True in input_mask + 1.
    valid_len = jnp.max(input_mask * jnp.arange(input_mask.shape[0])) + 1
    # Rotating left by valid_len moves the valid prefix to the right end.
    shifted_x = jnp.roll(x, -valid_len, axis=0)
    shifted_mask = jnp.roll(input_mask, -valid_len, axis=0)
    shifted_attn = jnp.roll(attn_mask, -valid_len, axis=(0, 1))
    return shifted_x, shifted_mask, shifted_attn
65
+
66
+
67
def put_along_last_axis(arr, indices, values):
    """Like np.put_along_axis(..., axis=-1), since jax is missing it."""
    assert arr.ndim == indices.ndim == values.ndim, (arr.ndim, indices.ndim, values.ndim)
    # One-hot basis over the last axis; scattering becomes two einsum contractions.
    basis = jax.nn.one_hot(indices, arr.shape[-1], dtype=values.dtype)
    ones = jnp.ones(values.shape, jnp.int32)
    hit_count = jnp.einsum("...i,...in->...n", ones, basis)
    scattered = jnp.einsum("...i,...in->...n", values, basis)
    # Positions touched by at least one index take the scattered value.
    return jnp.where(hit_count, scattered, arr)
74
+
75
+
76
@dataclasses.dataclass(frozen=True)
class Pi0FASTConfig(_model.BaseModelConfig):
    """Configuration for the Pi0-FAST autoregressive (token-based) action model."""

    # Computation dtype for the backbone and image tower.
    dtype: str = "bfloat16"
    # Gemma variant used for the PaliGemma backbone.
    paligemma_variant: _gemma.Variant = "gemma_2b"

    # Set the model specific defaults.
    action_dim: int = 32
    action_horizon: int = 32
    max_token_len: int = 250

    # Tokenizer for the fast model.
    fast_model_tokenizer: Any | None = None
    # Keyword arguments for the fast model tokenizer.
    fast_model_tokenizer_kwargs: dict[str, Any] | None = None

    @property
    @override
    def model_type(self) -> _model.ModelType:
        """The model type (always PI0_FAST)."""
        return _model.ModelType.PI0_FAST

    @override
    def create(self, rng: at.KeyArrayLike) -> "Pi0FAST":
        """Create a new Pi0FAST model with freshly initialized parameters."""
        return Pi0FAST(self, rngs=nnx.Rngs(rng))

    @override
    def inputs_spec(self, *, batch_size: int = 1) -> tuple[_model.Observation, _model.Actions]:
        """Return ShapeDtypeStruct specs for observation and action inputs.

        Unlike Pi0, the observation also carries token_ar_mask / token_loss_mask,
        since actions are represented as discrete tokens in the prompt.
        """
        image_spec = jax.ShapeDtypeStruct([batch_size, *_model.IMAGE_RESOLUTION, 3], jnp.float32)
        image_mask_spec = jax.ShapeDtypeStruct([batch_size], jnp.bool_)

        with at.disable_typechecking():
            observation_spec = _model.Observation(
                images={
                    "base_0_rgb": image_spec,
                    "base_1_rgb": image_spec,
                    "wrist_0_rgb": image_spec,
                },
                image_masks={
                    "base_0_rgb": image_mask_spec,
                    "base_1_rgb": image_mask_spec,
                    "wrist_0_rgb": image_mask_spec,
                },
                state=jax.ShapeDtypeStruct([batch_size, self.action_dim], jnp.float32),
                tokenized_prompt=jax.ShapeDtypeStruct([batch_size, self.max_token_len], jnp.int32),
                tokenized_prompt_mask=jax.ShapeDtypeStruct([batch_size, self.max_token_len], bool),
                token_ar_mask=jax.ShapeDtypeStruct([batch_size, self.max_token_len], jnp.int32),
                token_loss_mask=jax.ShapeDtypeStruct([batch_size, self.max_token_len], jnp.bool_),
            )
        action_spec = jax.ShapeDtypeStruct([batch_size, self.action_horizon, self.action_dim], jnp.float32)

        return observation_spec, action_spec

    def get_freeze_filter(self) -> nnx.filterlib.Filter:
        """Returns the freeze filter based on the model config.

        For LoRA variants: freeze all LLM params except those whose path
        contains "lora"; otherwise freeze nothing.
        """
        if "lora" in self.paligemma_variant:
            return nnx.All(nnx_utils.PathRegex(".*llm.*"), nnx.Not(nnx_utils.PathRegex(".*lora.*")))
        return nnx.Nothing
132
+
133
+
134
class Pi0FAST(_model.BaseModel):
    """PaliGemma-based VLA that autoregressively decodes FAST action tokens."""

    def __init__(self, config: Pi0FASTConfig, rngs: nnx.Rngs):
        super().__init__(config.action_dim, config.action_horizon, config.max_token_len)
        paligemma_config = _gemma.get_config(config.paligemma_variant)
        # TODO: rewrite gemma in NNX. For now, use bridge.
        llm = nnx_bridge.ToNNX(
            _gemma.Module(
                **paligemma_config,
                embed_dtype=config.dtype,
                cache_dtype=config.dtype,
            )
        )
        llm.lazy_init(rngs=rngs, method="init")
        img = nnx_bridge.ToNNX(
            _siglip.Module(
                num_classes=paligemma_config.width,  # project image tokens to the LLM width
                variant="So400m/14",
                pool_type="none",  # keep every patch token; no pooling
                scan=True,
                dtype_mm=config.dtype,
            )
        )
        # Initialize the image tower with a fake observation so shapes can be inferred.
        img.lazy_init(next(iter(config.fake_obs().images.values())), train=False, rngs=rngs)
        self.PaliGemma = nnx.Dict(llm=llm, img=img)

    @at.typecheck
    def embed_inputs(
        self, obs: _model.Observation
    ) -> tuple[at.Float[at.Array, "b s emb"], at.Bool[at.Array, "b s"], at.Int[at.Array, "b s"]]:
        """Embed images + tokenized prompt into one sequence.

        Returns (token embeddings, validity mask, autoregressive mask), each
        concatenated over [image tokens..., prompt tokens] along the sequence axis.
        """
        input_mask = []
        ar_mask = []
        token_embeddings = []
        # embed images
        for name in obs.images:
            image_token_embeddings, _ = self.PaliGemma.img(obs.images[name], train=False)

            token_embeddings.append(image_token_embeddings)
            # Broadcast the per-camera mask over all of that camera's patch tokens.
            input_mask.append(
                einops.repeat(
                    obs.image_masks[name],
                    "b -> b s",
                    s=image_token_embeddings.shape[1],
                )
            )
            # image tokens attend to each other --> AR mask = 0
            ar_mask.append(0 * input_mask[-1])

        # add tokenized inputs
        assert obs.tokenized_prompt is not None, "Tokenized prompt is required"
        assert obs.tokenized_prompt_mask is not None, "Tokenized prompt mask is required"
        assert obs.token_ar_mask is not None, "Token auto-regressive mask is required"
        # embed_only: only run the LLM's embedding table, not the transformer.
        tokenized_inputs_embeddings = self.PaliGemma.llm(obs.tokenized_prompt, embed_only=True)
        token_embeddings.append(tokenized_inputs_embeddings)
        input_mask.append(obs.tokenized_prompt_mask)
        ar_mask.append(obs.token_ar_mask)

        # return embeddings, input mask, and ar mask
        return (
            jnp.concatenate(token_embeddings, axis=1),
            jnp.concatenate(input_mask, axis=1),
            jnp.concatenate(ar_mask, axis=1),
        )

    @override
    def compute_loss(
        self, rng: at.KeyArrayLike, observation: _model.Observation, actions: _model.Actions, *, train: bool = False
    ) -> at.Float[at.Array, "*b ah"]:
        """Next-token cross-entropy loss over the (loss-masked) prompt tokens.

        Note: `actions` are not used directly here — action supervision is
        already baked into `observation.tokenized_prompt` as FAST tokens.
        """
        observation = _model.preprocess_observation(
            rng, observation, train=train, image_keys=list(observation.images.keys())
        )

        # Compute inputs: one big forward pass of prefix + suffix at once
        input_token_embeddings, input_mask, ar_mask = self.embed_inputs(observation)
        attn_mask = make_attn_mask(input_mask, ar_mask)

        # Compute one-hot targets: we predict *next* token, so shift the input tokens by one.
        targets = jax.nn.one_hot(
            observation.tokenized_prompt[:, 1:],
            self.PaliGemma.llm.module.vocab_size,
        )

        # Each input predicts *next* token, so we don't input the last token.
        pre_logits, _, _ = self.PaliGemma.llm(
            embedded_prefix=input_token_embeddings[:, :-1],
            mask=attn_mask[:, :-1, :-1],
            return_prelogits=True,
        )

        # Only decode logits for the target tokens to save memory
        # (decoding matmul is large because it is a seq_len x vocab_size dense layer).
        logits, _ = self.PaliGemma.llm(
            pre_logits=pre_logits[:, -targets.shape[1] :],
        )
        logp = jax.nn.log_softmax(logits, axis=-1)

        # Compute CE loss on token targets
        assert observation.token_loss_mask is not None, "Token loss mask is required"
        # Shift the loss mask to align with the shifted targets.
        loss_mask = observation.token_loss_mask[:, 1:]
        token_pplx = jnp.sum(targets * logp, axis=-1)
        # Mean negative log-likelihood over supervised tokens; clip avoids 0-division.
        return -jnp.sum(token_pplx * loss_mask, axis=-1) / jnp.clip(jnp.sum(loss_mask, -1), 1)

    @override
    def sample_actions(
        self,
        rng: at.KeyArrayLike,
        observation: _model.Observation,
        *,
        max_decoding_steps: int | at.Int[at.Array, ""] = 256,
        temperature: float = 0.0,
    ) -> _model.Actions:
        """Greedy (temperature=0) or sampled autoregressive decoding of action tokens.

        Returns the raw decoded token ids (shape [b, max_decoding_steps]); the
        FAST tokenizer decodes them into continuous actions downstream.
        """
        # TODO: this is a hack to get the image keys.
        observation = _model.preprocess_observation(
            None, observation, train=False, image_keys=list(observation.images.keys())
        )

        # embed inputs
        prefix_token_embeddings, prefix_mask, prefix_ar_mask = self.embed_inputs(observation)
        prefix_attn_mask = make_attn_mask(prefix_mask, prefix_ar_mask)

        # left to right align all input token sequences
        prefix_token_embeddings, prefix_mask, prefix_attn_mask = left_to_right_align(
            prefix_token_embeddings, prefix_mask, prefix_attn_mask
        )
        prefill_size = prefix_token_embeddings.shape[1]
        prefill_len = jnp.sum(prefix_mask, axis=-1)  # number of valid prefix tokens per batch element
        prefix_start = prefill_size - prefill_len  # index of the first valid token after alignment

        # first fill KV cache with a forward pass of the prefix
        # pad attention mask to set the size of the KV cache (prefill_size + max_decoding_steps)
        prefix_attn_mask = jnp.pad(prefix_attn_mask, ((0, 0), (0, 0), (0, max_decoding_steps)))
        prefix_positions = jnp.cumsum(prefix_mask, axis=-1) - 1
        prefix_logits, kv_cache, _ = self.PaliGemma.llm(
            embedded_prefix=prefix_token_embeddings, mask=prefix_attn_mask, positions=prefix_positions, decode=True
        )

        # prepare decoding -- final logit decodes the first token
        last_logit = prefix_logits[:, -1:]
        # NOTE(review): this buffer defaults to float dtype even though it holds
        # token ids; downstream consumers appear to rely on that — confirm before changing.
        output_tokens = jnp.zeros((last_logit.shape[0], max_decoding_steps))

        def step(carry):
            rng, last_logit, output_tokens, cache, _, step = carry

            # Sample token from last logit
            # Split RNG for this step
            rng, rng_step = jax.random.split(rng)
            # Greedy argmax when temperature == 0, else categorical sampling.
            token = jax.lax.cond(
                temperature > 0.0,
                lambda _: jax.random.categorical(rng_step, last_logit / temperature, axis=-1),
                lambda _: jnp.argmax(last_logit, axis=-1),
                operand=None,
            )
            # Write this step's token into the fixed-size output buffer.
            output_tokens = put_along_last_axis(output_tokens, jnp.broadcast_to(step, (token.shape[0], 1)), token)

            # Check for early stopping --> stop if all batch elements have EOS token
            has_eos = jnp.any(token == PALIGEMMA_EOS_TOKEN, axis=-1)
            all_eos = jnp.all(has_eos)

            # Decode one step
            token_embedding = self.PaliGemma.llm(token, embed_only=True)
            # NOTE(review): position is prefill_len + step + 1, while the last prefix
            # position is prefill_len - 1 — verify the intended +1 offset for the
            # first decoded token.
            positions = prefill_len[:, None] + step + 1
            # Attend to all valid prefix tokens plus every token decoded so far.
            mask = jnp.logical_and(
                jnp.arange(prefill_size + max_decoding_steps)[None, None, :] >= prefix_start[:, None, None],
                jnp.arange(prefill_size + max_decoding_steps)[None, None, :]
                < (jnp.broadcast_to(prefill_size + step + 1, (prefix_start.shape[0], 1, 1))),
            )
            last_logit, kv_cache, _ = self.PaliGemma.llm(
                embedded_prefix=token_embedding, mask=mask, positions=positions, decode=True, kv_cache=cache
            )

            return rng, last_logit, output_tokens, kv_cache, all_eos, step + 1

        def cond(carry):
            _, _, _, _, all_eos, step = carry
            # Keep decoding until every sequence emitted EOS or the budget is spent.
            return (~all_eos) & (step < max_decoding_steps)

        # Use lax.while_loop so we can jit the full decoding loop.
        _, _, output_tokens, _, _, _ = jax.lax.while_loop(
            cond, step, (rng, last_logit, output_tokens, kv_cache, False, 0)
        )
        return output_tokens
src/openpi/models/pi0_test.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import flax.nnx as nnx
2
+ import jax
3
+
4
+ import openpi.models.pi0_config as _pi0_config
5
+
6
+
7
def _get_frozen_state(config: _pi0_config.Pi0Config) -> nnx.State:
    """Return the flat state of all params that `config`'s freeze filter would freeze."""
    # eval_shape builds the model abstractly — no real weights are allocated.
    abstract_model = nnx.eval_shape(config.create, jax.random.key(0))

    freeze_filter = config.get_freeze_filter()
    # Intersect with nnx.Param so only parameters (not other state) are counted.
    return nnx.state(abstract_model, nnx.All(nnx.Param, freeze_filter)).flat_state()
12
+
13
+
14
def test_pi0_full_finetune():
    """With no LoRA variants configured, nothing should be frozen."""
    config = _pi0_config.Pi0Config()
    state = _get_frozen_state(config)
    assert len(state) == 0
18
+
19
+
20
def test_pi0_gemma_lora():
    """PaliGemma LoRA: LLM base weights frozen, LoRA adapters trainable."""
    config = _pi0_config.Pi0Config(paligemma_variant="gemma_2b_lora")
    state = _get_frozen_state(config)
    assert len(state) == 9
    # NOTE(review): these `"_1" not in p` checks use container membership on each
    # flat-state entry, whereas test_pi0_action_expert_lora iterates path parts
    # with a substring check — confirm both idioms test what is intended.
    assert all("lora" not in p for p in state)
    assert all("llm" in p for p in state)
    assert all("_1" not in p for p in state)
27
+
28
+
29
def test_pi0_action_expert_lora():
    """Action-expert LoRA: only the expert's (suffixed `_1`) base weights freeze."""
    config = _pi0_config.Pi0Config(action_expert_variant="gemma_300m_lora")
    state = _get_frozen_state(config)
    # excluding embedder, rest of the params should be same as gemma_lora.
    assert len(state) == 8
    assert all("lora" not in p for p in state)
    assert all("llm" in p for p in state)
    # all frozen params should have _1 in their path since it's the action expert.
    assert all(any("_1" in p for p in path) for path in state)
38
+
39
+
40
def test_pi0_all_lora():
    """Both towers LoRA: frozen set is the union of the two single-LoRA cases."""
    config = _pi0_config.Pi0Config(paligemma_variant="gemma_2b_lora", action_expert_variant="gemma_300m_lora")
    state = _get_frozen_state(config)
    # sum of gemma_lora and action_expert_lora's frozen params.
    assert len(state) == 17
    assert all("lora" not in p for p in state)
    assert all("llm" in p for p in state)
src/openpi/models/siglip.py ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Big Vision Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """A refactored and simplified ViT adoptation for Pi, taken from big_vision."""
16
+
17
+ from collections.abc import Sequence
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+
24
+ import openpi.training.sharding as sharding
25
+
26
+
27
def posemb_sincos_2d(h, w, width, temperature=10_000.0, dtype=jnp.float32):
    """Fixed 2D sin-cos positional embedding of shape (1, h*w, width).

    Follows the MoCo v3 logic: a quarter of the channels each for sin(x),
    cos(x), sin(y), cos(y).
    """
    assert width % 4 == 0, "Width must be mult of 4 for sincos posemb"
    grid_y, grid_x = jnp.mgrid[:h, :w]

    num_freqs = width // 4
    freq_exponents = jnp.arange(num_freqs) / (num_freqs - 1)
    inv_freqs = 1.0 / (temperature**freq_exponents)
    y_angles = jnp.einsum("m,d->md", grid_y.flatten(), inv_freqs)
    x_angles = jnp.einsum("m,d->md", grid_x.flatten(), inv_freqs)
    posemb = jnp.concatenate(
        [jnp.sin(x_angles), jnp.cos(x_angles), jnp.sin(y_angles), jnp.cos(y_angles)],
        axis=1,
    )
    # Add a leading batch axis so it broadcasts over the batch dimension.
    return jnp.asarray(posemb, dtype)[None, :, :]
38
+
39
+
40
def get_posemb(self, typ, seqshape, width, name, dtype=jnp.float32):
    """Return a (1, prod(seqshape), width) positional embedding.

    Args:
        self: the calling flax module (needed for `self.param` in the learned case).
        typ: "learn" for a trainable parameter, "sincos2d" for fixed features.
        seqshape: (h, w) token grid shape.
        width: embedding dimension.
        name: parameter name used for the learned embedding.
        dtype: dtype of the returned embedding.

    Raises:
        ValueError: if `typ` is not one of the supported kinds.
    """
    if typ == "learn":
        return self.param(
            name,
            nn.initializers.normal(stddev=1 / np.sqrt(width)),
            (1, np.prod(seqshape), width),
            dtype,
        )
    if typ == "sincos2d":
        return posemb_sincos_2d(*seqshape, width, dtype=dtype)
    raise ValueError(f"Unknown posemb type: {typ}")
51
+
52
+
53
class MlpBlock(nn.Module):
    """Transformer MLP / feed-forward block: Dense -> GELU -> Dropout -> Dense."""

    mlp_dim: int | None = None  # Defaults to 4x input dim
    dropout: float = 0.0
    dtype_mm: str = "float32"  # matmul dtype (may be half precision)

    @nn.compact
    def __call__(self, x, deterministic=True):  # noqa: FBT002
        """Applies Transformer MlpBlock module; output has the same last dim as `x`."""
        inits = {
            "kernel_init": nn.initializers.xavier_uniform(),
            "bias_init": nn.initializers.normal(stddev=1e-6),
        }

        _, _, d = x.shape  # n,l,d
        x = nn.Dense(self.mlp_dim or 4 * d, dtype=self.dtype_mm, **inits)(x)
        x = nn.gelu(x)
        x = nn.Dropout(rate=self.dropout)(x, deterministic)
        # Project back to the input dimension.
        return nn.Dense(d, dtype=self.dtype_mm, **inits)(x)
73
+
74
+
75
class Encoder1DBlock(nn.Module):
    """Single transformer encoder block (MHSA + MLP), pre-LayerNorm with residuals."""

    mlp_dim: int | None = None  # Defaults to 4x input dim
    num_heads: int = 12
    dropout: float = 0.0
    dtype_mm: str = "float32"

    @nn.compact
    def __call__(self, x, deterministic=True):  # noqa: FBT002
        """Return (output, dict of intermediate activations for introspection)."""
        out = {}
        x = sharding.activation_sharding_constraint(x)
        # Self-attention sub-block: LN -> MHSA -> dropout -> residual add.
        y = nn.LayerNorm(dtype=self.dtype_mm)(x)
        y = out["sa"] = nn.MultiHeadDotProductAttention(
            num_heads=self.num_heads,
            kernel_init=nn.initializers.xavier_uniform(),
            deterministic=deterministic,
            dtype=self.dtype_mm,
        )(y, y)
        y = sharding.activation_sharding_constraint(y)
        y = nn.Dropout(rate=self.dropout)(y, deterministic)
        x = out["+sa"] = x + y

        # MLP sub-block: LN -> MlpBlock -> dropout -> residual add.
        y = nn.LayerNorm(dtype=self.dtype_mm)(x)
        y = out["mlp"] = MlpBlock(
            mlp_dim=self.mlp_dim,
            dropout=self.dropout,
            dtype_mm=self.dtype_mm,
        )(y, deterministic)
        y = sharding.activation_sharding_constraint(y)
        y = nn.Dropout(rate=self.dropout)(y, deterministic)
        x = out["+mlp"] = x + y
        x = sharding.activation_sharding_constraint(x)
        return x, out
109
+
110
+
111
class Encoder(nn.Module):
    """Transformer Model Encoder for sequence to sequence translation.

    A stack of `depth` Encoder1DBlocks followed by a final LayerNorm. With
    `scan=True` the blocks share one compiled body via nn.scan (+ rematerialized
    gradients per `remat_policy`); otherwise they are unrolled as named layers.
    """

    depth: int
    mlp_dim: int | None = None  # Defaults to 4x input dim
    num_heads: int = 12
    dropout: float = 0.0
    scan: bool = False
    remat_policy: str = "nothing_saveable"
    dtype_mm: str = "float32"

    @nn.compact
    def __call__(self, x, deterministic=True):  # noqa: FBT002
        """Return (normed output, dict of per-block intermediate activations)."""
        out = {}

        if self.scan:
            # Checkpoint (remat) each block to trade compute for activation memory.
            block = nn.remat(
                Encoder1DBlock,
                prevent_cse=False,
                static_argnums=(2,),  # 0=self, 2=deterministic
                policy=getattr(jax.checkpoint_policies, self.remat_policy, None),
            )
            # Scan stacks the per-layer params along axis 0 of a single variable.
            x, scan_out = nn.scan(
                block,
                variable_axes={"params": 0},
                split_rngs={"params": True, "dropout": True},
                in_axes=nn.broadcast,
                length=self.depth,
            )(
                name="encoderblock",
                dtype_mm=self.dtype_mm,
                mlp_dim=self.mlp_dim,
                num_heads=self.num_heads,
                dropout=self.dropout,
            )(x, deterministic)
            # Unpack the scan-stacked intermediates into per-layer entries.
            for lyr in range(self.depth):
                out[f"block{lyr:02d}"] = jax.tree.map(lambda o, lyr=lyr: o[lyr], scan_out)
        else:
            # Input Encoder
            for lyr in range(self.depth):
                block_cur = Encoder1DBlock(
                    name=f"encoderblock_{lyr}",
                    dtype_mm=self.dtype_mm,
                    mlp_dim=self.mlp_dim,
                    num_heads=self.num_heads,
                    dropout=self.dropout,
                )
                x, out[f"block{lyr:02d}"] = block_cur(x, deterministic)
            out["pre_ln"] = x  # Alias for last block, but without the number in it.

        return nn.LayerNorm(name="encoder_norm", dtype=self.dtype_mm)(x), out
162
+
163
+
164
class MAPHead(nn.Module):
    """Multihead Attention Pooling.

    Pools a (n, l, d) token sequence down to (n, d) by cross-attending a single
    learned probe token to the sequence, followed by an MLP with a residual.
    """

    mlp_dim: int | None = None  # Defaults to 4x input dim
    num_heads: int = 12
    dtype_mm: str = "float32"  # matmul dtype (may be half precision)

    @nn.compact
    def __call__(self, x):
        """Return the pooled representation, shape (n, d)."""
        n, _, d = x.shape  # n,l,d
        probe = self.param("probe", nn.initializers.xavier_uniform(), (1, 1, d), x.dtype)
        probe = jnp.tile(probe, [n, 1, 1])

        # Cross-attention: the probe queries the full token sequence.
        x = nn.MultiHeadDotProductAttention(
            num_heads=self.num_heads,
            dtype=self.dtype_mm,
            kernel_init=nn.initializers.xavier_uniform(),
        )(probe, x)

        y = nn.LayerNorm(dtype=self.dtype_mm)(x)
        # Fix: MlpBlock's field is `dtype_mm`, not `dtype`. Passing `dtype=` raised
        # a TypeError (unexpected dataclass field) whenever MAP pooling was used.
        x = x + MlpBlock(mlp_dim=self.mlp_dim, dtype_mm=self.dtype_mm)(y)
        return x[:, 0]
186
+
187
+
188
class _Module(nn.Module):
    """ViT model.

    Patchify -> positional embedding -> transformer encoder -> optional pooling
    head. Returns (head output, dict of intermediate activations).
    """

    num_classes: int | None = None
    patch_size: Sequence[int] = (16, 16)
    width: int = 768
    depth: int = 12
    mlp_dim: int | None = None  # Defaults to 4x input dim
    num_heads: int = 12
    posemb: str = "learn"  # Can also be "sincos2d"
    rep_size: int | bool = False
    dropout: float = 0.0
    pool_type: str = "gap"  # Can also be "map" or "tok"
    head_zeroinit: bool = True
    scan: bool = False
    # or "dots_with_no_batch_dims_saveable" for more speed (memory costly)
    remat_policy: str = "nothing_saveable"
    dtype_mm: str = "float32"

    @nn.compact
    def __call__(self, image, *, train=False):
        """Run the ViT on `image` (n, h, w, 3); returns (output, intermediates)."""
        out = {}

        # Kevin edit: do patch extraction and posemb in float32,
        # because I feel like it's a bit safer.
        image = jnp.asarray(image, jnp.float32)

        # Patch extraction: non-overlapping conv == linear projection per patch.
        x = out["stem"] = nn.Conv(
            self.width,
            self.patch_size,
            strides=self.patch_size,
            padding="VALID",
            name="embedding",
            dtype=jnp.float32,
        )(image)

        n, h, w, c = x.shape
        x = jnp.reshape(x, [n, h * w, c])

        # Add posemb before adding extra token.
        x = out["with_posemb"] = x + get_posemb(self, self.posemb, (h, w), c, "pos_embedding", jnp.float32)

        if self.pool_type == "tok":
            # Prepend a learned [CLS]-style token.
            cls = self.param("cls", nn.initializers.zeros, (1, 1, c), x.dtype)
            x = jnp.concatenate([jnp.tile(cls, [n, 1, 1]), x], axis=1)

        n, _, c = x.shape  # n,l,d
        x = nn.Dropout(rate=self.dropout)(x, not train)

        # Kevin edit: now cast back to dtype_mm (potentially half precision)
        x = x.astype(self.dtype_mm)

        x, out["encoder"] = Encoder(
            depth=self.depth,
            mlp_dim=self.mlp_dim,
            num_heads=self.num_heads,
            dropout=self.dropout,
            scan=self.scan,
            remat_policy=self.remat_policy,
            dtype_mm=self.dtype_mm,
            name="Transformer",
        )(x, deterministic=not train)
        encoded = out["encoded"] = x

        if self.pool_type == "map":
            # Fix: MAPHead's field is `dtype_mm`, not `dtype`. Passing `dtype=`
            # raised a TypeError whenever pool_type == "map" was selected.
            x = out["head_input"] = MAPHead(
                num_heads=self.num_heads,
                mlp_dim=self.mlp_dim,
                dtype_mm=self.dtype_mm,
            )(x)
        elif self.pool_type == "gap":
            # Global average pooling over tokens.
            x = out["head_input"] = jnp.mean(x, axis=1)
        elif self.pool_type == "0":
            x = out["head_input"] = x[:, 0]
        elif self.pool_type == "tok":
            x = out["head_input"] = x[:, 0]
            # Drop the class token so `encoded` keeps exactly h*w patch tokens.
            encoded = encoded[:, 1:]
        elif self.pool_type == "none":
            pass
        else:
            raise ValueError(f"Unknown pool type: '{self.pool_type}'")

        # Spatial (n, h, w, c) view of the encoded patch tokens.
        x_2d = jnp.reshape(encoded, [n, h, w, -1])

        if self.rep_size:
            rep_size = self.width if self.rep_size is True else self.rep_size
            hid = nn.Dense(rep_size, dtype=self.dtype_mm, name="pre_logits")
            # NOTE: In the past we did not include tanh in pre_logits.
            # For few-shot, it should not matter much, as it whitens anyways.
            x_2d = nn.tanh(hid(x_2d))
            x = nn.tanh(hid(x))

        out["pre_logits_2d"] = x_2d
        out["pre_logits"] = x

        if self.num_classes:
            # Zero-init head keeps initial logits at zero for stable fine-tuning.
            kw = {"kernel_init": nn.initializers.zeros} if self.head_zeroinit else {}
            head = nn.Dense(self.num_classes, dtype=self.dtype_mm, name="head", **kw)
            x_2d = out["logits_2d"] = head(x_2d)
            x = out["logits"] = head(x)

        return x, out
291
+
292
+
293
def Module(num_classes=None, *, variant=None, **kw):  # pylint: disable=invalid-name # noqa: N802
    """Factory function, because linen really don't like what I'm doing!"""
    # Explicit kwargs win over the variant-derived defaults.
    config = {**decode_variant(variant), **kw}
    return _Module(num_classes, **config)
296
+
297
+
298
def decode_variant(variant):
    """Converts a string like "B" or "B/32" into a params dict."""
    if variant is None:
        return {}

    # An optional "/P" suffix encodes the (square) patch size.
    name, patch = variant, {}
    if "/" in variant:
        name, patch_str = variant.split("/")
        patch = {"patch_size": (int(patch_str), int(patch_str))}

    # Reference: Table 2 of https://arxiv.org/abs/2106.04560.
    # Per-variant (width, depth, mlp_dim, num_heads).
    specs = {
        "mu": (32, 1, 128, 2),
        "Ti": (192, 12, 768, 3),
        "S": (384, 12, 1536, 6),
        "M": (512, 12, 2048, 8),
        "B": (768, 12, 3072, 12),
        "L": (1024, 24, 4096, 16),
        "So400m": (1152, 27, 4304, 16),
        "H": (1280, 32, 5120, 16),
        "g": (1408, 40, 6144, 16),
        "g-opt": (1536, 40, 6144, 16),
        "G": (1664, 48, 8192, 16),
        "G-opt": (1536, 48, 8192, 16),
        "e": (1792, 56, 15360, 16),
    }
    width, depth, mlp_dim, num_heads = specs[name]
    return {
        "width": width,
        "depth": depth,
        "mlp_dim": mlp_dim,
        "num_heads": num_heads,
        **patch,
    }
+ }
src/openpi/models/tokenizer.py ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+
4
+ import jax
5
+ import numpy as np
6
+ import orbax.checkpoint as ocp
7
+ import sentencepiece
8
+ from transformers import AutoProcessor
9
+
10
+ import openpi.models.utils.fsq_tokenizer as fsq_tokenizer
11
+ import openpi.shared.download as download
12
+
13
+
14
class PaligemmaTokenizer:
    """Wraps the PaliGemma SentencePiece tokenizer for pi0-style prompt encoding."""

    def __init__(self, max_len: int = 48):
        # Fixed output length: shorter prompts are padded, longer ones truncated.
        self._max_len = max_len

        path = download.maybe_download("gs://big_vision/paligemma_tokenizer.model", gs={"token": "anon"})
        with path.open("rb") as f:
            self._tokenizer = sentencepiece.SentencePieceProcessor(model_proto=f.read())

    def tokenize(self, prompt: str, state: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray]:
        """Tokenize `prompt` (optionally with discretized `state`) to fixed length.

        Returns (tokens, mask), both of length `max_len`; `mask` is True on real
        tokens. Note that padding appends boolean `False` entries to the int
        token list, which become token id 0 after `np.asarray`.
        """
        cleaned_text = prompt.strip().replace("_", " ").replace("\n", " ")
        if state is not None:
            # This is the Pi05 format, where the state is part of the discrete language input.
            # Each state dim is binned into 256 buckets over an assumed [-1, 1] range.
            discretized_state = np.digitize(state, bins=np.linspace(-1, 1, 256 + 1)[:-1]) - 1
            state_str = " ".join(map(str, discretized_state))
            full_prompt = f"Task: {cleaned_text}, State: {state_str};\nAction: "
            tokens = self._tokenizer.encode(full_prompt, add_bos=True)
        else:
            # This is the Pi0 format, where the state is part of the continuous action expert input.
            # tokenize "\n" separately as the "start of answer" token
            tokens = self._tokenizer.encode(cleaned_text, add_bos=True) + self._tokenizer.encode("\n")
        tokens_len = len(tokens)
        if tokens_len < self._max_len:
            padding = [False] * (self._max_len - tokens_len)
            mask = [True] * tokens_len + padding
            tokens = tokens + padding
        else:
            # Warn only when actually truncating (len == max_len passes silently).
            if len(tokens) > self._max_len:
                logging.warning(
                    f"Token length ({len(tokens)}) exceeds max length ({self._max_len}), truncating. "
                    "Consider increasing the `max_token_len` in your model config if this happens frequently."
                )
            tokens = tokens[: self._max_len]
            mask = [True] * self._max_len

        return np.asarray(tokens), np.asarray(mask)
49
+
50
+
51
class FASTTokenizer:
    """Tokenizer for Pi0-FAST: PaliGemma text tokens + FAST-encoded action tokens."""

    def __init__(self, max_len: int = 256, fast_tokenizer_path: str = "physical-intelligence/fast"):
        self._max_len = max_len

        # Download base PaliGemma tokenizer
        path = download.maybe_download("gs://big_vision/paligemma_tokenizer.model", gs={"token": "anon"})
        with path.open("rb") as f:
            self._paligemma_tokenizer = sentencepiece.SentencePieceProcessor(model_proto=f.read())

        # Instantiate FAST tokenizer
        self._fast_tokenizer = AutoProcessor.from_pretrained(fast_tokenizer_path, trust_remote_code=True)
        self._fast_skip_tokens = 128  # Skip last 128 tokens in PaliGemma vocab since they are special tokens

    def tokenize(
        self, prompt: str, state: np.ndarray, actions: np.ndarray | None
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Build the token sequence "Task: ..., State: ...;\\nAction: <fast tokens>|".

        Returns fixed-length (tokens, token_mask, ar_mask, loss_mask). When
        `actions` is None (inference) only the prefix is produced. As in
        PaligemmaTokenizer, padding appends boolean `False` entries that become
        id 0 in the output arrays.
        """
        cleaned_text = prompt.lower().strip().replace("_", " ")

        # Convention: state gets discretized into 256 discrete bins (assumed range after normalization: [-1, 1])
        discretized_state = np.digitize(state, bins=np.linspace(-1, 1, 256 + 1)[:-1]) - 1

        # Convention: prefix includes prompt and string-representation of state, followed by ';'
        state_str = " ".join(map(str, discretized_state))
        prefix = f"Task: {cleaned_text}, State: {state_str};\n"
        prefix_tokens = self._paligemma_tokenizer.encode(prefix, add_bos=True)

        if actions is not None:
            # Tokenize actions with FAST tokenizer --> map to last tokens in PaliGemma vocab
            action_tokens = self._fast_tokenizer(actions[None])[0]
            action_tokens_in_pg = self._act_tokens_to_paligemma_tokens(action_tokens)

            # Convention: postfix contains 'Action:' followed by FAST tokens, followed by '|'
            postfix_tokens = (
                self._paligemma_tokenizer.encode("Action: ")
                + action_tokens_in_pg.tolist()
                + self._paligemma_tokenizer.encode("|", add_eos=True)
            )
        else:
            postfix_tokens = []

        # Create output token sequence & masks
        # AR mask is 0 on prefix (bidirectional attention) and 1 on postfix (causal attention to all previous tokens)
        tokens = prefix_tokens + postfix_tokens
        token_mask = [True] * len(tokens)
        ar_mask = [0] * len(prefix_tokens) + [1] * len(postfix_tokens)
        loss_mask = [False] * len(prefix_tokens) + [True] * len(postfix_tokens)  # Loss on postfix only

        # Pad tokens to max length
        tokens_len = len(tokens)
        if tokens_len < self._max_len:
            padding = [False] * (self._max_len - tokens_len)
            tokens = tokens + padding
            token_mask = token_mask + padding
            ar_mask = ar_mask + padding
            loss_mask = loss_mask + padding
        else:
            # Warn only when actually truncating (len == max_len passes silently).
            if len(tokens) > self._max_len:
                logging.warning(
                    f"Token length ({len(tokens)}) exceeds max length ({self._max_len}), truncating. "
                    "Consider increasing the `max_token_len` in your model config if this happens frequently."
                )
            tokens = tokens[: self._max_len]
            token_mask = token_mask[: self._max_len]
            ar_mask = ar_mask[: self._max_len]
            loss_mask = loss_mask[: self._max_len]

        return np.asarray(tokens), np.asarray(token_mask), np.asarray(ar_mask), np.asarray(loss_mask)

    def extract_actions(self, tokens: np.ndarray, action_horizon: int, action_dim: int) -> np.ndarray:
        """Decode model output tokens back into a (action_horizon, action_dim) array.

        Falls back to zeros when no "Action: " segment is present in the output.
        """
        # Decode predicted output tokens
        decoded_tokens = self._paligemma_tokenizer.decode(tokens.tolist())

        # Extract actions from FAST model outputs
        if "Action: " not in decoded_tokens:
            return np.zeros((action_horizon, action_dim), dtype=np.float32)

        # Extract actions from decoded tokens: take the text between "Action: "
        # and the trailing "|", re-encode it, and map back to FAST token ids.
        raw_action_tokens = np.array(
            self._paligemma_tokenizer.encode(decoded_tokens.split("Action: ")[1].split("|")[0].strip())
        )
        action_tokens = self._act_tokens_to_paligemma_tokens(raw_action_tokens)
        return self._fast_tokenizer.decode(
            [action_tokens.tolist()], time_horizon=action_horizon, action_dim=action_dim
        )[0]

    def _act_tokens_to_paligemma_tokens(self, tokens: np.ndarray | list[int]) -> np.ndarray:
        """Map FAST action token ids into the tail of the PaliGemma vocab.

        The mapping is its own inverse, so it is also used to map back.
        """
        if isinstance(tokens, list):
            tokens = np.array(tokens)
        return self._paligemma_tokenizer.vocab_size() - 1 - self._fast_skip_tokens - tokens
140
+
141
+
142
+ ###########################################################################
143
+ ## The tokenizers below are used for RoboArena baseline implementations. ##
144
+ ## They are *not* used for pi0-style models. ##
145
+ ###########################################################################
146
+
147
+
148
+ class BinningTokenizer:
149
+ """
150
+ Standard RT-2 / OpenVLA style binning tokenizer.
151
+ """
152
+
153
    def __init__(self, max_len: int = 256, n_bins: int = 256):
        # Fixed output sequence length and number of discretization bins.
        self._max_len = max_len
        self._n_bins = n_bins

        # Download base PaliGemma tokenizer
        path = download.maybe_download("gs://big_vision/paligemma_tokenizer.model", gs={"token": "anon"})
        with path.open("rb") as f:
            self._paligemma_tokenizer = sentencepiece.SentencePieceProcessor(model_proto=f.read())

        self._fast_skip_tokens = 128  # Skip last 128 tokens in PaliGemma vocab since they are special tokens
163
+
164
    def tokenize(
        self, prompt: str, state: np.ndarray, actions: np.ndarray | None
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Tokenize a prompt and state into a sequence of tokens.

        Args:
            prompt: The text prompt to tokenize.
            state: The state array to discretize and tokenize.
            actions: Must be None. Action encoding is not currently supported.

        Returns:
            A tuple of (tokens, token_mask, ar_mask, targets).

        Raises:
            NotImplementedError: If actions is not None.
        """
        cleaned_text = prompt.lower().strip().replace("_", " ")

        # Convention: state gets discretized into 256 discrete bins (assumed range after normalization: [-1, 1])
        discretized_state = np.digitize(state, bins=np.linspace(-1, 1, 256 + 1)[:-1]) - 1

        # Convention: prefix includes prompt and string-representation of state, followed by ';'
        state_str = " ".join(map(str, discretized_state))
        prefix = f"Task: {cleaned_text}, State: {state_str};\n"
        prefix_tokens = self._paligemma_tokenizer.encode(prefix, add_bos=True)

        if actions is not None:
            raise NotImplementedError("BinningTokenizer does not support encoding actions atm (only for inference use)")
        # Inference-only tokenizer: the postfix is always empty.
        postfix_tokens = []

        # Create output token sequence & masks
        # AR mask is 0 on prefix (bidirectional attention) and 1 on postfix (causal attention to all previous tokens)
        tokens = prefix_tokens + postfix_tokens
        token_mask = [True] * len(tokens)
        ar_mask = [0] * len(prefix_tokens) + [1] * len(postfix_tokens)
        loss_mask = [False] * len(prefix_tokens) + [True] * len(postfix_tokens)  # Loss on postfix only

        # Pad tokens to max length
        tokens_len = len(tokens)
        if tokens_len < self._max_len:
            # Padding reuses boolean False entries, which become id 0 in the arrays.
            padding = [False] * (self._max_len - tokens_len)
            tokens = tokens + padding
            token_mask = token_mask + padding
            ar_mask = ar_mask + padding
            loss_mask = loss_mask + padding
        else:
            # Warn only when actually truncating (len == max_len passes silently).
            if len(tokens) > self._max_len:
                logging.warning(
                    f"Token length ({len(tokens)}) exceeds max length ({self._max_len}), truncating. "
                    "Consider increasing the `max_token_len` in your model config if this happens frequently."
                )
            tokens = tokens[: self._max_len]
            token_mask = token_mask[: self._max_len]
            ar_mask = ar_mask[: self._max_len]
            loss_mask = loss_mask[: self._max_len]

        return np.asarray(tokens), np.asarray(token_mask), np.asarray(ar_mask), np.asarray(loss_mask)
221
+
222
def extract_actions(self, tokens: np.ndarray, action_horizon: int, action_dim: int) -> np.ndarray:
    """Decode model output tokens into an (action_horizon, action_dim) array in [-1, 1].

    Falls back to all-zero actions when the expected "Action: ...|" pattern is
    missing or too few action tokens were produced.
    """
    # Decode predicted output tokens
    decoded_tokens = self._paligemma_tokenizer.decode(tokens.tolist())

    # Extract actions from FAST model outputs
    if "Action: " not in decoded_tokens:
        return np.zeros((action_horizon, action_dim), dtype=np.float32)

    # Extract actions from decoded tokens: the action string sits between
    # "Action: " and the next '|' separator; re-encode it to token ids.
    raw_action_tokens = np.array(
        self._paligemma_tokenizer.encode(decoded_tokens.split("Action: ")[1].split("|")[0].strip())
    )
    # Map PaliGemma vocab ids back into the action-token index range.
    action_tokens = self._act_tokens_to_paligemma_tokens(raw_action_tokens)
    if len(action_tokens) < action_horizon * action_dim:
        return np.zeros([action_horizon, action_dim], dtype=np.float32)
    action_tokens = action_tokens[: (action_horizon * action_dim)].reshape([action_horizon, action_dim])
    # Map discrete bin indices back to the normalized [-1, 1] action range.
    # NOTE(review): assumes self._n_bins is set in __init__ (not visible in this chunk) - confirm.
    return action_tokens / self._n_bins * 2 - 1
239
+
240
+ def _act_tokens_to_paligemma_tokens(self, tokens: np.ndarray | list[int]) -> np.ndarray:
241
+ if isinstance(tokens, list):
242
+ tokens = np.array(tokens)
243
+ return self._paligemma_tokenizer.vocab_size() - 1 - self._fast_skip_tokens - tokens
244
+
245
+
246
class FSQTokenizer:
    """FSQ tokenizer from the FAST paper baselines.

    Combines a pretrained FSQ action tokenizer (restored from an Orbax
    checkpoint) with the base PaliGemma text tokenizer. Prompts and discretized
    states are tokenized into a text prefix; actions are recovered from model
    output tokens via the jitted FSQ detokenizer.
    """

    def __init__(self, max_len: int = 256, fsq_tokenizer_path: str | None = None):
        """Load the FSQ checkpoint and the PaliGemma tokenizer.

        Args:
            max_len: Fixed output length for tokenize().
            fsq_tokenizer_path: Required path (local or remote) to the FSQ
                tokenizer checkpoint directory.

        Raises:
            RuntimeError: If the FSQ checkpoint cannot be restored.
        """
        self._max_len = max_len

        assert fsq_tokenizer_path is not None, "fsq_tokenizer_path must be provided"
        # Download tokenizer; the downloaded directory is expected to contain a
        # single per-step checkpoint directory.
        path = download.maybe_download(fsq_tokenizer_path)
        tok_path = os.path.join(path, os.listdir(path)[0])

        # Split step from path
        step = int(tok_path.split("/")[-1])
        base_path = tok_path.rsplit("/", 1)[0]

        mgr = ocp.CheckpointManager(
            base_path,
            item_handlers={
                "params": ocp.StandardCheckpointHandler(),
                "opt_state": ocp.StandardCheckpointHandler(),
                "config": ocp.JsonCheckpointHandler(),
            },
            options=ocp.CheckpointManagerOptions(max_to_keep=1),
        )

        try:
            restored = mgr.restore(
                step, args=ocp.args.Composite(config=ocp.args.JsonRestore(), params=ocp.args.StandardRestore())
            )
            config = restored["config"]
            self._params = restored["params"]
            self._fsq_tokenizer = fsq_tokenizer.FsqAttentionTokenizer(**config)
        except Exception as e:
            raise RuntimeError(
                f"Failed to load FSQ tokenizer checkpoint from {fsq_tokenizer_path}. Error: {e!s}"
            ) from e

        # Compile tokenize and detokenize functions
        self._tokenize_fn = jax.jit(
            lambda params, x: self._fsq_tokenizer.apply({"params": params}, x, method=self._fsq_tokenizer.tokenize)
        )
        self._detokenize_fn = jax.jit(
            lambda params, x: self._fsq_tokenizer.apply({"params": params}, x, method=self._fsq_tokenizer.detokenize)
        )

        # Download base PaliGemma tokenizer
        path = download.maybe_download("gs://big_vision/paligemma_tokenizer.model", gs={"token": "anon"})
        with path.open("rb") as f:
            self._paligemma_tokenizer = sentencepiece.SentencePieceProcessor(model_proto=f.read())

        self._fast_skip_tokens = 128  # Skip last 128 tokens in PaliGemma vocab since they are special tokens

    def tokenize(
        self, prompt: str, state: np.ndarray, actions: np.ndarray | None
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Tokenize a prompt and discretized state into a fixed-length sequence.

        Returns a tuple of (tokens, token_mask, ar_mask, loss_mask), each of
        length ``self._max_len``. Raises NotImplementedError if actions is not
        None (only inference use is supported).
        """
        cleaned_text = prompt.lower().strip().replace("_", " ")

        # Convention: state gets discretized into 256 discrete bins (assumed range after normalization: [-1, 1])
        discretized_state = np.digitize(state, bins=np.linspace(-1, 1, 256 + 1)[:-1]) - 1

        # Convention: prefix includes prompt and string-representation of state, followed by ';'
        state_str = " ".join(map(str, discretized_state))
        prefix = f"Task: {cleaned_text}, State: {state_str};\n"
        prefix_tokens = self._paligemma_tokenizer.encode(prefix, add_bos=True)

        if actions is not None:
            raise NotImplementedError("FSQTokenizer does not support encoding actions atm (only for inference use)")
        postfix_tokens = []

        # Create output token sequence & masks
        # AR mask is 0 on prefix (bidirectional attention) and 1 on postfix (causal attention to all previous tokens)
        tokens = prefix_tokens + postfix_tokens
        token_mask = [True] * len(tokens)
        ar_mask = [0] * len(prefix_tokens) + [1] * len(postfix_tokens)
        loss_mask = [False] * len(prefix_tokens) + [True] * len(postfix_tokens)  # Loss on postfix only

        # Pad tokens to max length (False == 0 works for ids and masks alike).
        tokens_len = len(tokens)
        if tokens_len < self._max_len:
            padding = [False] * (self._max_len - tokens_len)
            tokens = tokens + padding
            token_mask = token_mask + padding
            ar_mask = ar_mask + padding
            loss_mask = loss_mask + padding
        elif tokens_len > self._max_len:
            logging.warning(
                f"Token length ({tokens_len}) exceeds max length ({self._max_len}), truncating. "
                "Consider increasing the `max_token_len` in your model config if this happens frequently."
            )
            tokens = tokens[: self._max_len]
            token_mask = token_mask[: self._max_len]
            ar_mask = ar_mask[: self._max_len]
            loss_mask = loss_mask[: self._max_len]

        return np.asarray(tokens), np.asarray(token_mask), np.asarray(ar_mask), np.asarray(loss_mask)

    def extract_actions(self, tokens: np.ndarray, action_horizon: int, action_dim: int) -> np.ndarray:
        """Decode model output tokens into an (action_horizon, action_dim) array.

        Falls back to all-zero actions when the expected "Action: ...|" pattern
        is missing or FSQ detokenization fails.
        """
        # Decode predicted output tokens
        decoded_tokens = self._paligemma_tokenizer.decode(tokens.tolist())

        # Extract actions from FAST model outputs
        if "Action: " not in decoded_tokens:
            return np.zeros((action_horizon, action_dim), dtype=np.float32)

        # Extract actions from decoded tokens
        raw_action_tokens = np.array(
            self._paligemma_tokenizer.encode(decoded_tokens.split("Action: ")[1].split("|")[0].strip())
        )
        action_tokens = self._act_tokens_to_paligemma_tokens(raw_action_tokens)
        try:
            # Move computation to CPU and compile on-demand
            device = jax.devices("cpu")[0]
            with jax.default_device(device):
                detok_act = self._detokenize_fn(self._params, action_tokens[None, ...])[0]
            return detok_act[: action_horizon * action_dim].reshape([action_horizon, action_dim])
        except Exception as e:
            logging.warning(f"Error decoding FSQ: {e}")
            # Fix: return float32 zeros to match the dtype of the other fallback paths.
            return np.zeros((action_horizon, action_dim), dtype=np.float32)

    def _act_tokens_to_paligemma_tokens(self, tokens: np.ndarray | list[int]) -> np.ndarray:
        """Map action-token indices into PaliGemma vocab ids (mirrored from the vocab tail)."""
        if isinstance(tokens, list):
            tokens = np.array(tokens)
        return self._paligemma_tokenizer.vocab_size() - 1 - self._fast_skip_tokens - tokens
src/openpi/models/tokenizer_test.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from openpi.models import tokenizer as _tokenizer
4
+
5
+
6
def test_tokenize():
    """PaligemmaTokenizer pads tokens and masks out to max_len."""
    tok = _tokenizer.PaligemmaTokenizer(max_len=10)
    tokens, masks = tok.tokenize("Hello, world!")

    for arr in (tokens, masks):
        assert arr.shape == (10,)
12
+
13
+
14
def test_fast_tokenizer():
    """FASTTokenizer emits fixed-length sequences/masks and round-trips action shapes."""
    prompt = "Hello, world!"
    state = np.random.rand(5).astype(np.float32)
    action = np.random.rand(3, 2).astype(np.float32)

    tok = _tokenizer.FASTTokenizer(max_len=256)
    outputs = tok.tokenize(prompt, state, action)
    tokens = outputs[0]

    for arr in outputs:
        assert arr.shape == (256,)

    act = tok.extract_actions(tokens, 3, 2)
    assert act.shape == (3, 2)
src/openpi/models/utils/__pycache__/fsq_tokenizer.cpython-311.pyc ADDED
Binary file (28.6 kB). View file
 
src/openpi/models/utils/fsq_tokenizer.py ADDED
@@ -0,0 +1,472 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from typing import Any, Literal
3
+
4
+ import chex
5
+ from einops import einops
6
+ from flax import linen as nn
7
+ from flax.linen.module import Module
8
+ from flax.linen.module import compact
9
+ from flax.struct import dataclass
10
+ from flax.typing import Array
11
+ import jax
12
+ import jax.numpy as jnp
13
+
14
+
15
class FsqCodebook(nn.Module):
    """Finite Scalar Quantization codebook.

    Projects inputs down to a few dimensions, quantizes each dimension into a
    fixed number of bins via round(tanh(.)), and packs the per-dimension digits
    into a single integer token using mixed-radix place values. Decoding runs
    the inverse with a straight-through gradient estimator.
    """

    input_dim: int  # dimensionality restored by proj_up on decode
    target_codebook_size: int  # desired number of distinct tokens
    # Fix: "custom" was handled below (and is used by FsqAttentionTokenizer)
    # but was missing from the declared Literal.
    codebook_type: Literal["fsq", "lfq", "custom"]

    _bins_per_dim: tuple[int, ...] | None = None  # optional explicit bin layout override

    @property
    def bins_per_dim(self) -> tuple[int, ...]:
        """Number of quantization bins per latent dimension."""
        if self._bins_per_dim is not None:
            return self._bins_per_dim

        if self.codebook_type == "fsq":
            return self._get_bins_fsq(self.target_codebook_size)
        elif self.codebook_type == "lfq":  # noqa: RET505
            return self._get_bins_lfq(self.target_codebook_size)
        elif self.codebook_type == "custom":
            return self._get_bins_custom(self.target_codebook_size)
        else:
            raise ValueError(f"Codebook type {self.codebook_type} not supported.")

    @property
    def place_values(self) -> jnp.ndarray:
        """Mixed-radix place values: [1, b0, b0*b1, ...] for digit packing."""
        place_values = [1]
        for b in self.bins_per_dim[:-1]:
            place_values.append(place_values[-1] * b)
        return jnp.array(place_values)

    @staticmethod
    def _get_bins_fsq(target_codebook_size: int) -> tuple[int, ...]:
        """
        Get bins per dimension based on codebook size, from the original FSQ paper.
        """
        if target_codebook_size == 2**8:
            return (8, 6, 5)
        elif target_codebook_size == 2**10:  # noqa: RET505
            return (8, 5, 5, 5)
        elif target_codebook_size == 2**12:
            return (7, 5, 5, 5, 5)
        elif target_codebook_size == 2**14:
            return (8, 8, 8, 6, 5)
        elif target_codebook_size == 2**16:
            return (8, 8, 8, 5, 5, 5)
        else:
            raise ValueError(f"Codebook size {target_codebook_size} not supported.")

    @staticmethod
    def _get_bins_custom(target_codebook_size: int) -> tuple[int, ...]:
        """Two-dimensional layouts that hit the target size exactly."""
        if target_codebook_size == 2**8:
            return (16, 16)
        elif target_codebook_size == 2**10:  # noqa: RET505
            return (32, 32)
        elif target_codebook_size == 2**12:
            return (64, 64)
        elif target_codebook_size == 2**14:
            return (128, 128)
        elif target_codebook_size == 2**16:
            return (256, 256)
        # Fix: raise like the other layouts instead of silently returning None,
        # which would make bins_per_dim propagate None downstream.
        raise ValueError(f"Codebook size {target_codebook_size} not supported.")

    @staticmethod
    def _get_bins_lfq(target_codebook_size: int) -> tuple[int, ...]:
        """
        Get bins per dimension according to the Lookup-Free Quantization paper (2 bins per dimension)
        """
        assert target_codebook_size & (target_codebook_size - 1) == 0, "Codebook size should be a power of two for LFQ"

        return (2,) * int(math.log2(target_codebook_size))

    def setup(self):
        self.proj_down = nn.Dense(len(self.bins_per_dim))
        self.proj_up = nn.Dense(self.input_dim)

    def __call__(self, inputs: jnp.ndarray) -> tuple[jnp.ndarray, jnp.ndarray]:
        """Encode then decode with straight-through gradients; returns (tokens, reconstruction)."""
        tokens, z = self.encode(inputs)
        output = self.decode(tokens, z_grad=z)
        return tokens, output

    def encode(self, inputs: jnp.ndarray) -> tuple[jnp.ndarray, jnp.ndarray]:
        """Quantize inputs; returns (integer tokens, pre-quantization latents in (-1, 1))."""
        bases = jnp.array(self.bins_per_dim)

        x = self.proj_down(inputs)
        z = jnp.tanh(x)

        # Quantize: map (-1, 1) to integer digits 0..bases-1, then pack.
        digits = jnp.round((z + 1) * (bases - 1) / 2).astype(jnp.int32)
        tokens = self.undigitize(digits)

        return tokens, z

    def decode(self, tokens: jnp.ndarray, z_grad: jax.Array | None = None) -> jnp.ndarray:
        """Reconstruct latents from tokens; z_grad enables the straight-through estimator."""
        bases = jnp.array(self.bins_per_dim)
        digits = self.digitize(tokens)

        z_q = digits / (bases - 1) * 2 - 1

        if z_grad is not None:
            chex.assert_equal_shape([z_q, z_grad])
            # Straight-through: forward pass uses z_q, gradients flow to z_grad.
            z_q = jax.lax.stop_gradient(z_q - z_grad) + z_grad

        return self.proj_up(z_q)

    def undigitize(self, digits: jnp.ndarray) -> jnp.ndarray:
        """Pack per-dimension digits into a single mixed-radix integer token."""
        return jnp.sum(digits * jnp.array(self.place_values), axis=-1)

    def digitize(self, tokens: jnp.ndarray) -> jnp.ndarray:
        """Unpack mixed-radix integer tokens back into per-dimension digits."""
        return (tokens[..., None] // jnp.array(self.place_values)) % jnp.array(self.bins_per_dim)

    @property
    def vocab_size(self) -> int:
        return math.prod(self.bins_per_dim)
127
+
128
class ResNetDownBlock(nn.Module):
    """1-D residual block with optional strided downsampling of the sequence axis."""

    stride: int = 1  # temporal stride; >1 downsamples and triggers a skip projection
    n_filters: int = 64  # output channel count
    dropout_rate: float = 0.0
    group_size: int = 32  # channels per GroupNorm group

    @nn.compact
    def __call__(self, x: jnp.ndarray, *, train: bool = True) -> jnp.ndarray:
        skip = x

        # Project the skip path whenever the main path changes shape
        # (spatial stride or a different channel count).
        if self.stride > 1 or x.shape[-1] != self.n_filters:
            skip = nn.Conv(self.n_filters, (self.stride,), (self.stride,), "SAME")(skip)

        x = nn.Conv(self.n_filters, (3,), (self.stride,), "SAME")(x)
        x = nn.GroupNorm(num_groups=self.n_filters // self.group_size)(x)
        x = nn.Dropout(self.dropout_rate)(x, deterministic=not train)
        x = nn.relu(x)
        x = nn.Conv(self.n_filters, (3,), (1,), "SAME")(x)

        return skip + x
148
+
149
+
150
class ResNetUpBlock(nn.Module):
    """1-D residual block with optional transposed-conv upsampling of the sequence axis."""

    stride: int = 1  # upsampling factor; >1 upsamples both paths
    n_filters: int = 64  # output channel count
    dropout_rate: float = 0.0
    group_size: int = 32  # channels per GroupNorm group

    @nn.compact
    def __call__(self, x: jnp.ndarray, *, train: bool = True) -> jnp.ndarray:
        skip = x

        # Upsample the skip path to keep it shape-compatible with the main path.
        # NOTE(review): unlike ResNetDownBlock, a channel mismatch alone does not
        # trigger a skip projection here - confirm inputs always have n_filters channels.
        if self.stride > 1:
            skip = nn.ConvTranspose(self.n_filters, (self.stride,), (self.stride,), "SAME")(skip)

        x = nn.ConvTranspose(self.n_filters, (3,), (self.stride,), "SAME")(x)
        x = nn.GroupNorm(num_groups=self.n_filters // self.group_size)(x)
        x = nn.Dropout(self.dropout_rate)(x, deterministic=not train)
        x = nn.relu(x)
        x = nn.ConvTranspose(self.n_filters, (3,), (1,), "SAME")(x)

        return skip + x
170
+
171
+
172
@dataclass
class LfqCodebookOutput:
    """Outputs of LookupFreeQuantization.loss."""

    tokens: jnp.ndarray  # packed integer token ids
    z: jnp.ndarray  # pre-quantization latents, projected back up
    z_q: jnp.ndarray  # straight-through quantized latents, projected back up
    token_log_probs: jnp.ndarray  # per-token log-probabilities (may be a zero scalar placeholder)
    commit_loss: jnp.ndarray  # commitment loss between z and its quantization
179
+
180
+
181
class LookupFreeQuantization(nn.Module):
    """Lookup-Free Quantization: each latent dimension is binarized to {-1, +1}.

    Tokens are the binary digits packed into a single integer per position.
    """

    num_dims: int  # number of binary latent dimensions (vocab is 2**num_dims)
    latent_dim: int  # dimensionality restored by project_up

    def setup(self):
        self.codebook = jnp.array([-1, 1])
        self.activation = nn.tanh

        self.project_down = nn.Dense(self.num_dims)
        self.project_up = nn.Dense(self.latent_dim)

    def encode(self, z: jnp.ndarray) -> jnp.ndarray:
        """Binarize each dimension to the nearest codebook entry and pack bits into an int."""
        z = self.project_down(z)
        token_squared_distances = jnp.square(z[..., None] - self.codebook)
        token_bits = jnp.argmin(token_squared_distances, axis=-1)
        return jnp.sum(token_bits * (2 ** jnp.arange(self.num_dims)), axis=-1)

    def decode(self, tokens: jnp.ndarray) -> jnp.ndarray:
        """Unpack bits from integer tokens, map to codebook values, and project back up."""
        token_bits = (tokens[..., None] & (2 ** jnp.arange(self.num_dims))).astype(jnp.int32)
        return self.project_up(self.codebook[token_bits])

    def loss(self, x: jnp.ndarray) -> LfqCodebookOutput:
        """Quantize x with a straight-through estimator and compute the commitment loss."""
        z = self.project_down(x)
        z = self.activation(z)

        token_squared_distances = jnp.square(z[..., None] - self.codebook)
        tokens = jnp.argmin(token_squared_distances, axis=-1)

        token_bit_log_probs = -token_squared_distances
        # Compute token log probs for tokens 0..2^num_dims-1 by summing corresponding log-probs
        token_bit_expansions = jnp.bitwise_and(
            jnp.arange(2**self.num_dims)[None, :], 2 ** jnp.arange(self.num_dims)[:, None]
        ).astype(jnp.int32)
        token_log_probs = (
            token_bit_log_probs[..., 0] @ (1 - token_bit_expansions)
            + token_bit_log_probs[..., 1] @ token_bit_expansions
        )  # (batch_size, num_tokens, 2 ** num_dims)
        token_log_probs = jax.lax.stop_gradient(jax.nn.log_softmax(token_log_probs, axis=-1))
        chex.assert_shape(token_log_probs, (*x.shape[:-1], 2**self.num_dims))

        z_q = self.codebook[tokens]
        commit_loss = jnp.square(z - z_q).mean()
        # Straight-through: forward uses z_q, gradients flow through z.
        z_q = jax.lax.stop_gradient(z_q - z) + z

        z_q = self.project_up(z_q)
        z = self.project_up(z)

        tokens = jnp.sum(tokens * (len(self.codebook) ** jnp.arange(self.num_dims)), axis=-1)
        # NOTE(review): token_log_probs is computed above but a zero scalar is
        # returned instead - confirm whether this is intentional.
        return LfqCodebookOutput(
            tokens=tokens,
            z=z,
            z_q=z_q,
            token_log_probs=jnp.zeros(()),
            commit_loss=commit_loss,
        )
236
+
237
+
238
def make_block_causal_attention_matrix(q: jnp.ndarray, k: jnp.ndarray, bs_q: int, bs_k: int) -> jnp.ndarray:
    # Block-causal mask: query position x may attend to key position y iff
    # x // bs_k >= y // bs_q, i.e. both positions are mapped to a common block
    # index scale (bs_q/bs_k are the per-side block sizes) before comparison.
    return nn.make_attention_mask(q, k, pairwise_fn=lambda x, y: jnp.greater_equal(x // bs_k, y // bs_q))
240
+
241
+
242
class GeGLU(Module):
    """Gated Linear Unit with GELU (GeGLU) activation function.
    GeGLU is a Flax layer that combines a linear transformation with a GELU
    activation function in a gating mechanism. It is often used in Transformer models
    to provide non-linear capabilities while preserving a strong linear component.

    Attributes:
        output_dim: the number of output features; -1 means "same as input".
    """

    output_dim: int = -1

    @compact
    def __call__(self, inputs: Array) -> Array:
        """Applies the GeGLU activation to the inputs.
        Args:
            inputs: the nd-array to apply the GeGLU activation function to.
        Returns:
            The transformed input.
        """
        output_dim = inputs.shape[-1] if self.output_dim == -1 else self.output_dim

        # A single Dense produces both the value and the gate halves.
        x = nn.Dense(output_dim * 2)(inputs)
        x, gate = x[..., :output_dim], x[..., output_dim:]
        return x * nn.gelu(gate)
267
+
268
+
269
class CrossAttentionLayer(nn.Module):
    """Pre-norm transformer layer: self-attention on x, cross-attention to y, then a GeGLU MLP."""

    dropout_rate: float = 0.0
    num_heads: int = None  # defaults to one head per 64 embedding dims
    causal: bool = False  # if True, masks are derived internally and override the arguments
    mlp_ratio: float = 4.0  # hidden width of the MLP relative to the embedding dim

    @nn.compact
    def __call__(
        self,
        x: jnp.ndarray,
        y: jnp.ndarray,
        *,
        mask_self: jnp.ndarray | None = None,
        mask_cross: jnp.ndarray | None = None,
        train: bool = True,
    ) -> jnp.ndarray:
        d_embed = x.shape[-1]
        seq_len_q = x.shape[-2]
        seq_len_k = y.shape[-2]

        if self.causal:
            # One block size will be 1; blocks align the two (possibly different-length)
            # sequences for the block-causal cross mask.
            bs_q = max(seq_len_q // seq_len_k, 1)
            bs_k = max(seq_len_k // seq_len_q, 1)

            mask_self = nn.make_causal_mask(x[..., 0])
            mask_cross = make_block_causal_attention_matrix(x[..., 0], y[..., 0], bs_q, bs_k)

        # Self-attention block
        skip = x
        x = nn.LayerNorm()(x)
        x = nn.MultiHeadDotProductAttention(
            num_heads=self.num_heads or d_embed // 64,
            dropout_rate=self.dropout_rate,
            deterministic=not train,
        )(x, x, x, mask=mask_self)
        x = skip + x

        # Cross-attention block
        skip = x
        x = nn.LayerNorm()(x)
        x = nn.MultiHeadDotProductAttention(
            num_heads=self.num_heads or d_embed // 64,
            dropout_rate=self.dropout_rate,
            deterministic=not train,
        )(x, y, y, mask=mask_cross)
        x = skip + x

        # MLP block
        skip = x
        x = nn.LayerNorm()(x)
        x = nn.Dense(int(d_embed * self.mlp_ratio))(x)
        x = nn.Dropout(self.dropout_rate)(x, deterministic=not train)
        x = GeGLU()(x)
        x = nn.Dense(d_embed)(x)
        return skip + x
325
+
326
+
327
def sinusoidal_pe_init(_, shape: tuple[int, int]) -> jnp.ndarray:
    """Initializer producing fixed sinusoidal positional encodings of `shape`.

    The first ``d_embed // 2`` channels are sines and the rest cosines, at
    geometrically spaced frequencies (as in "Attention Is All You Need").
    The first argument (an RNG key) is ignored: the encoding is deterministic.
    """
    seq_len, d_embed = shape

    # Frequencies: exp(-log(10000) * (2i / d_embed)) for i = 0 .. d_embed/2 - 1.
    freqs = jnp.exp(jnp.arange(0, d_embed, 2) * -(jnp.log(10000.0) / d_embed))
    angles = jnp.arange(0, seq_len, 1)[:, jnp.newaxis] * freqs
    return jnp.concatenate([jnp.sin(angles), jnp.cos(angles)], axis=-1)
339
+
340
+
341
class TokenizerEncoderDecoder(nn.Module):
    """Perceiver-style stack: learned queries cross-attend to an input sequence.

    Maps a sequence of num_cross_tokens inputs to num_tokens outputs through
    num_layers CrossAttentionLayer blocks.
    """

    num_tokens: int  # number of output (query) positions
    num_cross_tokens: int  # expected input sequence length
    num_layers: int
    causal: bool

    mlp_ratio: float = 4.0
    use_state_conditioning: bool = False  # if True, a projected state vector is appended to the inputs

    @nn.compact
    def __call__(
        self,
        y: jnp.ndarray,
        *,
        train: bool = True,
        state_conditioning: jnp.ndarray | None = None,
        mask: jnp.ndarray | None = None,
    ) -> jnp.ndarray:
        # Queries are fixed sinusoidal embeddings, broadcast over batch dims.
        x = self.param("q_embed", sinusoidal_pe_init, (self.num_tokens, y.shape[-1]))
        x = jax.numpy.broadcast_to(x, y.shape[:-2] + x.shape[-2:])

        if mask is not None:
            # mask is (batch_dims..., num_cross_tokens)
            chex.assert_equal_shape([y[..., 0], mask])
            attn_mask = einops.repeat(mask, "... kv -> ... 1 q kv", q=self.num_tokens)
        else:
            attn_mask = jnp.ones((*y.shape[:-2], 1, self.num_tokens, self.num_cross_tokens))

        if self.use_state_conditioning:
            assert state_conditioning is not None, "State conditioning is required for this model."
            # Append the projected state as one extra (always-attendable) input token.
            state_embed = nn.Dense(y.shape[-1], name="state_proj")(state_conditioning)[..., None, :]
            y = jnp.concatenate([y, state_embed], axis=-2)
            attn_mask = jnp.concatenate([attn_mask, jnp.ones_like(attn_mask[..., 0:1])], axis=-1)

        y = y + self.param("y_pos_enc", sinusoidal_pe_init, y.shape[-2:])

        for _ in range(self.num_layers):
            x = CrossAttentionLayer(causal=self.causal, mlp_ratio=self.mlp_ratio)(
                x, y, train=train, mask_self=None, mask_cross=attn_mask
            )

        return x
383
+
384
+
385
class FsqAttentionTokenizer(nn.Module):
    """Attention-based FSQ action tokenizer: encoder -> FSQ codebook -> decoder.

    Encodes an action chunk of shape (data_horizon, data_dim) into num_tokens
    discrete tokens and reconstructs it from them.
    """

    embed_dim: int
    data_dim: int  # per-step action dimensionality
    data_horizon: int  # number of action steps per chunk
    num_tokens: int  # number of discrete tokens per chunk
    num_layers: int
    target_codebook_size: int
    causal: bool = False
    mlp_ratio: float = 2.0

    bound: float | None = None  # optional symmetric clipping bound applied before encoding

    use_state_conditioning: bool = False

    @property
    def vocab_size(self) -> int:
        # Fix: the codebook below is built with codebook_type="custom", so the
        # vocabulary size must come from the custom bin layout. Deriving it from
        # the FSQ bins under-reported it (e.g. 8*6*5 = 240 vs 16*16 = 256 for 2**8).
        return math.prod(FsqCodebook._get_bins_custom(self.target_codebook_size))  # noqa: SLF001

    def setup(self):
        self.proj = nn.Dense(self.embed_dim)
        self.encoder = TokenizerEncoderDecoder(
            num_tokens=self.num_tokens,
            num_cross_tokens=self.data_horizon,
            num_layers=self.num_layers,
            causal=self.causal,
            use_state_conditioning=self.use_state_conditioning,
            mlp_ratio=self.mlp_ratio,
        )
        self.codebook = FsqCodebook(
            input_dim=self.embed_dim,
            target_codebook_size=self.target_codebook_size,
            codebook_type="custom",
        )
        self.decoder = TokenizerEncoderDecoder(
            num_tokens=self.data_horizon,
            num_cross_tokens=self.num_tokens,
            num_layers=self.num_layers,
            causal=self.causal,
            use_state_conditioning=self.use_state_conditioning,
            mlp_ratio=self.mlp_ratio,
        )

        self.proj_mean = nn.Dense(self.data_dim)
        # Learned global output scale applied to the decoded mean.
        self.out_scale = self.param("out_scale", lambda _: jnp.full((), 1.0))

    def tokenize(
        self, action: jnp.ndarray, *, obs: jnp.ndarray | None = None, train: bool = False
    ) -> tuple[jnp.ndarray, jnp.ndarray]:
        """Encode an action chunk into (tokens, pre-quantization latents)."""
        if self.bound is not None:
            action = jnp.clip(action, -self.bound, self.bound)

        x = self.proj(action)
        x = self.encoder(x, train=train, state_conditioning=obs)

        return self.codebook.encode(x)

    def detokenize(self, tokens: jnp.ndarray, *, obs: jnp.ndarray | None = None) -> jnp.ndarray:
        """Reconstruct an action chunk from discrete tokens."""
        x = self.decoder(self.codebook.decode(tokens), state_conditioning=obs)
        mean = self.proj_mean(x)
        return mean * self.out_scale

    def loss(
        self, action: jnp.ndarray, *, obs: jnp.ndarray | None = None, train: bool = True
    ) -> tuple[jnp.ndarray, dict[str, jnp.ndarray]]:
        """Full autoencoding pass; returns (mse loss, metrics dict with mse/mae)."""
        # Encode
        x = self.proj(action)
        z = self.encoder(x, train=train, state_conditioning=obs)

        # Quantize (straight-through gradients through the codebook)
        tokens, z = self.codebook(z)

        # Decode
        x = self.decoder(z, train=train, state_conditioning=obs)
        mean = self.proj_mean(x) * self.out_scale

        mse = jnp.mean(jnp.square(action - mean))
        mae = jnp.mean(jnp.abs(action - mean))

        return mse, {
            "mse": mse,
            "mae": mae,
        }

    def __call__(self, *args: Any, **kwargs: Any) -> tuple[jnp.ndarray, dict[str, jnp.ndarray]]:
        """
        Dummy for .init
        """
        return self.loss(*args, **kwargs)
src/openpi/models/vit.py ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Google LLC.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """ViT implementation adapted from https://github.com/google-research/vision_transformer/blob/main/vit_jax/models_vit.py."""
15
+
16
+ from collections.abc import Callable
17
+ from typing import Any
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+
23
+ from openpi.models import resnet as models_resnet
24
+
25
+ Array = Any
26
+ PRNGKey = Any
27
+ Shape = tuple[int]
28
+ Dtype = Any
29
+
30
+
31
class IdentityLayer(nn.Module):
    """Identity layer, convenient for giving a name to an array."""

    @nn.compact
    def __call__(self, x):
        # Pass-through: exists only so the array shows up under a named module.
        return x
37
+
38
+
39
class AddPositionEmbs(nn.Module):
    """Adds learned positional embeddings to the inputs.

    Attributes:
        posemb_init: positional embedding initializer.
        param_dtype: dtype of the embedding parameter.
    """

    posemb_init: Callable[[PRNGKey, Shape, Dtype], Array]
    param_dtype: Dtype = jnp.float32

    @nn.compact
    def __call__(self, inputs):
        """Applies the AddPositionEmbs module.

        Args:
            inputs: Inputs to the layer.

        Returns:
            Output tensor with shape `(bs, timesteps, in_dim)`.
        """
        # inputs.shape is (batch_size, seq_len, emb_dim).
        assert inputs.ndim == 3, f"Number of dimensions should be 3, but it is: {inputs.ndim}"
        # Leading 1 lets the embedding broadcast across the batch dimension.
        pos_emb_shape = (1, inputs.shape[1], inputs.shape[2])
        pe = self.param("pos_embedding", self.posemb_init, pos_emb_shape, self.param_dtype)
        return inputs + pe
64
+
65
+
66
class MlpBlock(nn.Module):
    """Transformer MLP / feed-forward block (Dense -> GELU -> Dropout -> Dense -> Dropout)."""

    mlp_dim: int  # hidden width of the first Dense
    dtype: Dtype = jnp.float32
    param_dtype: Dtype = jnp.float32
    out_dim: int | None = None  # output width; defaults to the input width
    dropout_rate: float = 0.1
    kernel_init: Callable[[PRNGKey, Shape, Dtype], Array] = nn.initializers.xavier_uniform()
    bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = nn.initializers.normal(stddev=1e-6)

    @nn.compact
    def __call__(self, inputs, *, deterministic):
        """Applies Transformer MlpBlock module."""
        actual_out_dim = inputs.shape[-1] if self.out_dim is None else self.out_dim
        x = nn.Dense(
            features=self.mlp_dim,
            dtype=self.dtype,
            param_dtype=self.param_dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
        )(  # pytype: disable=wrong-arg-types
            inputs
        )
        x = nn.gelu(x)
        x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)
        output = nn.Dense(
            features=actual_out_dim,
            dtype=self.dtype,
            param_dtype=self.param_dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
        )(  # pytype: disable=wrong-arg-types
            x
        )
        return nn.Dropout(rate=self.dropout_rate)(output, deterministic=deterministic)
102
+
103
+
104
class Encoder1DBlock(nn.Module):
    """Transformer encoder layer.

    Attributes:
        mlp_dim: dimension of the mlp on top of attention block.
        num_heads: Number of heads in nn.MultiHeadDotProductAttention.
        dtype: the dtype of the computation (default: float32).
        dropout_rate: dropout rate.
        attention_dropout_rate: dropout for attention heads.
    """

    mlp_dim: int
    num_heads: int
    dtype: Dtype = jnp.float32
    dropout_rate: float = 0.1
    attention_dropout_rate: float = 0.1

    @nn.compact
    def __call__(self, inputs, deterministic):
        """Applies Encoder1DBlock module.

        Args:
            inputs: Inputs to the layer.
            deterministic: Dropout will not be applied when set to true.

        Returns:
            A (output, None) pair; the None is the per-step output slot
            required by the nn.scan wrapper in Encoder.
        """

        # Attention block.
        assert inputs.ndim == 3, f"Expected (batch, seq, hidden) got {inputs.shape}"
        x = nn.LayerNorm(dtype=self.dtype)(inputs)
        x = nn.MultiHeadDotProductAttention(
            dtype=self.dtype,
            kernel_init=nn.initializers.xavier_uniform(),
            broadcast_dropout=False,
            deterministic=deterministic,
            dropout_rate=self.attention_dropout_rate,
            num_heads=self.num_heads,
            # why isn't this true by default???
            force_fp32_for_softmax=True,
        )(x, x)
        x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)
        x = x + inputs

        # MLP block.
        y = nn.LayerNorm(dtype=self.dtype)(x)
        y = MlpBlock(mlp_dim=self.mlp_dim, dtype=self.dtype, dropout_rate=self.dropout_rate)(
            y, deterministic=deterministic
        )

        return x + y, None
158
+
159
+
160
class Encoder(nn.Module):
    """Transformer Model Encoder for sequence to sequence translation.

    Attributes:
        num_layers: number of layers
        mlp_dim: dimension of the mlp on top of attention block
        num_heads: Number of heads in nn.MultiHeadDotProductAttention
        dropout_rate: dropout rate.
        attention_dropout_rate: dropout rate in self attention.
        add_position_embedding: whether to add learned positional embeddings.
    """

    dtype: jax.typing.DTypeLike
    num_layers: int
    mlp_dim: int
    num_heads: int
    dropout_rate: float = 0.1
    attention_dropout_rate: float = 0.1
    add_position_embedding: bool = True

    @nn.compact
    def __call__(self, x, *, train):
        """Applies Transformer model on the inputs.

        Args:
            x: Inputs to the layer.
            train: Set to `True` when training.

        Returns:
            output of a transformer encoder.
        """
        assert x.ndim == 3  # (batch, len, emb)

        if self.add_position_embedding:
            x = AddPositionEmbs(
                posemb_init=nn.initializers.normal(stddev=0.02),  # from BERT.
                name="posembed_input",
            )(x)
            x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=not train)

        x = x.astype(self.dtype)
        # Input Encoder: remat + scan stacks num_layers identical blocks while
        # rematerializing activations to save memory; `deterministic` (arg 2)
        # is static for remat, and params/dropout RNGs are split per layer.
        block = nn.remat(Encoder1DBlock, prevent_cse=False, static_argnums=(2,))
        x, _ = nn.scan(
            block,
            variable_axes={"params": 0},
            split_rngs={"params": True, "dropout": True},
            in_axes=nn.broadcast,
            length=self.num_layers,
        )(
            name="encoderblock",
            mlp_dim=self.mlp_dim,
            dropout_rate=self.dropout_rate,
            attention_dropout_rate=self.attention_dropout_rate,
            dtype=self.dtype,
            num_heads=self.num_heads,
        )(x, not train)
        return nn.LayerNorm(name="encoder_norm", dtype=self.dtype)(x)
217
+
218
+
219
class VisionTransformer(nn.Module):
    """VisionTransformer.

    Attributes:
      dtype: computation dtype for the transformer encoder.
      num_classes: size of the classification head; 0/None skips the head.
      patches: patch spec; `patches.size` is the (h, w) patch size.
      transformer: kwargs dict for the Encoder, or None to skip it.
      hidden_size: embedding dimension produced by the patch projection.
      resnet: optional hybrid ResNet root config (width_factor, num_layers).
      representation_size: if set, adds a tanh "pre_logits" projection.
      classifier: one of "token", "gap", "unpooled", "token_unpooled".
      head_bias_init: constant bias init for the classification head.
      encoder: encoder class to instantiate (defaults to Encoder).
      model_name: optional model identifier (not used in __call__).
    """

    dtype: jax.typing.DTypeLike
    num_classes: int
    patches: Any
    transformer: Any
    hidden_size: int
    resnet: Any | None = None
    representation_size: int | None = None
    classifier: str = "token"
    head_bias_init: float = 0.0
    encoder: type[nn.Module] = Encoder
    model_name: str | None = None

    @nn.compact
    def __call__(self, inputs, *, train):
        x = inputs
        # (Possibly partial) ResNet root.
        if self.resnet is not None:
            width = int(64 * self.resnet.width_factor)

            # Root block.
            x = models_resnet.StdConv(
                features=width, kernel_size=(7, 7), strides=(2, 2), use_bias=False, name="conv_root"
            )(x)
            x = nn.GroupNorm(name="gn_root")(x)
            x = nn.relu(x)
            x = nn.max_pool(x, window_shape=(3, 3), strides=(2, 2), padding="SAME")

            # ResNet stages.
            if self.resnet.num_layers:
                x = models_resnet.ResNetStage(
                    block_size=self.resnet.num_layers[0], nout=width, first_stride=(1, 1), name="block1"
                )(x)
                for i, block_size in enumerate(self.resnet.num_layers[1:], 1):
                    x = models_resnet.ResNetStage(
                        block_size=block_size, nout=width * 2**i, first_stride=(2, 2), name=f"block{i + 1}"
                    )(x)

        n, h, w, c = x.shape

        # We can merge s2d+emb into a single conv; it's the same.
        x = nn.Conv(
            features=self.hidden_size,
            kernel_size=self.patches.size,
            strides=self.patches.size,
            padding="VALID",
            name="embedding",
        )(x)

        # Here, x is a grid of embeddings.

        # (Possibly partial) Transformer.
        if self.transformer is not None:
            n, h, w, c = x.shape
            x = jnp.reshape(x, [n, h * w, c])

            # If we want to add a class token, add it here.
            if self.classifier in ["token", "token_unpooled"]:
                cls = self.param("cls", nn.initializers.zeros, (1, 1, c))
                cls = jnp.tile(cls, [n, 1, 1])
                x = jnp.concatenate([cls, x], axis=1)

            x = self.encoder(name="Transformer", **self.transformer, dtype=self.dtype)(x, train=train)

        # Pooling: class token, global average pool, or no pooling at all.
        if self.classifier == "token":
            x = x[:, 0]
        elif self.classifier == "gap":
            x = jnp.mean(x, axis=list(range(1, x.ndim - 1)))  # (1,) or (1,2)
        elif self.classifier in ["unpooled", "token_unpooled"]:
            pass
        else:
            raise ValueError(f"Invalid classifier={self.classifier}")

        if self.representation_size is not None:
            x = nn.Dense(features=self.representation_size, name="pre_logits")(x)
            x = nn.tanh(x)
        else:
            x = IdentityLayer(name="pre_logits")(x)

        if self.num_classes:
            # Zero-initialized head kernel: a freshly added head starts out as a
            # constant predictor (bias only).
            x = nn.Dense(
                features=self.num_classes,
                name="head",
                kernel_init=nn.initializers.zeros,
                bias_init=nn.initializers.constant(self.head_bias_init),
            )(x)
        return x
src/openpi/models_pytorch/__pycache__/gemma_pytorch.cpython-311.pyc ADDED
Binary file (13.9 kB). View file
 
src/openpi/models_pytorch/__pycache__/pi0_pytorch.cpython-311.pyc ADDED
Binary file (24.8 kB). View file
 
src/openpi/models_pytorch/__pycache__/preprocessing_pytorch.cpython-311.pyc ADDED
Binary file (6.97 kB). View file
 
src/openpi/models_pytorch/gemma_pytorch.py ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Literal
2
+
3
+ import pytest
4
+ import torch
5
+ from torch import nn
6
+ from transformers import GemmaForCausalLM
7
+ from transformers import PaliGemmaForConditionalGeneration
8
+ from transformers.models.auto import CONFIG_MAPPING
9
+ from transformers.models.gemma import modeling_gemma
10
+
11
+
12
class PaliGemmaWithExpertModel(nn.Module):
    """PaliGemma VLM paired with a smaller Gemma "action expert".

    The two transformers run layer-by-layer with a shared attention step: per
    layer, queries/keys/values from both token streams are concatenated along
    the sequence axis, so prefix (vision+language) and suffix (action) tokens
    can attend to each other while each stream keeps its own weights.
    """

    def __init__(
        self,
        vlm_config,
        action_expert_config,
        use_adarms=None,
        precision: Literal["bfloat16", "float32"] = "bfloat16",
    ):
        # use_adarms[0] applies to the VLM text model, use_adarms[1] to the
        # action expert (adaRMS conditioning on a timestep embedding).
        if use_adarms is None:
            use_adarms = [False, False]
        super().__init__()

        # Build a HF PaliGemma config mirroring the openpi gemma config fields.
        vlm_config_hf = CONFIG_MAPPING["paligemma"]()
        vlm_config_hf._vocab_size = 257152  # noqa: SLF001
        vlm_config_hf.image_token_index = 257152
        vlm_config_hf.text_config.hidden_size = vlm_config.width
        vlm_config_hf.text_config.intermediate_size = vlm_config.mlp_dim
        vlm_config_hf.text_config.num_attention_heads = vlm_config.num_heads
        vlm_config_hf.text_config.head_dim = vlm_config.head_dim
        vlm_config_hf.text_config.num_hidden_layers = vlm_config.depth
        vlm_config_hf.text_config.num_key_value_heads = vlm_config.num_kv_heads
        vlm_config_hf.text_config.hidden_activation = "gelu_pytorch_tanh"
        vlm_config_hf.text_config.torch_dtype = "float32"
        vlm_config_hf.text_config.vocab_size = 257152
        vlm_config_hf.text_config.use_adarms = use_adarms[0]
        vlm_config_hf.text_config.adarms_cond_dim = vlm_config.width if use_adarms[0] else None
        vlm_config_hf.vision_config.intermediate_size = 4304
        vlm_config_hf.vision_config.projection_dim = 2048
        vlm_config_hf.vision_config.projector_hidden_act = "gelu_fast"
        vlm_config_hf.vision_config.torch_dtype = "float32"

        # HF Gemma config for the action expert (smaller transformer).
        action_expert_config_hf = CONFIG_MAPPING["gemma"](
            head_dim=action_expert_config.head_dim,
            hidden_size=action_expert_config.width,
            intermediate_size=action_expert_config.mlp_dim,
            num_attention_heads=action_expert_config.num_heads,
            num_hidden_layers=action_expert_config.depth,
            num_key_value_heads=action_expert_config.num_kv_heads,
            vocab_size=257152,
            hidden_activation="gelu_pytorch_tanh",
            torch_dtype="float32",
            use_adarms=use_adarms[1],
            adarms_cond_dim=action_expert_config.width if use_adarms[1] else None,
        )

        self.paligemma = PaliGemmaForConditionalGeneration(config=vlm_config_hf)
        self.gemma_expert = GemmaForCausalLM(config=action_expert_config_hf)
        # The expert only ever receives pre-computed embeddings, never token ids.
        self.gemma_expert.model.embed_tokens = None

        self.to_bfloat16_for_selected_params(precision)

    def to_bfloat16_for_selected_params(self, precision: Literal["bfloat16", "float32"] = "bfloat16"):
        """Cast the model to `precision`, keeping selected params in float32.

        Raises:
            ValueError: if `precision` is not "bfloat16" or "float32".
        """
        if precision == "bfloat16":
            self.to(dtype=torch.bfloat16)
        elif precision == "float32":
            self.to(dtype=torch.float32)
            # Everything is already float32; nothing needs to be re-cast below.
            return
        else:
            raise ValueError(f"Invalid precision: {precision}")

        # Patch/position embeddings and norm layers stay float32 for numerical
        # stability when the rest of the model runs in bfloat16.
        params_to_keep_float32 = [
            "vision_tower.vision_model.embeddings.patch_embedding.weight",
            "vision_tower.vision_model.embeddings.patch_embedding.bias",
            "vision_tower.vision_model.embeddings.position_embedding.weight",
            "input_layernorm",
            "post_attention_layernorm",
            "model.norm",
        ]

        for name, param in self.named_parameters():
            if any(selector in name for selector in params_to_keep_float32):
                param.data = param.data.to(dtype=torch.float32)

    def embed_image(self, image: torch.Tensor):
        """Return image token embeddings from the PaliGemma vision pipeline."""
        return self.paligemma.model.get_image_features(image)

    def embed_language_tokens(self, tokens: torch.Tensor):
        """Look up language token embeddings from the PaliGemma text model."""
        return self.paligemma.language_model.embed_tokens(tokens)

    def forward(
        self,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        # NOTE(review): this was annotated `pytest.Cache`, which does not exist;
        # the intended type was almost certainly transformers' KV `Cache`.
        past_key_values: list[torch.FloatTensor] | None = None,
        inputs_embeds: list[torch.FloatTensor] | None = None,
        use_cache: bool | None = None,
        adarms_cond: list[torch.Tensor] | None = None,
    ):
        """Run the VLM, the expert, or both jointly.

        `inputs_embeds` is a two-element list [prefix_embs, suffix_embs]:
        - suffix None  -> prefix-only pass through the VLM (fills the KV cache);
        - prefix None  -> suffix-only pass through the expert;
        - both present -> joint layer-by-layer pass with shared attention.

        Returns:
            ([prefix_output, suffix_output], prefix_past_key_values).
        """
        if adarms_cond is None:
            adarms_cond = [None, None]
        if inputs_embeds[1] is None:
            # Prefix-only: standard VLM language-model pass.
            prefix_output = self.paligemma.language_model.forward(
                inputs_embeds=inputs_embeds[0],
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                adarms_cond=adarms_cond[0] if adarms_cond is not None else None,
            )
            prefix_past_key_values = prefix_output.past_key_values
            prefix_output = prefix_output.last_hidden_state
            suffix_output = None
        elif inputs_embeds[0] is None:
            # Suffix-only: action expert pass (typically against a cached prefix).
            suffix_output = self.gemma_expert.model.forward(
                inputs_embeds=inputs_embeds[1],
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                adarms_cond=adarms_cond[1] if adarms_cond is not None else None,
            )
            suffix_output = suffix_output.last_hidden_state
            prefix_output = None
            prefix_past_key_values = None
        else:
            # Joint pass: interleave both models layer by layer.
            models = [self.paligemma.language_model, self.gemma_expert.model]
            num_layers = self.paligemma.config.text_config.num_hidden_layers

            # Check if gradient checkpointing is enabled for any of the models
            use_gradient_checkpointing = (
                hasattr(self.gemma_expert.model, "gradient_checkpointing")
                and self.gemma_expert.model.gradient_checkpointing
                and self.training
            ) or (hasattr(self, "gradient_checkpointing") and self.gradient_checkpointing and self.training)

            # Force enable gradient checkpointing if we're in training mode and the model supports it
            if self.training and hasattr(self.gemma_expert.model, "gradient_checkpointing"):
                if not self.gemma_expert.model.gradient_checkpointing:
                    print("Forcing gradient checkpointing to be enabled for Gemma expert model")
                    self.gemma_expert.model.gradient_checkpointing = True
                use_gradient_checkpointing = True

            # Debug gradient checkpointing status (printed once per instance).
            if hasattr(self, "_debug_gc_printed") and not self._debug_gc_printed:
                print(f"Gemma expert model gradient checkpointing: {use_gradient_checkpointing}")
                print(f"Model training mode: {self.training}")
                print(
                    f"Gemma expert model has gradient_checkpointing attr: {hasattr(self.gemma_expert.model, 'gradient_checkpointing')}"
                )
                if hasattr(self.gemma_expert.model, "gradient_checkpointing"):
                    print(
                        f"Gemma expert model gradient_checkpointing value: {self.gemma_expert.model.gradient_checkpointing}"
                    )
                self._debug_gc_printed = True

            # Define the complete layer computation function for gradient checkpointing
            def compute_layer_complete(layer_idx, inputs_embeds, attention_mask, position_ids, adarms_cond):
                models = [self.paligemma.language_model, self.gemma_expert.model]

                # Project each stream to Q/K/V with its own weights.
                query_states = []
                key_states = []
                value_states = []
                gates = []
                for i, hidden_states in enumerate(inputs_embeds):
                    layer = models[i].layers[layer_idx]
                    hidden_states, gate = layer.input_layernorm(hidden_states, cond=adarms_cond[i])  # noqa: PLW2901
                    gates.append(gate)

                    input_shape = hidden_states.shape[:-1]
                    hidden_shape = (*input_shape, -1, layer.self_attn.head_dim)
                    query_state = layer.self_attn.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
                    key_state = layer.self_attn.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
                    value_state = layer.self_attn.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

                    query_states.append(query_state)
                    key_states.append(key_state)
                    value_states.append(value_state)

                # Concatenate and process attention (shared across both streams).
                query_states = torch.cat(query_states, dim=2)
                key_states = torch.cat(key_states, dim=2)
                value_states = torch.cat(value_states, dim=2)

                # rotary_emb only reads device/dtype from its tensor argument,
                # so a zero tensor of the right shape suffices here.
                dummy_tensor = torch.zeros(
                    query_states.shape[0],
                    query_states.shape[2],
                    query_states.shape[-1],
                    device=query_states.device,
                    dtype=query_states.dtype,
                )
                cos, sin = self.paligemma.model.language_model.rotary_emb(dummy_tensor, position_ids)
                query_states, key_states = modeling_gemma.apply_rotary_pos_emb(
                    query_states, key_states, cos, sin, unsqueeze_dim=1
                )

                batch_size = query_states.shape[0]
                scaling = self.paligemma.language_model.layers[layer_idx].self_attn.scaling

                # Attention computation
                att_output, _ = modeling_gemma.eager_attention_forward(
                    self.paligemma.language_model.layers[layer_idx].self_attn,
                    query_states,
                    key_states,
                    value_states,
                    attention_mask,
                    scaling,
                )
                # Get head_dim from the current layer, not from the model
                head_dim = self.paligemma.language_model.layers[layer_idx].self_attn.head_dim
                att_output = att_output.reshape(batch_size, -1, 1 * 8 * head_dim)

                # Process layer outputs: split the joint attention output back
                # into per-stream slices and finish each stream's layer.
                outputs_embeds = []
                start_pos = 0
                for i, hidden_states in enumerate(inputs_embeds):
                    layer = models[i].layers[layer_idx]
                    end_pos = start_pos + hidden_states.shape[1]

                    if att_output.dtype != layer.self_attn.o_proj.weight.dtype:
                        att_output = att_output.to(layer.self_attn.o_proj.weight.dtype)
                    out_emb = layer.self_attn.o_proj(att_output[:, start_pos:end_pos])

                    # first residual
                    out_emb = modeling_gemma._gated_residual(hidden_states, out_emb, gates[i])  # noqa: SLF001
                    after_first_residual = out_emb.clone()
                    out_emb, gate = layer.post_attention_layernorm(out_emb, cond=adarms_cond[i])
                    # Convert to bfloat16 if the next layer (mlp) uses bfloat16
                    if layer.mlp.up_proj.weight.dtype == torch.bfloat16:
                        out_emb = out_emb.to(dtype=torch.bfloat16)

                    out_emb = layer.mlp(out_emb)
                    # second residual
                    out_emb = modeling_gemma._gated_residual(after_first_residual, out_emb, gate)  # noqa: SLF001
                    outputs_embeds.append(out_emb)
                    start_pos = end_pos

                return outputs_embeds

            # Process all layers with gradient checkpointing if enabled
            for layer_idx in range(num_layers):
                if use_gradient_checkpointing:
                    inputs_embeds = torch.utils.checkpoint.checkpoint(
                        compute_layer_complete,
                        layer_idx,
                        inputs_embeds,
                        attention_mask,
                        position_ids,
                        adarms_cond,
                        use_reentrant=False,
                        preserve_rng_state=False,
                    )
                else:
                    inputs_embeds = compute_layer_complete(
                        layer_idx, inputs_embeds, attention_mask, position_ids, adarms_cond
                    )

            # final norm
            # Define final norm computation function for gradient checkpointing
            def compute_final_norms(inputs_embeds, adarms_cond):
                outputs_embeds = []
                for i, hidden_states in enumerate(inputs_embeds):
                    out_emb, _ = models[i].norm(hidden_states, cond=adarms_cond[i])
                    outputs_embeds.append(out_emb)
                return outputs_embeds

            # Apply gradient checkpointing to final norm if enabled
            if use_gradient_checkpointing:
                outputs_embeds = torch.utils.checkpoint.checkpoint(
                    compute_final_norms, inputs_embeds, adarms_cond, use_reentrant=False, preserve_rng_state=False
                )
            else:
                outputs_embeds = compute_final_norms(inputs_embeds, adarms_cond)

            prefix_output = outputs_embeds[0]
            suffix_output = outputs_embeds[1]
            prefix_past_key_values = None

        return [prefix_output, suffix_output], prefix_past_key_values
src/openpi/models_pytorch/pi0_pytorch.py ADDED
@@ -0,0 +1,463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import math
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from torch import nn
7
+ import torch.nn.functional as F # noqa: N812
8
+
9
+ import openpi.models.gemma as _gemma
10
+ from openpi.models_pytorch.gemma_pytorch import PaliGemmaWithExpertModel
11
+ import openpi.models_pytorch.preprocessing_pytorch as _preprocessing
12
+
13
+
14
def get_safe_dtype(target_dtype, device_type):
    """Return a dtype that is safe to use on the given device type.

    Args:
        target_dtype: The desired torch dtype.
        device_type: Device type string, e.g. "cpu" or "cuda".

    Returns:
        ``torch.float32`` when ``target_dtype`` is ``torch.bfloat16`` on CPU
        (CPU kernels here don't support bfloat16); otherwise ``target_dtype``
        unchanged.
    """
    # The original also special-cased float64 -> float64 on CPU, which is an
    # identity mapping; only bfloat16 actually needs remapping.
    if device_type == "cpu" and target_dtype == torch.bfloat16:
        return torch.float32
    return target_dtype
23
+
24
+
25
def create_sinusoidal_pos_embedding(
    time: torch.Tensor, dimension: int, min_period: float, max_period: float, device="cpu"
) -> Tensor:
    """Computes sine-cosine positional embedding vectors for scalar positions.

    Args:
        time: 1-D tensor of scalar positions, shape (batch_size,).
        dimension: Output embedding size; must be even (half sin, half cos).
        min_period: Shortest oscillation period.
        max_period: Longest oscillation period.
        device: Device (torch.device or string) for the embedding tensor.

    Returns:
        Tensor of shape (batch_size, dimension): [sin(...), cos(...)] features
        over `dimension // 2` geometrically spaced periods.

    Raises:
        ValueError: if `dimension` is odd or `time` is not 1-D.
    """
    if dimension % 2 != 0:
        raise ValueError(f"dimension ({dimension}) must be divisible by 2")

    if time.ndim != 1:
        raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")

    # Bug fix: accept both torch.device objects and strings. The original read
    # `device.type`, which crashed for the default string argument "cpu".
    device = torch.device(device)
    # get_safe_dtype(torch.float64, ...) is an identity mapping (only bfloat16
    # is ever remapped), so use float64 directly for the period computation.
    dtype = torch.float64
    fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
    period = min_period * (max_period / min_period) ** fraction

    # Compute the outer product of angular frequencies and positions.
    scaling_factor = 1.0 / period * 2 * math.pi
    sin_input = scaling_factor[None, :] * time[:, None]
    return torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)
43
+
44
+
45
def sample_beta(alpha, beta, bsize, device):
    """Draw `bsize` samples from a Beta(alpha, beta) distribution on `device`."""
    concentration1 = torch.as_tensor(alpha, dtype=torch.float32, device=device)
    concentration0 = torch.as_tensor(beta, dtype=torch.float32, device=device)
    return torch.distributions.Beta(concentration1, concentration0).sample((bsize,))
50
+
51
+
52
def make_att_2d_masks(pad_masks, att_masks):
    """Copied from big_vision: expand 1-D mask vectors into a 2-D attention mask.

    Each token may attend to valid tokens whose cumulative `att_masks` value is
    less than or equal to its own, which supports causal, prefix-LM, and
    block-causal layouts, e.g.:

      [[1 1 1 1 1 1]]          pure causal attention
      [[0 0 0 1 1 1]]          prefix-LM (first 3 tokens attend bidirectionally)
      [[1 0 1 0 1 0 0 1 0 0]]  block-causal over 4 blocks

    Args:
        pad_masks: bool[B, N], true for real input tokens, false for padding.
        att_masks: int32[B, N], 1 where earlier tokens must not depend on the
            token, 0 where it shares the previous token's attention block.

    Returns:
        bool[B, N, N] combined attention/padding mask.

    Raises:
        ValueError: if either mask is not 2-D.
    """
    if att_masks.ndim != 2:
        raise ValueError(att_masks.ndim)
    if pad_masks.ndim != 2:
        raise ValueError(pad_masks.ndim)

    block_ids = torch.cumsum(att_masks, dim=1)
    causal = block_ids[:, None, :] <= block_ids[:, :, None]
    valid = pad_masks[:, None, :] * pad_masks[:, :, None]
    return causal & valid
82
+
83
+
84
+ class PI0Pytorch(nn.Module):
85
    def __init__(self, config):
        """Build the PI0 flow-matching policy from an openpi model config.

        `config` supplies the paligemma/action-expert variants, dtype,
        action_horizon/action_dim, and the `pi05` flag selecting the adaRMS
        (pi0.5) variant.
        """
        super().__init__()
        self.config = config
        self.pi05 = config.pi05

        paligemma_config = _gemma.get_config(config.paligemma_variant)
        action_expert_config = _gemma.get_config(config.action_expert_variant)

        # pi0.5 conditions the action expert on time via adaRMS instead of
        # concatenating a time embedding with the action tokens.
        self.paligemma_with_expert = PaliGemmaWithExpertModel(
            paligemma_config,
            action_expert_config,
            use_adarms=[False, True] if self.pi05 else [False, False],
            precision=config.dtype,
        )

        # Projections between the 32-dim action/state space and expert width.
        self.action_in_proj = nn.Linear(32, action_expert_config.width)
        self.action_out_proj = nn.Linear(action_expert_config.width, 32)

        if self.pi05:
            # Time MLP feeding the adaRMS conditioning signal.
            self.time_mlp_in = nn.Linear(action_expert_config.width, action_expert_config.width)
            self.time_mlp_out = nn.Linear(action_expert_config.width, action_expert_config.width)
        else:
            # pi0: state token plus a fused action+time MLP.
            self.state_proj = nn.Linear(32, action_expert_config.width)
            self.action_time_mlp_in = nn.Linear(2 * action_expert_config.width, action_expert_config.width)
            self.action_time_mlp_out = nn.Linear(action_expert_config.width, action_expert_config.width)

        torch.set_float32_matmul_precision("high")
        # Compile the inference path; training `forward` stays eager.
        self.sample_actions = torch.compile(self.sample_actions, mode="max-autotune")

        # Initialize gradient checkpointing flag
        self.gradient_checkpointing_enabled = False

        # Fail fast if the patched transformers install is missing.
        msg = "transformers_replace is not installed correctly. Please install it with `uv pip install transformers==4.53.2` and `cp -r ./src/openpi/models_pytorch/transformers_replace/* .venv/lib/python3.11/site-packages/transformers/`."
        try:
            from transformers.models.siglip import check

            if not check.check_whether_transformers_replace_is_installed_correctly():
                raise ValueError(msg)
        except ImportError:
            raise ValueError(msg) from None
125
+
126
+ def gradient_checkpointing_enable(self):
127
+ """Enable gradient checkpointing for memory optimization."""
128
+ self.gradient_checkpointing_enabled = True
129
+ self.paligemma_with_expert.paligemma.language_model.gradient_checkpointing = True
130
+ self.paligemma_with_expert.paligemma.vision_tower.gradient_checkpointing = True
131
+ self.paligemma_with_expert.gemma_expert.model.gradient_checkpointing = True
132
+
133
+ logging.info("Enabled gradient checkpointing for PI0Pytorch model")
134
+
135
+ def gradient_checkpointing_disable(self):
136
+ """Disable gradient checkpointing."""
137
+ self.gradient_checkpointing_enabled = False
138
+ self.paligemma_with_expert.paligemma.language_model.gradient_checkpointing = False
139
+ self.paligemma_with_expert.paligemma.vision_tower.gradient_checkpointing = False
140
+ self.paligemma_with_expert.gemma_expert.model.gradient_checkpointing = False
141
+
142
+ logging.info("Disabled gradient checkpointing for PI0Pytorch model")
143
+
144
+ def is_gradient_checkpointing_enabled(self):
145
+ """Check if gradient checkpointing is enabled."""
146
+ return self.gradient_checkpointing_enabled
147
+
148
+ def _apply_checkpoint(self, func, *args, **kwargs):
149
+ """Helper method to apply gradient checkpointing if enabled."""
150
+ if self.gradient_checkpointing_enabled and self.training:
151
+ return torch.utils.checkpoint.checkpoint(
152
+ func, *args, use_reentrant=False, preserve_rng_state=False, **kwargs
153
+ )
154
+ return func(*args, **kwargs)
155
+
156
+ def _prepare_attention_masks_4d(self, att_2d_masks):
157
+ """Helper method to prepare 4D attention masks for transformer."""
158
+ att_2d_masks_4d = att_2d_masks[:, None, :, :]
159
+ return torch.where(att_2d_masks_4d, 0.0, -2.3819763e38)
160
+
161
    def _preprocess_observation(self, observation, *, train=True):
        """Helper method to preprocess observation.

        Runs the shared pytorch observation preprocessing over all image keys,
        then unpacks the fields consumed by embed_prefix/embed_suffix.

        Returns:
            Tuple of (images, image_masks, tokenized_prompt,
            tokenized_prompt_mask, state).
        """
        observation = _preprocessing.preprocess_observation_pytorch(
            observation, train=train, image_keys=list(observation.images.keys())
        )
        return (
            list(observation.images.values()),
            list(observation.image_masks.values()),
            observation.tokenized_prompt,
            observation.tokenized_prompt_mask,
            observation.state,
        )
173
+
174
+ def sample_noise(self, shape, device):
175
+ return torch.normal(
176
+ mean=0.0,
177
+ std=1.0,
178
+ size=shape,
179
+ dtype=torch.float32,
180
+ device=device,
181
+ )
182
+
183
+ def sample_time(self, bsize, device):
184
+ time_beta = sample_beta(1.5, 1.0, bsize, device)
185
+ time = time_beta * 0.999 + 0.001
186
+ return time.to(dtype=torch.float32, device=device)
187
+
188
    def embed_prefix(
        self, images, img_masks, lang_tokens, lang_masks
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Embed images with SigLIP and language tokens with embedding layer to prepare
        for PaliGemma transformer processing.

        Returns:
            (embs, pad_masks, att_masks): concatenated image+language token
            embeddings, their padding mask, and per-token attention-block
            markers (all zeros -> full bidirectional attention over the prefix).
        """
        embs = []
        pad_masks = []
        att_masks = []

        # Process images
        for img, img_mask in zip(images, img_masks, strict=True):

            def image_embed_func(img):
                return self.paligemma_with_expert.embed_image(img)

            img_emb = self._apply_checkpoint(image_embed_func, img)

            bsize, num_img_embs = img_emb.shape[:2]

            embs.append(img_emb)
            # One per-camera mask bit covers all tokens of that image.
            pad_masks.append(img_mask[:, None].expand(bsize, num_img_embs))

            # Create attention masks so that image tokens attend to each other
            att_masks += [0] * num_img_embs

        # Process language tokens
        def lang_embed_func(lang_tokens):
            lang_emb = self.paligemma_with_expert.embed_language_tokens(lang_tokens)
            lang_emb_dim = lang_emb.shape[-1]
            # Scale by sqrt(dim), matching Gemma's embedding normalization.
            return lang_emb * math.sqrt(lang_emb_dim)

        lang_emb = self._apply_checkpoint(lang_embed_func, lang_tokens)

        embs.append(lang_emb)
        pad_masks.append(lang_masks)

        # full attention between image and language inputs
        num_lang_embs = lang_emb.shape[1]
        att_masks += [0] * num_lang_embs

        embs = torch.cat(embs, dim=1)
        pad_masks = torch.cat(pad_masks, dim=1)
        att_masks = torch.tensor(att_masks, dtype=torch.bool, device=pad_masks.device)

        # Get batch size from the first dimension of the concatenated tensors
        bsize = pad_masks.shape[0]
        att_masks = att_masks[None, :].expand(bsize, len(att_masks))

        return embs, pad_masks, att_masks
238
+
239
    def embed_suffix(self, state, noisy_actions, timestep):
        """Embed state, noisy_actions, timestep to prepare for Expert Gemma processing.

        Returns:
            (embs, pad_masks, att_masks, adarms_cond) where adarms_cond is the
            time conditioning for the adaRMS layers (pi0.5) or None (pi0).
        """
        embs = []
        pad_masks = []
        att_masks = []

        if not self.pi05:
            # pi0 only: prepend a single state token.
            if self.state_proj.weight.dtype == torch.float32:
                state = state.to(torch.float32)

            # Embed state
            def state_proj_func(state):
                return self.state_proj(state)

            state_emb = self._apply_checkpoint(state_proj_func, state)

            embs.append(state_emb[:, None, :])
            bsize = state_emb.shape[0]
            device = state_emb.device

            state_mask = torch.ones(bsize, 1, dtype=torch.bool, device=device)
            pad_masks.append(state_mask)

            # Set attention masks so that image and language inputs do not attend to state or actions
            att_masks += [1]

        # Embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
        time_emb = create_sinusoidal_pos_embedding(
            timestep, self.action_in_proj.out_features, min_period=4e-3, max_period=4.0, device=timestep.device
        )
        time_emb = time_emb.type(dtype=timestep.dtype)

        # Fuse timestep + action information using an MLP
        def action_proj_func(noisy_actions):
            return self.action_in_proj(noisy_actions)

        action_emb = self._apply_checkpoint(action_proj_func, noisy_actions)

        if not self.pi05:
            # pi0: concatenate broadcast time embedding with each action token,
            # then fuse through a two-layer MLP.
            time_emb = time_emb[:, None, :].expand_as(action_emb)
            action_time_emb = torch.cat([action_emb, time_emb], dim=2)

            # Apply MLP layers
            def mlp_func(action_time_emb):
                x = self.action_time_mlp_in(action_time_emb)
                x = F.silu(x)  # swish == silu
                return self.action_time_mlp_out(x)

            action_time_emb = self._apply_checkpoint(mlp_func, action_time_emb)
            adarms_cond = None
        else:
            # pi0.5: time conditions the expert through adaRMS instead of the
            # token stream; action tokens pass through unchanged.
            # time MLP (for adaRMS)
            def time_mlp_func(time_emb):
                x = self.time_mlp_in(time_emb)
                x = F.silu(x)  # swish == silu
                x = self.time_mlp_out(x)
                return F.silu(x)

            time_emb = self._apply_checkpoint(time_mlp_func, time_emb)
            action_time_emb = action_emb
            adarms_cond = time_emb

        # Add to input tokens
        embs.append(action_time_emb)

        bsize, action_time_dim = action_time_emb.shape[:2]
        action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=timestep.device)
        pad_masks.append(action_time_mask)

        # Set attention masks so that image, language and state inputs do not attend to action tokens
        att_masks += [1] + ([0] * (self.config.action_horizon - 1))

        embs = torch.cat(embs, dim=1)
        pad_masks = torch.cat(pad_masks, dim=1)
        # NOTE(review): this att_masks tensor uses embs.dtype (float) while
        # embed_prefix builds a bool one; torch.cat in forward() relies on type
        # promotion to reconcile them — confirm this asymmetry is intentional.
        att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
        att_masks = att_masks[None, :].expand(bsize, len(att_masks))

        return embs, pad_masks, att_masks, adarms_cond
317
+
318
    def forward(self, observation, actions, noise=None, time=None) -> Tensor:
        """Do a full training forward pass and compute the loss (batch_size x num_steps x num_motors)"""
        images, img_masks, lang_tokens, lang_masks, state = self._preprocess_observation(observation, train=True)

        if noise is None:
            noise = self.sample_noise(actions.shape, actions.device)

        if time is None:
            time = self.sample_time(actions.shape[0], actions.device)

        # Flow matching: x_t interpolates from clean actions (t=0) toward noise
        # (t=1); u_t = noise - actions is the target velocity field.
        time_expanded = time[:, None, None]
        x_t = time_expanded * noise + (1 - time_expanded) * actions
        u_t = noise - actions

        prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, lang_tokens, lang_masks)
        suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(state, x_t, time)
        # Match the transformer parameter dtype before the joint pass.
        if (
            self.paligemma_with_expert.paligemma.language_model.layers[0].self_attn.q_proj.weight.dtype
            == torch.bfloat16
        ):
            suffix_embs = suffix_embs.to(dtype=torch.bfloat16)
            prefix_embs = prefix_embs.to(dtype=torch.bfloat16)

        pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
        att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)

        att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
        # Positions count only non-padding tokens.
        position_ids = torch.cumsum(pad_masks, dim=1) - 1

        # Prepare attention masks
        att_2d_masks_4d = self._prepare_attention_masks_4d(att_2d_masks)

        # Apply gradient checkpointing if enabled
        def forward_func(prefix_embs, suffix_embs, att_2d_masks_4d, position_ids, adarms_cond):
            (_, suffix_out), _ = self.paligemma_with_expert.forward(
                attention_mask=att_2d_masks_4d,
                position_ids=position_ids,
                past_key_values=None,
                inputs_embeds=[prefix_embs, suffix_embs],
                use_cache=False,
                adarms_cond=[None, adarms_cond],
            )
            return suffix_out

        suffix_out = self._apply_checkpoint(
            forward_func, prefix_embs, suffix_embs, att_2d_masks_4d, position_ids, adarms_cond
        )

        # Only the last `action_horizon` suffix positions are action tokens.
        suffix_out = suffix_out[:, -self.config.action_horizon :]
        suffix_out = suffix_out.to(dtype=torch.float32)

        # Apply gradient checkpointing to final action projection if enabled
        def action_out_proj_func(suffix_out):
            return self.action_out_proj(suffix_out)

        v_t = self._apply_checkpoint(action_out_proj_func, suffix_out)

        # Per-element MSE between target velocity u_t and prediction v_t
        # (reduction="none": caller decides how to aggregate).
        return F.mse_loss(u_t, v_t, reduction="none")
376
+
377
@torch.no_grad()
def sample_actions(self, device, observation, noise=None, num_steps=10) -> Tensor:
    """Do a full inference forward and compute the action (batch_size x num_steps x num_motors).

    Integrates the learned velocity field from t=1 (pure noise) back to t=0
    (actions) with `num_steps` Euler steps, reusing a prefix KV cache so that
    image/language tokens are encoded only once.
    """
    bsize = observation.state.shape[0]
    if noise is None:
        actions_shape = (bsize, self.config.action_horizon, self.config.action_dim)
        noise = self.sample_noise(actions_shape, device)

    # train=False: no augmentations at inference time.
    images, img_masks, lang_tokens, lang_masks, state = self._preprocess_observation(observation, train=False)

    prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, lang_tokens, lang_masks)
    prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
    prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1

    # Compute image and language key/value cache once for all denoising steps.
    prefix_att_2d_masks_4d = self._prepare_attention_masks_4d(prefix_att_2d_masks)
    # Force eager attention so the custom 4D mask is honored.
    self.paligemma_with_expert.paligemma.language_model.config._attn_implementation = "eager"  # noqa: SLF001

    _, past_key_values = self.paligemma_with_expert.forward(
        attention_mask=prefix_att_2d_masks_4d,
        position_ids=prefix_position_ids,
        past_key_values=None,
        inputs_embeds=[prefix_embs, None],
        use_cache=True,
    )

    # Negative step: we integrate backwards from t=1 to t=0.
    dt = -1.0 / num_steps
    dt = torch.tensor(dt, dtype=torch.float32, device=device)

    x_t = noise
    time = torch.tensor(1.0, dtype=torch.float32, device=device)
    # The -dt/2 threshold makes the loop run exactly num_steps times despite float rounding.
    while time >= -dt / 2:
        expanded_time = time.expand(bsize)
        v_t = self.denoise_step(
            state,
            prefix_pad_masks,
            past_key_values,
            x_t,
            expanded_time,
        )

        # Euler step - use new tensor assignment instead of in-place operation
        x_t = x_t + dt * v_t
        time += dt
    return x_t
422
+
423
def denoise_step(
    self,
    state,
    prefix_pad_masks,
    past_key_values,
    x_t,
    timestep,
):
    """Predict the velocity for the noisy actions `x_t` at `timestep` (one Euler evaluation).

    Reuses the cached prefix (image/language) keys and values; only the suffix
    tokens (state + noisy actions) are run through the action expert.
    """
    suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(state, x_t, timestep)

    bsz, n_prefix = prefix_pad_masks.shape[0], prefix_pad_masks.shape[1]
    n_suffix = suffix_pad_masks.shape[1]

    # Every suffix token may attend to every valid (non-padded) prefix token,
    # while attention among suffix tokens follows their own block-causal mask.
    prefix_block = prefix_pad_masks[:, None, :].expand(bsz, n_suffix, n_prefix)
    suffix_block = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)
    full_att_2d = torch.cat([prefix_block, suffix_block], dim=2)

    # Suffix positions continue where the (non-padded) prefix left off.
    prefix_lengths = torch.sum(prefix_pad_masks, dim=-1)[:, None]
    position_ids = prefix_lengths + torch.cumsum(suffix_pad_masks, dim=1) - 1

    full_att_4d = self._prepare_attention_masks_4d(full_att_2d)
    # Force eager attention so the custom 4D mask is honored.
    self.paligemma_with_expert.gemma_expert.model.config._attn_implementation = "eager"  # noqa: SLF001

    outputs_embeds, _ = self.paligemma_with_expert.forward(
        attention_mask=full_att_4d,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=[None, suffix_embs],
        use_cache=False,
        adarms_cond=[None, adarms_cond],
    )

    # Keep only the trailing action tokens and project them in full precision.
    action_tokens = outputs_embeds[1][:, -self.config.action_horizon :]
    return self.action_out_proj(action_tokens.to(dtype=torch.float32))
src/openpi/models_pytorch/preprocessing_pytorch.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Sequence
2
+ import logging
3
+
4
+ import torch
5
+
6
+ from openpi.shared import image_tools
7
+
8
logger = logging.getLogger("openpi")

# Constants moved from model.py
# Camera keys that every Observation.images dict is expected to contain.
IMAGE_KEYS = (
    "base_0_rgb",
    "left_wrist_0_rgb",
    "right_wrist_0_rgb",
)

# Target (height, width) all camera images are resized/padded to.
IMAGE_RESOLUTION = (224, 224)
18
+
19
+
20
def preprocess_observation_pytorch(
    observation,
    *,
    train: bool = False,
    image_keys: Sequence[str] = IMAGE_KEYS,
    image_resolution: tuple[int, int] = IMAGE_RESOLUTION,
):
    """Torch.compile-compatible version of preprocess_observation_pytorch with simplified type annotations.

    This function avoids complex type annotations that can cause torch.compile issues.

    Resizes/pads each camera image to `image_resolution` and, when `train` is True,
    applies random crop+rotation (non-wrist cameras only) and brightness/contrast/
    saturation jitter. Images are assumed to arrive in [-1, 1] range.

    Args:
        observation: object exposing `.images` (dict of image tensors), `.image_masks`,
            `.state`, and tokenized-prompt attributes.
        train: enable random augmentations.
        image_keys: camera keys that must be present in `observation.images`.
        image_resolution: (height, width) every image is brought to.

    Returns:
        A lightweight namespace-like object with processed `images`, `image_masks`,
        and the pass-through state/prompt fields.

    Raises:
        ValueError: if any of `image_keys` is missing from `observation.images`.
    """
    if not set(image_keys).issubset(observation.images):
        raise ValueError(f"images dict missing keys: expected {image_keys}, got {list(observation.images)}")

    batch_shape = observation.state.shape[:-1]

    out_images = {}
    for key in image_keys:
        image = observation.images[key]

        # TODO: This is a hack to handle both [B, C, H, W] and [B, H, W, C] formats
        # NOTE(review): this heuristic misclassifies a [B, H, W, C] image whose
        # height happens to be 3 — confirm upstream inputs can never be that small.
        is_channels_first = image.shape[1] == 3  # Check if channels are in dimension 1

        if is_channels_first:
            # Convert [B, C, H, W] to [B, H, W, C] for processing
            image = image.permute(0, 2, 3, 1)

        if image.shape[1:3] != image_resolution:
            logger.info(f"Resizing image {key} from {image.shape[1:3]} to {image_resolution}")
            image = image_tools.resize_with_pad_torch(image, *image_resolution)

        if train:
            # Convert from [-1, 1] to [0, 1] for PyTorch augmentations
            image = image / 2.0 + 0.5

            # Apply PyTorch-based augmentations
            if "wrist" not in key:
                # Geometric augmentations for non-wrist cameras only
                height, width = image.shape[1:3]

                # Random crop keeps 95% of each spatial dimension.
                crop_height = int(height * 0.95)
                crop_width = int(width * 0.95)

                max_h = height - crop_height
                max_w = width - crop_width
                if max_h > 0 and max_w > 0:
                    # Use tensor operations instead of .item() for torch.compile compatibility.
                    # NOTE(review): one offset is drawn per *batch*, so all samples share the same crop.
                    start_h = torch.randint(0, max_h + 1, (1,), device=image.device)
                    start_w = torch.randint(0, max_w + 1, (1,), device=image.device)
                    image = image[:, start_h : start_h + crop_height, start_w : start_w + crop_width, :]

                    # Resize back to original size
                    image = torch.nn.functional.interpolate(
                        image.permute(0, 3, 1, 2),  # [b, h, w, c] -> [b, c, h, w]
                        size=(height, width),
                        mode="bilinear",
                        align_corners=False,
                    ).permute(0, 2, 3, 1)  # [b, c, h, w] -> [b, h, w, c]

                # Random rotation (small angles), one angle shared by the whole batch.
                # Use tensor operations instead of .item() for torch.compile compatibility.
                angle = torch.rand(1, device=image.device) * 10 - 5  # Random angle between -5 and 5 degrees
                # NOTE(review): branching on a tensor forces a graph break under torch.compile — confirm acceptable.
                if torch.abs(angle) > 0.1:  # Only rotate if angle is significant
                    # Convert to radians
                    angle_rad = angle * torch.pi / 180.0

                    # Rotation matrix entries
                    cos_a = torch.cos(angle_rad)
                    sin_a = torch.sin(angle_rad)

                    # Apply rotation using grid_sample over a normalized [-1, 1] grid
                    grid_x = torch.linspace(-1, 1, width, device=image.device)
                    grid_y = torch.linspace(-1, 1, height, device=image.device)

                    # Create meshgrid
                    grid_y, grid_x = torch.meshgrid(grid_y, grid_x, indexing="ij")

                    # Expand to batch dimension
                    grid_x = grid_x.unsqueeze(0).expand(image.shape[0], -1, -1)
                    grid_y = grid_y.unsqueeze(0).expand(image.shape[0], -1, -1)

                    # Rotate each sampling coordinate
                    grid_x_rot = grid_x * cos_a - grid_y * sin_a
                    grid_y_rot = grid_x * sin_a + grid_y * cos_a

                    # Stack into the (x, y) layout grid_sample expects
                    grid = torch.stack([grid_x_rot, grid_y_rot], dim=-1)

                    image = torch.nn.functional.grid_sample(
                        image.permute(0, 3, 1, 2),  # [b, h, w, c] -> [b, c, h, w]
                        grid,
                        mode="bilinear",
                        padding_mode="zeros",  # out-of-frame pixels become black
                        align_corners=False,
                    ).permute(0, 2, 3, 1)  # [b, c, h, w] -> [b, h, w, c]

            # Color augmentations for all cameras.
            # NOTE(review): each factor below is drawn once per batch, not per sample.
            # Random brightness
            brightness_factor = 0.7 + torch.rand(1, device=image.device) * 0.6  # Random factor between 0.7 and 1.3
            image = image * brightness_factor

            # Random contrast: scale deviation from the per-sample mean
            contrast_factor = 0.6 + torch.rand(1, device=image.device) * 0.8  # Random factor between 0.6 and 1.4
            mean = image.mean(dim=[1, 2, 3], keepdim=True)
            image = (image - mean) * contrast_factor + mean

            # Random saturation: interpolate between grayscale and the original colors
            # (cheap approximation of an HSV saturation jitter).
            saturation_factor = 0.5 + torch.rand(1, device=image.device) * 1.0  # Random factor between 0.5 and 1.5
            gray = image.mean(dim=-1, keepdim=True)
            image = gray + (image - gray) * saturation_factor

            # Clamp values to [0, 1]
            image = torch.clamp(image, 0, 1)

            # Back to [-1, 1]
            image = image * 2.0 - 1.0

        # Convert back to [B, C, H, W] format if it was originally channels-first
        if is_channels_first:
            image = image.permute(0, 3, 1, 2)  # [B, H, W, C] -> [B, C, H, W]

        out_images[key] = image

    # Build image masks; cameras without an explicit mask are treated as fully valid.
    out_masks = {}
    for key in out_images:
        if key not in observation.image_masks:
            # do not mask by default
            out_masks[key] = torch.ones(batch_shape, dtype=torch.bool, device=observation.state.device)
        else:
            out_masks[key] = observation.image_masks[key]

    # Create a simple object with the required attributes instead of using the complex
    # Observation class (keeps torch.compile happy).
    class SimpleProcessedObservation:
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                setattr(self, key, value)

    return SimpleProcessedObservation(
        images=out_images,
        image_masks=out_masks,
        state=observation.state,
        tokenized_prompt=observation.tokenized_prompt,
        tokenized_prompt_mask=observation.tokenized_prompt_mask,
        token_ar_mask=observation.token_ar_mask,
        token_loss_mask=observation.token_loss_mask,
    )
src/openpi/models_pytorch/transformers_replace/models/gemma/configuration_gemma.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/gemma/modular_gemma.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_gemma.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ #
11
+ # Licensed under the Apache License, Version 2.0 (the "License");
12
+ # you may not use this file except in compliance with the License.
13
+ # You may obtain a copy of the License at
14
+ #
15
+ # http://www.apache.org/licenses/LICENSE-2.0
16
+ #
17
+ # Unless required by applicable law or agreed to in writing, software
18
+ # distributed under the License is distributed on an "AS IS" BASIS,
19
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20
+ # See the License for the specific language governing permissions and
21
+ # limitations under the License.
22
+ from typing import Optional
23
+ from ...configuration_utils import PretrainedConfig
24
+
25
+
26
class GemmaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate an Gemma
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Gemma-7B.
    e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GemmaModel`]
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 24576):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 16):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The legacy activation function. It is overwritten by the `hidden_activation`.
        hidden_activation (`str` or `function`, *optional*):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        use_adarms (`bool`, *optional*, defaults to `False`):
            Whether to use AdaRMS (adaptive RMS-norm conditioning) in the decoder layers.
        adarms_cond_dim (`int`, *optional*, defaults to `None`):
            The dimension of the AdaRMS conditioning vector. When `use_adarms` is `True` and this is
            left as `None`, it falls back to `hidden_size`.
    ```python
    >>> from transformers import GemmaModel, GemmaConfig
    >>> # Initializing a Gemma gemma-7b style configuration
    >>> configuration = GemmaConfig()
    >>> # Initializing a model from the gemma-7b style configuration
    >>> model = GemmaModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gemma"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel sharding plan consumed by transformers' TP-aware loaders.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    # Pipeline-parallel stage plan: module name -> (input names, output names).
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=256000,
        hidden_size=3072,
        intermediate_size=24576,
        num_hidden_layers=28,
        num_attention_heads=16,
        num_key_value_heads=16,
        head_dim=256,
        hidden_act="gelu_pytorch_tanh",
        hidden_activation=None,
        max_position_embeddings=8192,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        bos_token_id=2,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        attention_bias=False,
        attention_dropout=0.0,
        use_adarms: bool = False,
        adarms_cond_dim: Optional[int] = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.hidden_activation = hidden_activation
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.use_adarms = use_adarms
        self.adarms_cond_dim = adarms_cond_dim

        # Set default for adarms_cond_dim if use_adarms is True
        if self.use_adarms and self.adarms_cond_dim is None:
            self.adarms_cond_dim = self.hidden_size

        # Token ids and embedding tying are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
171
+
172
+
173
+ __all__ = ["GemmaConfig"]