bosungkim committed on
Commit
a64ddf3
·
verified ·
1 Parent(s): e24d527

Upload colab_checkpoint_eval.ipynb with huggingface_hub

Browse files
Files changed (1) hide show
  1. colab_checkpoint_eval.ipynb +268 -0
colab_checkpoint_eval.ipynb ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Cosmos-Predict2.5 Checkpoint Evaluation\n",
8
+ "\n",
9
+ "Evaluate fine-tuned checkpoints for iterative video generation on RoboCasa tasks.\n",
10
+ "\n",
11
+ "**Prerequisites:** Upload data to HuggingFace Hub first (see upload commands in the repo)."
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "markdown",
16
+ "metadata": {},
17
+ "source": [
18
+ "## 1. Setup Environment"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": null,
24
+ "metadata": {},
25
+ "outputs": [],
26
+ "source": [
27
+ "# Install cosmos-predict2.5\n",
28
+ "!git clone https://github.com/nvidia-cosmos/cosmos-predict2.5.git /content/cosmos-predict2.5\n",
29
+ "%cd /content/cosmos-predict2.5\n",
30
+ "!pip install -e '.[all]' 2>&1 | tail -5"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "execution_count": null,
36
+ "metadata": {},
37
+ "outputs": [],
38
+ "source": [
39
+ "# Login to HuggingFace to download data\n",
40
+ "from huggingface_hub import login, hf_hub_download, snapshot_download\n",
41
+ "login() # Enter your HF token when prompted"
42
+ ]
43
+ },
44
+ {
45
+ "cell_type": "markdown",
46
+ "metadata": {},
47
 + "source": "## 2. Download Checkpoints & Data"
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": null,
52
+ "metadata": {},
53
+ "outputs": [],
54
+ "source": [
55
+ "import os\n",
56
+ "\n",
57
+ "# === CONFIGURE THESE ===\n",
58
 + "HF_REPO = \"bosungkim/cosmos-predict2.5-eval\" # Change to your HF repo\n",
59
+ "DATA_DIR = \"/content/data\"\n",
60
+ "os.makedirs(DATA_DIR, exist_ok=True)\n",
61
+ "\n",
62
+ "# Download checkpoints\n",
63
+ "print(\"Downloading checkpoints...\")\n",
64
+ "snapshot_download(\n",
65
+ " repo_id=HF_REPO,\n",
66
+ " repo_type=\"model\",\n",
67
+ " local_dir=DATA_DIR,\n",
68
+ " allow_patterns=[\"checkpoints/*\", \"robocasa_data/*\", \"initial_images/*\"],\n",
69
+ ")\n",
70
+ "print(\"Download complete!\")\n",
71
+ "\n",
72
+ "# Verify files\n",
73
+ "!find {DATA_DIR} -name \"*.pt\" -exec ls -lh {} \\;\n",
74
+ "!find {DATA_DIR} -name \"*.json\" -exec ls -lh {} \\;\n",
75
+ "!find {DATA_DIR} -name \"*.jpg\" -exec ls -lh {} \\;"
76
+ ]
77
+ },
78
+ {
79
+ "cell_type": "markdown",
80
+ "metadata": {},
81
+ "source": [
82
+ "## 3. Configuration"
83
+ ]
84
+ },
85
+ {
86
+ "cell_type": "code",
87
+ "execution_count": null,
88
+ "metadata": {},
89
+ "outputs": [],
90
+ "source": [
91
+ "# ========== CONFIGURATION ==========\n",
92
+ "# Task selection: uncomment the task you want to evaluate\n",
93
+ "\n",
94
+ "# --- ArrangeVegetables ---\n",
95
+ "INITIAL_IMAGE = f\"{DATA_DIR}/initial_images/ArrangeVegetables__2024-05-11__demo_1_grid_first.jpg\"\n",
96
+ "TASK_NAME = \"ArrangeVegetables\"\n",
97
+ "\n",
98
+ "# --- PrepareCoffee ---\n",
99
+ "# INITIAL_IMAGE = f\"{DATA_DIR}/initial_images/PrepareCoffee__2024-05-07__demo_1_grid_first.jpg\"\n",
100
+ "# TASK_NAME = \"PrepareCoffee\"\n",
101
+ "\n",
102
+ "DATASET_PATH = f\"{DATA_DIR}/robocasa_data\"\n",
103
+ "JSON_FILE = f\"{DATA_DIR}/robocasa_data/robocasa_composite_to_atomic_decomposition_finegrained.json\"\n",
104
+ "RESOLUTION = \"432,768\"\n",
105
+ "GUIDANCE = 7\n",
106
+ "NUM_STEPS = 50\n",
107
+ "NUM_VIDEO_FRAMES = 77\n",
108
+ "\n",
109
+ "# Checkpoint paths\n",
110
+ "CHECKPOINT_ITERS = [2000, 2200, 2400, 2600]\n",
111
+ "CHECKPOINTS = [\n",
112
+ " f\"{DATA_DIR}/checkpoints/iter_{it:09d}/model_ema_bf16.pt\"\n",
113
+ " for it in CHECKPOINT_ITERS\n",
114
+ "]\n",
115
+ "\n",
116
+ "# Verify all files exist\n",
117
+ "for f in [INITIAL_IMAGE, JSON_FILE] + CHECKPOINTS:\n",
118
+ " assert os.path.exists(f), f\"Missing: {f}\"\n",
119
+ "print(f\"Task: {TASK_NAME}\")\n",
120
+ "print(f\"Checkpoints: {len(CHECKPOINTS)}\")\n",
121
+ "print(\"All files verified!\")"
122
+ ]
123
+ },
124
+ {
125
+ "cell_type": "markdown",
126
+ "metadata": {},
127
+ "source": [
128
+ "## 4. Run Evaluation (Sequential)\n",
129
+ "\n",
130
+ "Colab has a single GPU, so we run checkpoints sequentially."
131
+ ]
132
+ },
133
+ {
134
+ "cell_type": "code",
135
+ "execution_count": null,
136
+ "metadata": {},
137
+ "outputs": [],
138
+ "source": [
139
+ "import subprocess\n",
140
+ "import time\n",
141
+ "\n",
142
+ "OUTPUT_BASE = \"/content/outputs/checkpoint_comparison\"\n",
143
+ "os.makedirs(OUTPUT_BASE, exist_ok=True)\n",
144
+ "\n",
145
+ "results = []\n",
146
+ "\n",
147
+ "for i, (ckpt_path, ckpt_iter) in enumerate(zip(CHECKPOINTS, CHECKPOINT_ITERS)):\n",
148
+ " iter_str = f\"iter_{ckpt_iter:09d}\"\n",
149
+ " output_dir = f\"{OUTPUT_BASE}/{iter_str}\"\n",
150
+ " log_file = f\"{output_dir}/run.log\"\n",
151
+ " os.makedirs(output_dir, exist_ok=True)\n",
152
+ "\n",
153
+ " print(f\"\\n{'='*60}\")\n",
154
+ " print(f\"[{i+1}/{len(CHECKPOINTS)}] Running {iter_str}\")\n",
155
+ " print(f\"{'='*60}\")\n",
156
+ "\n",
157
+ " cmd = [\n",
158
+ " \"python\", \"examples/iterative_video_gen.py\",\n",
159
+ " \"--output-dir\", output_dir,\n",
160
+ " \"--dataset-path\", DATASET_PATH,\n",
161
+ " \"--json-file\", JSON_FILE,\n",
162
+ " \"--initial-image-path\", INITIAL_IMAGE,\n",
163
+ " \"--checkpoint-path\", ckpt_path,\n",
164
+ " \"--resolution\", RESOLUTION,\n",
165
+ " \"--num-video-frames\", str(NUM_VIDEO_FRAMES),\n",
166
+ " \"--task-name\", TASK_NAME,\n",
167
+ " \"--guidance\", str(GUIDANCE),\n",
168
+ " \"--num-steps\", str(NUM_STEPS),\n",
169
+ " ]\n",
170
+ "\n",
171
+ " start_time = time.time()\n",
172
+ " with open(log_file, \"w\") as lf:\n",
173
+ " proc = subprocess.run(\n",
174
+ " cmd,\n",
175
+ " stdout=lf,\n",
176
+ " stderr=subprocess.STDOUT,\n",
177
+ " env={**os.environ, \"PYTHONPATH\": \"/content/cosmos-predict2.5\"},\n",
178
+ " cwd=\"/content/cosmos-predict2.5\",\n",
179
+ " )\n",
180
+ " elapsed = time.time() - start_time\n",
181
+ "\n",
182
+ " status = \"SUCCESS\" if proc.returncode == 0 else \"FAILED\"\n",
183
+ " results.append((iter_str, status, elapsed))\n",
184
+ " print(f\" {status} ({elapsed:.1f}s) - log: {log_file}\")\n",
185
+ "\n",
186
+ "print(f\"\\n{'='*60}\")\n",
187
+ "print(\"Summary:\")\n",
188
+ "for iter_str, status, elapsed in results:\n",
189
+ " print(f\" {iter_str}: {status} ({elapsed:.1f}s)\")\n",
190
+ "print(f\"{'='*60}\")"
191
+ ]
192
+ },
193
+ {
194
+ "cell_type": "markdown",
195
+ "metadata": {},
196
+ "source": [
197
+ "## 5. View Results"
198
+ ]
199
+ },
200
+ {
201
+ "cell_type": "code",
202
+ "execution_count": null,
203
+ "metadata": {},
204
+ "outputs": [],
205
+ "source": [
206
+ "import glob\n",
207
+ "from IPython.display import display, HTML, Video\n",
208
+ "\n",
209
+ "# Find all generated videos\n",
210
+ "video_files = sorted(glob.glob(f\"{OUTPUT_BASE}/*/task_{TASK_NAME}/*.mp4\"))\n",
211
+ "print(f\"Found {len(video_files)} videos:\\n\")\n",
212
+ "for vf in video_files:\n",
213
+ " print(f\" {vf}\")"
214
+ ]
215
+ },
216
+ {
217
+ "cell_type": "code",
218
+ "execution_count": null,
219
+ "metadata": {},
220
+ "outputs": [],
221
+ "source": [
222
+ "# Display videos inline (final_concatenated for each checkpoint)\n",
223
+ "for vf in video_files:\n",
224
+ " if \"final_concatenated\" in vf:\n",
225
+ " iter_name = vf.split(\"/checkpoint_comparison/\")[1].split(\"/\")[0]\n",
226
+ " print(f\"\\n--- {iter_name} ---\")\n",
227
+ " display(Video(vf, embed=True, width=768))"
228
+ ]
229
+ },
230
+ {
231
+ "cell_type": "markdown",
232
+ "metadata": {},
233
+ "source": [
234
+ "## 6. Download Results"
235
+ ]
236
+ },
237
+ {
238
+ "cell_type": "code",
239
+ "execution_count": null,
240
+ "metadata": {},
241
+ "outputs": [],
242
+ "source": [
243
+ "# Zip results for download\n",
244
+ "!cd /content && zip -r checkpoint_eval_results.zip outputs/checkpoint_comparison/\n",
245
+ "\n",
246
+ "from google.colab import files\n",
247
+ "files.download(\"/content/checkpoint_eval_results.zip\")"
248
+ ]
249
+ }
250
+ ],
251
+ "metadata": {
252
+ "accelerator": "GPU",
253
+ "colab": {
254
+ "gpuType": "A100",
255
+ "provenance": []
256
+ },
257
+ "kernelspec": {
258
+ "display_name": "Python 3",
259
+ "name": "python3"
260
+ },
261
+ "language_info": {
262
+ "name": "python",
263
+ "version": "3.10.0"
264
+ }
265
+ },
266
+ "nbformat": 4,
267
+ "nbformat_minor": 0
268
+ }