Update README.md
Browse files
README.md
CHANGED
|
@@ -62,29 +62,26 @@ task_categories:
|
|
| 62 |
```bash
|
| 63 |
pip install -r requirements.txt
|
| 64 |
```
|
| 65 |
-
##### 2.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
- `--gt_root`: Ground truth data root directory (required)
|
| 68 |
- `--test_root`: Test data root directory (required)
|
| 69 |
- `--dino_path`: DINOv3 model weights directory (default: `./dinov3_vitb16`)
|
| 70 |
- `--num_gpus`: Number of GPUs to use for parallel processing (default: 1)
|
| 71 |
- `--video_max_time`: Maximum video frames to process (default: `None` = use all frames)
|
| 72 |
- `--output`: Output JSON file path (default: `result_{test_root}_{timestamp}.json`)
|
| 73 |
-
- `--metrics`: Comma-separated metrics to compute (default: `lcm,visual,dino,action`)
|
| 74 |
-
##### 3.
|
| 75 |
-
|
| 76 |
-
The metrics computation supports multi-GPU parallel processing for faster evaluation.
|
| 77 |
-
|
| 78 |
-
```bash
|
| 79 |
-
python src/process.py --gt_root /path/to/MIND-Data --test_root /path/to/test/videos --num_gpus 8 --metrics lcm,visual,action
|
| 80 |
-
```
|
| 81 |
-
**How Multi-GPU Works**
|
| 82 |
-
- Videos are put into a task queue.
|
| 83 |
-
- Each GPU process takes one task from the queue when it is idle.
|
| 84 |
-
- If failed, the task will be put back into the queue.
|
| 85 |
-
- Progress bars show the accumulated progress across all results.
|
| 86 |
-
- Each time a task finishes, the result file is updated, so you can obtain intermediate results from the file.
|
| 87 |
-
##### 4. How to order your test files
|
| 88 |
```
|
| 89 |
{model_name}
|
| 90 |
βββ 1st_data
|
|
@@ -94,8 +91,12 @@ python src/process.py --gt_root /path/to/MIND-Data --test_root /path/to/test/vid
|
|
| 94 |
| | ...
|
| 95 |
| |
|
| 96 |
β βββ mirror_test
|
| 97 |
-
β | βββ {
|
| 98 |
-
β
|
|
|
|
|
|
|
|
|
|
|
|
|
| 99 |
| | ...
|
| 100 |
| |
|
| 101 |
| βββ mem_test
|
|
@@ -110,8 +111,12 @@ python src/process.py --gt_root /path/to/MIND-Data --test_root /path/to/test/vid
|
|
| 110 |
| | ...
|
| 111 |
| |
|
| 112 |
β βββ mirror_test
|
| 113 |
-
β | βββ {
|
| 114 |
-
β
|
|
|
|
|
|
|
|
|
|
|
|
|
| 115 |
| | ...
|
| 116 |
| |
|
| 117 |
β βββ mem_test
|
|
@@ -123,7 +128,7 @@ python src/process.py --gt_root /path/to/MIND-Data --test_root /path/to/test/vid
|
|
| 123 |
- `{corresponding data name}`: corresponding ground truth data file name
|
| 124 |
|
| 125 |
|
| 126 |
-
#####
|
| 127 |
|
| 128 |
```
|
| 129 |
{
|
|
@@ -137,6 +142,26 @@ python src/process.py --gt_root /path/to/MIND-Data --test_root /path/to/test/vid
|
|
| 137 |
"mark_time": [int] the divider of memory context and expected prediction; the start frame index of the expected prediction.
|
| 138 |
"total_time": [int] the total frames of the ground truth video.
|
| 139 |
"sample_frames": [int] the total frames of the video to be tested.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
"lcm": { the long context memory metric result.
|
| 141 |
"mse": [list[float]] the per-frame mean square error.
|
| 142 |
"avg_mse": [float] the average of mse.
|
|
@@ -209,18 +234,20 @@ MIND-Data
|
|
| 209 |
βββ 1st_data
|
| 210 |
β βββ test
|
| 211 |
β β βββ action_space_test
|
| 212 |
-
β β β βββ data
|
| 213 |
β β β β βββ action.json
|
|
|
|
| 214 |
β β β β βββ video.mp4
|
| 215 |
| | | ...
|
| 216 |
| | |
|
| 217 |
β β βββ mem_test
|
| 218 |
-
β β βββ data
|
| 219 |
β β β βββ action.json
|
|
|
|
| 220 |
β β β βββ video.mp4
|
| 221 |
| | ...
|
| 222 |
| βββ train
|
| 223 |
-
| βββ data
|
| 224 |
| β βββ action.json
|
| 225 |
| β βββ video.mp4
|
| 226 |
| ...
|
|
@@ -228,18 +255,20 @@ MIND-Data
|
|
| 228 |
βββ 3rd_data
|
| 229 |
β βββ test
|
| 230 |
β β βββ action_space_test
|
| 231 |
-
β β β βββ data
|
| 232 |
β β β β βββ action.json
|
|
|
|
| 233 |
β β β β βββ video.mp4
|
| 234 |
| | | ...
|
| 235 |
| | |
|
| 236 |
β β βββ mem_test
|
| 237 |
-
β β βββ data
|
| 238 |
β β β βββ action.json
|
|
|
|
| 239 |
β β β βββ video.mp4
|
| 240 |
| | ...
|
| 241 |
| βββ train
|
| 242 |
-
| βββ data
|
| 243 |
| β βββ action.json
|
| 244 |
| β βββ video.mp4
|
| 245 |
| ...
|
|
|
|
| 62 |
```bash
|
| 63 |
pip install -r requirements.txt
|
| 64 |
```
|
| 65 |
+
##### 2. Multi-GPU Support
|
| 66 |
+
How Multi-GPU Works
|
| 67 |
+
- Videos are put into a task queue.
|
| 68 |
+
- Each GPU process takes one task from the queue when it is idle.
|
| 69 |
+
- If failed, the task will be put back into the queue.
|
| 70 |
+
- Progress bars show the accumulated progress across all results.
|
| 71 |
+
- Each time a task finishes, the result file is updated, so you can obtain intermediate results from the file.
|
| 72 |
|
| 73 |
+
The metrics computation supports multi-GPU parallel processing for faster evaluation.
|
| 74 |
+
```bash
|
| 75 |
+
python src/process.py --gt_root /path/to/MIND-Data --test_root /path/to/test/videos --num_gpus 8 --metrics lcm,visual,action
|
| 76 |
+
```
|
| 77 |
- `--gt_root`: Ground truth data root directory (required)
|
| 78 |
- `--test_root`: Test data root directory (required)
|
| 79 |
- `--dino_path`: DINOv3 model weights directory (default: `./dinov3_vitb16`)
|
| 80 |
- `--num_gpus`: Number of GPUs to use for parallel processing (default: 1)
|
| 81 |
- `--video_max_time`: Maximum video frames to process (default: `None` = use all frames)
|
| 82 |
- `--output`: Output JSON file path (default: `result_{test_root}_{timestamp}.json`)
|
| 83 |
+
- `--metrics`: Comma-separated metrics to compute (default: `lcm,visual,dino,action,gsc`)
|
| 84 |
+
##### 3. How to order your test files
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
```
|
| 86 |
{model_name}
|
| 87 |
βββ 1st_data
|
|
|
|
| 91 |
| | ...
|
| 92 |
| |
|
| 93 |
β βββ mirror_test
|
| 94 |
+
β | βββ {arbitrary data name}
|
| 95 |
+
β β β βββ path-1.mp4
|
| 96 |
+
β β β βββ path-2.mp4
|
| 97 |
+
β β β βββ path-3.mp4
|
| 98 |
+
β β β ...
|
| 99 |
+
β β β βββ path-10.mp4
|
| 100 |
| | ...
|
| 101 |
| |
|
| 102 |
| βββ mem_test
|
|
|
|
| 111 |
| | ...
|
| 112 |
| |
|
| 113 |
β βββ mirror_test
|
| 114 |
+
β | βββ {arbitrary data name}
|
| 115 |
+
β β β βββ path-1.mp4
|
| 116 |
+
β β β βββ path-2.mp4
|
| 117 |
+
β β β βββ path-3.mp4
|
| 118 |
+
β β β ...
|
| 119 |
+
β β β βββ path-10.mp4
|
| 120 |
| | ...
|
| 121 |
| |
|
| 122 |
β βββ mem_test
|
|
|
|
| 128 |
- `{corresponding data name}`: corresponding ground truth data file name
|
| 129 |
|
| 130 |
|
| 131 |
+
##### 4. The detailed information of output **<span style="color:red">`Result.json`</span>**
|
| 132 |
|
| 133 |
```
|
| 134 |
{
|
|
|
|
| 142 |
"mark_time": [int] the divider of memory context and expected prediction; the start frame index of the expected prediction.
|
| 143 |
"total_time": [int] the total frames of the ground truth video.
|
| 144 |
"sample_frames": [int] the total frames of the video to be tested.
|
| 145 |
+
"video_results": [ the general scene consistency metric result.
|
| 146 |
+
{
|
| 147 |
+
"video_name": [string] the name of the video of the specific action path
|
| 148 |
+
"error": [string] the error that occurred when computing metrics for this video
|
| 149 |
+
"mark_time": [int] the divider of prediction and mirror prediction; the start frame index of the mirror prediction.
|
| 150 |
+
"sample_frames": [int] total frames of prediction and mirror prediction; should be 2x mark_time.
|
| 151 |
+
"gsc": {
|
| 152 |
+
"length": [int] length of the origin prediction and the mirror prediction.
|
| 153 |
+
"mse": [list[float]] the per-frame mean square error.
|
| 154 |
+
"avg_mse": [float] the average of mse.
|
| 155 |
+
"lpips": [list[float]] the per-frame Learned Perceptual Image Patch Similarity.
|
| 156 |
+
"avg_lpips": [float] the average of lpips.
|
| 157 |
+
"ssim": [list[float]] the per-frame Structural Similarity Index Measure.
|
| 158 |
+
"avg_ssim": [float] the average of ssim.
|
| 159 |
+
"psnr": [list[float]] the per-frame Peak Signal-to-Noise Ratio.
|
| 160 |
+
"avg_psnr": [float] the average of psnr.
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
...
|
| 164 |
+
]
|
| 165 |
"lcm": { the long context memory metric result.
|
| 166 |
"mse": [list[float]] the per-frame mean square error.
|
| 167 |
"avg_mse": [float] the average of mse.
|
|
|
|
| 234 |
βββ 1st_data
|
| 235 |
β βββ test
|
| 236 |
β β βββ action_space_test
|
| 237 |
+
β β β βββ {gt data name}
|
| 238 |
β β β β βββ action.json
|
| 239 |
+
β β β β βββ images.txt
|
| 240 |
β β β β βββ video.mp4
|
| 241 |
| | | ...
|
| 242 |
| | |
|
| 243 |
β β βββ mem_test
|
| 244 |
+
β β βββ {gt data name}
|
| 245 |
β β β βββ action.json
|
| 246 |
+
β β β βββ images.txt
|
| 247 |
β β β βββ video.mp4
|
| 248 |
| | ...
|
| 249 |
| βββ train
|
| 250 |
+
| βββ {gt data name}
|
| 251 |
| β βββ action.json
|
| 252 |
| β βββ video.mp4
|
| 253 |
| ...
|
|
|
|
| 255 |
βββ 3rd_data
|
| 256 |
β βββ test
|
| 257 |
β β βββ action_space_test
|
| 258 |
+
β β β βββ {gt data name}
|
| 259 |
β β β β βββ action.json
|
| 260 |
+
β β β β βββ images.txt
|
| 261 |
β β β β βββ video.mp4
|
| 262 |
| | | ...
|
| 263 |
| | |
|
| 264 |
β β βββ mem_test
|
| 265 |
+
β β βββ {gt data name}
|
| 266 |
β β β βββ action.json
|
| 267 |
+
β β β βββ images.txt
|
| 268 |
β β β βββ video.mp4
|
| 269 |
| | ...
|
| 270 |
| βββ train
|
| 271 |
+
| βββ {gt data name}
|
| 272 |
| β βββ action.json
|
| 273 |
| β βββ video.mp4
|
| 274 |
| ...
|