ToToKu committed on
Commit ·
06288ae
1
Parent(s): edec42f
Add skippable success
Browse files- .gitignore +1 -0
- README.md +43 -0
- __pycache__/run_suite.cpython-314.pyc +0 -0
- run_suite.py +48 -1
.gitignore
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
outputs/
|
README.md
CHANGED
|
@@ -42,6 +42,38 @@ apex-test-suite/
|
|
| 42 |
|
| 43 |
### 1) Download the dataset from HuggingFace
|
| 44 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
Pick one:
|
| 46 |
|
| 47 |
- **Option A: `huggingface_hub` snapshot download**
|
|
@@ -130,6 +162,17 @@ Run a subset by filename substring:
|
|
| 130 |
.venv/bin/python apps/api/test_suite/run_suite.py --filter seedvr2
|
| 131 |
```
|
| 132 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
Run a single test JSON by absolute path:
|
| 134 |
|
| 135 |
```bash
|
|
|
|
| 42 |
|
| 43 |
### 1) Download the dataset from HuggingFace
|
| 44 |
|
| 45 |
+
### Hugging Face auth (recommended for private/gated repos)
|
| 46 |
+
|
| 47 |
+
If you see `401/403 Unauthorized` from Hugging Face (dataset downloads or model/component downloads), export a token in your shell **before** running the suite/API.
|
| 48 |
+
|
| 49 |
+
- **Linux / macOS (bash/zsh)**
|
| 50 |
+
|
| 51 |
+
```bash
|
| 52 |
+
export HF_TOKEN="hf_xxx"
|
| 53 |
+
# or (also supported)
|
| 54 |
+
export HUGGING_FACE_HUB_TOKEN="hf_xxx"
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
- **Windows (PowerShell)**
|
| 58 |
+
|
| 59 |
+
```powershell
|
| 60 |
+
$env:HF_TOKEN="hf_xxx"
|
| 61 |
+
# or
|
| 62 |
+
$env:HUGGING_FACE_HUB_TOKEN="hf_xxx"
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
- **Windows (cmd.exe)**
|
| 66 |
+
|
| 67 |
+
```bat
|
| 68 |
+
set HF_TOKEN=hf_xxx
|
| 69 |
+
REM or
|
| 70 |
+
set HUGGING_FACE_HUB_TOKEN=hf_xxx
|
| 71 |
+
```
|
| 72 |
+
|
| 73 |
+
Notes:
|
| 74 |
+
- `hf auth login` stores a token on disk, but exporting `HF_TOKEN` is the most reliable option for test runs (especially when subprocesses/Ray workers are involved).
|
| 75 |
+
- For **gated models**, you must also request/accept access on the model page in Hugging Face; a valid token alone can still return 401/403.
|
| 76 |
+
|
| 77 |
Pick one:
|
| 78 |
|
| 79 |
- **Option A: `huggingface_hub` snapshot download**
|
|
|
|
| 162 |
.venv/bin/python apps/api/test_suite/run_suite.py --filter seedvr2
|
| 163 |
```
|
| 164 |
|
| 165 |
+
Resume / rerun only failures (skip successes):
|
| 166 |
+
|
| 167 |
+
This treats a test as “successful” if its expected artifact already exists under `outputs/`
|
| 168 |
+
(e.g. `outputs/<json_stem>.png` for `image/` tests, `outputs/<json_stem>.mp4` for `video/` / `upscalers/`).
|
| 169 |
+
|
| 170 |
+
```bash
|
| 171 |
+
.venv/bin/python apps/api/test_suite/run_suite.py --skip-successes
|
| 172 |
+
# (alias)
|
| 173 |
+
.venv/bin/python apps/api/test_suite/run_suite.py --only-failed
|
| 174 |
+
```
|
| 175 |
+
|
| 176 |
Run a single test JSON by absolute path:
|
| 177 |
|
| 178 |
```bash
|
__pycache__/run_suite.cpython-314.pyc
ADDED
|
Binary file (20.3 kB). View file
|
|
|
run_suite.py
CHANGED
|
@@ -152,6 +152,23 @@ def _match_filters(p: Path, kind: Optional[str], name_contains: Optional[str]) -
|
|
| 152 |
return True
|
| 153 |
|
| 154 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 155 |
def _parse_child_json(stdout: str) -> Dict[str, Any]:
|
| 156 |
# Child prints one JSON object as the last line. Be defensive in case logs leaked.
|
| 157 |
lines = [ln.strip() for ln in stdout.splitlines() if ln.strip()]
|
|
@@ -253,6 +270,23 @@ def main() -> int:
|
|
| 253 |
"'on' enables previews for all tests."
|
| 254 |
),
|
| 255 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 256 |
args = parser.parse_args()
|
| 257 |
|
| 258 |
api_root = Path(__file__).resolve().parents[1]
|
|
@@ -292,10 +326,22 @@ def main() -> int:
|
|
| 292 |
if not nunchaku_available:
|
| 293 |
tests = [p for p in tests if "nunchaku" not in p.name.lower()]
|
| 294 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 295 |
started = time.perf_counter()
|
| 296 |
results: List[Dict[str, Any]] = []
|
| 297 |
|
| 298 |
-
for idx, p in enumerate(tests):
|
| 299 |
env = os.environ.copy()
|
| 300 |
env["PYTHONUNBUFFERED"] = "1"
|
| 301 |
# Reduce Ray log de-duping so you can see what's happening during long runs.
|
|
@@ -357,6 +403,7 @@ def main() -> int:
|
|
| 357 |
"skipped": len(skipped),
|
| 358 |
"total_elapsed_s": total_s,
|
| 359 |
"arch": (results[0].get("arch") if results else None),
|
|
|
|
| 360 |
"results": results,
|
| 361 |
}
|
| 362 |
|
|
|
|
| 152 |
return True
|
| 153 |
|
| 154 |
|
| 155 |
+
def _expected_output_path_for_test(test_json: Path, outputs_dir: Path) -> Path:
|
| 156 |
+
"""
|
| 157 |
+
Determine the expected output artifact path for a test JSON based on its kind.
|
| 158 |
+
|
| 159 |
+
Canonical naming matches `run_one.py` behavior:
|
| 160 |
+
outputs/<json_stem>.<ext>
|
| 161 |
+
|
| 162 |
+
Default mapping:
|
| 163 |
+
- image/*.json -> .png
|
| 164 |
+
- video/*.json -> .mp4
|
| 165 |
+
- upscalers/*.json -> .mp4
|
| 166 |
+
"""
|
| 167 |
+
kind = test_json.parts[-2] if len(test_json.parts) >= 2 else ""
|
| 168 |
+
ext = ".png" if kind == "image" else ".mp4"
|
| 169 |
+
return outputs_dir / f"{test_json.stem}{ext}"
|
| 170 |
+
|
| 171 |
+
|
| 172 |
def _parse_child_json(stdout: str) -> Dict[str, Any]:
|
| 173 |
# Child prints one JSON object as the last line. Be defensive in case logs leaked.
|
| 174 |
lines = [ln.strip() for ln in stdout.splitlines() if ln.strip()]
|
|
|
|
| 270 |
"'on' enables previews for all tests."
|
| 271 |
),
|
| 272 |
)
|
| 273 |
+
|
| 274 |
+
parser.add_argument(
|
| 275 |
+
"--skip-count",
|
| 276 |
+
type=int,
|
| 277 |
+
default=0,
|
| 278 |
+
help="Number of tests to skip at the beginning.",
|
| 279 |
+
)
|
| 280 |
+
parser.add_argument(
|
| 281 |
+
"--only-failed",
|
| 282 |
+
"--skip-successes",
|
| 283 |
+
dest="only_failed",
|
| 284 |
+
action="store_true",
|
| 285 |
+
help=(
|
| 286 |
+
"Skip tests that already have their expected output artifact in outputs/. "
|
| 287 |
+
"This effectively runs only tests that are missing outputs (i.e. previously failed/incomplete)."
|
| 288 |
+
),
|
| 289 |
+
)
|
| 290 |
args = parser.parse_args()
|
| 291 |
|
| 292 |
api_root = Path(__file__).resolve().parents[1]
|
|
|
|
| 326 |
if not nunchaku_available:
|
| 327 |
tests = [p for p in tests if "nunchaku" not in p.name.lower()]
|
| 328 |
|
| 329 |
+
# Optionally filter to tests missing their expected output artifact in outputs/.
|
| 330 |
+
if args.only_failed and not args.json:
|
| 331 |
+
before = len(tests)
|
| 332 |
+
tests = [
|
| 333 |
+
p
|
| 334 |
+
for p in tests
|
| 335 |
+
if not _expected_output_path_for_test(p, outputs_dir).exists()
|
| 336 |
+
]
|
| 337 |
+
print(
|
| 338 |
+
f"Skip-successes mode: selected {len(tests)} tests missing outputs (from {before} discovered)."
|
| 339 |
+
)
|
| 340 |
+
|
| 341 |
started = time.perf_counter()
|
| 342 |
results: List[Dict[str, Any]] = []
|
| 343 |
|
| 344 |
+
for idx, p in enumerate(tests[args.skip_count:]):
|
| 345 |
env = os.environ.copy()
|
| 346 |
env["PYTHONUNBUFFERED"] = "1"
|
| 347 |
# Reduce Ray log de-duping so you can see what's happening during long runs.
|
|
|
|
| 403 |
"skipped": len(skipped),
|
| 404 |
"total_elapsed_s": total_s,
|
| 405 |
"arch": (results[0].get("arch") if results else None),
|
| 406 |
+
"only_failed": bool(args.only_failed and not args.json),
|
| 407 |
"results": results,
|
| 408 |
}
|
| 409 |
|