Upload 2025-08-26/runs/4874-17231951358/ci_results_run_models_gpu/model_results.json with huggingface_hub
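The commit title above indicates this file was pushed with the huggingface_hub client. Below is a minimal sketch of that kind of upload, assuming a token is already configured (e.g. via `huggingface-cli login`) and using a placeholder `repo_id` and `repo_type` — the actual target repository is not shown in this diff.

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    # Local file produced by the CI run (path taken from the commit title).
    path_or_fileobj="2025-08-26/runs/4874-17231951358/ci_results_run_models_gpu/model_results.json",
    # Keep the same path inside the repository.
    path_in_repo="2025-08-26/runs/4874-17231951358/ci_results_run_models_gpu/model_results.json",
    repo_id="hf-internal-testing/example-ci-results",  # hypothetical repo id, for illustration only
    repo_type="dataset",  # assumption: CI result dumps are typically stored in a dataset repo
    commit_message="Upload 2025-08-26/runs/4874-17231951358/ci_results_run_models_gpu/model_results.json with huggingface_hub",
)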
2025-08-26/runs/4874-17231951358/ci_results_run_models_gpu/model_results.json
ADDED
@@ -0,0 +1,1513 @@
{
  "models_auto": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 226,
    "skipped": 10,
    "time_spent": [ 58.51, 51.91 ],
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344747",
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344894"
    }
  },
  "models_bert": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 531,
    "skipped": 211,
    "time_spent": [ 116.02, 115.61 ],
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344727",
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344910"
    }
  },
  "models_csm": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 3, "multi": 3 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 226,
    "skipped": 206,
    "time_spent": [ 113.15, 112.34 ],
    "failures": {
      "multi": [
        { "line": "tests/models/csm/test_modeling_csm.py::CsmForConditionalGenerationIntegrationTest::test_1b_model_integration_generate", "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?" },
        { "line": "tests/models/csm/test_modeling_csm.py::CsmForConditionalGenerationIntegrationTest::test_1b_model_integration_generate_batched", "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?" },
        { "line": "tests/models/csm/test_modeling_csm.py::CsmForConditionalGenerationIntegrationTest::test_1b_model_integration_generate_multiple_audio", "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?" }
      ],
      "single": [
        { "line": "tests/models/csm/test_modeling_csm.py::CsmForConditionalGenerationIntegrationTest::test_1b_model_integration_generate", "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?" },
        { "line": "tests/models/csm/test_modeling_csm.py::CsmForConditionalGenerationIntegrationTest::test_1b_model_integration_generate_batched", "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?" },
        { "line": "tests/models/csm/test_modeling_csm.py::CsmForConditionalGenerationIntegrationTest::test_1b_model_integration_generate_multiple_audio", "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344845",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344710"
    }
  },
  "models_detr": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 195,
    "skipped": 261,
    "time_spent": [ 79.28, 74.76 ],
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344837",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344753"
    }
  },
  "models_gemma3n": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 2, "multi": 2 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 301,
    "skipped": 705,
    "time_spent": [ 195.98, 196.8 ],
    "failures": {
      "multi": [
        { "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_fp32_ln", "trace": "(line 1122) RuntimeError: result type Float can't be cast to the desired output type unsigned char" },
        { "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_inference_equivalence_right_padding", "trace": "(line 3589) AssertionError: assert False" }
      ],
      "single": [
        { "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_fp32_ln", "trace": "(line 1122) RuntimeError: result type Float can't be cast to the desired output type unsigned char" },
        { "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_inference_equivalence_right_padding", "trace": "(line 3589) AssertionError: assert False" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344881",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344719"
    }
  },
  "models_got_ocr2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 265,
    "skipped": 337,
    "time_spent": [ 131.89, 130.27 ],
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344828",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344760"
    }
  },
  "models_gpt2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 503,
    "skipped": 217,
    "time_spent": [ 125.68, 135.41 ],
    "failures": {
      "single": [
        { "line": "tests/models/gpt2/test_modeling_gpt2.py::GPT2ModelLanguageGenerationTest::test_flash_attn_2_generate_padding_left", "trace": "(line 675) AssertionError: Lists differ: ['<|e[141 chars]ta', \"Hello this is a very long sentence. I'm [46 chars]rry\"] != ['<|e[141 chars]ta', 'Hello this is a very long sentence very [91 chars]ong']" }
      ],
      "multi": [
        { "line": "tests/models/gpt2/test_modeling_gpt2.py::GPT2ModelLanguageGenerationTest::test_flash_attn_2_generate_padding_left", "trace": "(line 675) AssertionError: Lists differ: ['<|e[141 chars]ta', \"Hello this is a very long sentence. I'm [46 chars]rry\"] != ['<|e[141 chars]ta', 'Hello this is a very long sentence very [91 chars]ong']" }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344735",
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344888"
    }
  },
  "models_internvl": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 1, "multi": 1 }
    },
    "errors": 0,
    "success": 369,
    "skipped": 235,
    "time_spent": [ 294.25, 296.66 ],
    "failures": {
      "multi": [
        { "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_flex_attention_with_grads", "trace": "(line 573) torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 106496 Hardware limit:101376 Reducing block sizes or `num_stages` may help." },
        { "line": "tests/models/internvl/test_processing_internvl.py::InternVLProcessorTest::test_model_input_names", "trace": "(line 250) ValueError: Number of video placeholders in the prompt does not match the number of videos." }
      ],
      "single": [
        { "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_flex_attention_with_grads", "trace": "(line 573) torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 106496 Hardware limit:101376 Reducing block sizes or `num_stages` may help." },
        { "line": "tests/models/internvl/test_processing_internvl.py::InternVLProcessorTest::test_model_input_names", "trace": "(line 250) ValueError: Number of video placeholders in the prompt does not match the number of videos." }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344860",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344772"
    }
  },
  "models_llama": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 505,
    "skipped": 235,
    "time_spent": [ 262.64, 253.16 ],
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344731",
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344854"
    }
  },
  "models_mistral3": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 2, "multi": 2 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 293,
    "skipped": 263,
    "time_spent": [ 607.51, 588.2 ],
    "failures": {
      "single": [
        { "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_batched_generate", "trace": "(line 675) AssertionError: 'Calm waters reflect\\nWooden path to distant shore\\nSilence in the woods' != \"Wooden path to calm,\\nReflections whisper secrets,\\nNature's peace unfolds.\"" },
        { "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_batched_generate_multi_image", "trace": "(line 675) AssertionError: \"Calm waters reflect\\nWooden path to distant shore\\nPeace in nature's hold\" != 'Calm waters reflect\\nWooden path to distant shore\\nSilence in the scene'" }
      ],
      "multi": [
        { "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_batched_generate", "trace": "(line 675) AssertionError: 'Calm waters reflect\\nWooden path to distant shore\\nSilence in the woods' != \"Wooden path to calm,\\nReflections whisper secrets,\\nNature's peace unfolds.\"" },
        { "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_batched_generate_multi_image", "trace": "(line 675) AssertionError: \"Calm waters reflect\\nWooden path to distant shore\\nPeace in nature's hold\" != 'Calm waters reflect\\nWooden path to distant shore\\nSilence in the scene'" }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344780",
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344858"
    }
  },
  "models_modernbert": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 192,
    "skipped": 206,
    "time_spent": [ 111.7, 109.02 ],
    "failures": {
      "multi": [
        { "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelTest::test_flash_attn_2_inference_equivalence", "trace": "(line 1515) RuntimeError: shape '[-1, 7]' is invalid for input of size 1" }
      ],
      "single": [
        { "line": "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelTest::test_flash_attn_2_inference_equivalence", "trace": "(line 1515) RuntimeError: shape '[-1, 7]' is invalid for input of size 1" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344902",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344763"
    }
  },
  "models_qwen2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 465,
    "skipped": 233,
    "time_spent": [ 179.85, 177.76 ],
    "failures": {
      "multi": [
        { "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_3b_generation", "trace": "(line 83) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.16 GiB. GPU 0 has a total capacity of 22.18 GiB of which 130.50 MiB is free. Process 22524 has 22.05 GiB memory in use. Of the allocated memory 21.55 GiB is allocated by PyTorch, and 21.23 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)" }
      ],
      "single": [
        { "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_3b_generation", "trace": "(line 83) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.16 GiB. GPU 0 has a total capacity of 22.18 GiB of which 238.50 MiB is free. Process 18163 has 21.95 GiB memory in use. Of the allocated memory 21.55 GiB is allocated by PyTorch, and 21.23 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344909",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344771"
    }
  },
  "models_qwen2_5_omni": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 3, "multi": 5 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 297,
    "skipped": 141,
    "time_spent": [ 190.96, 209.46 ],
    "failures": {
      "multi": [
        { "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence", "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[1, 1, 2, 1, 1, 2]" },
        { "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding", "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[1, 1, 2, 1, 1, 2]" },
        { "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch_flashatt2", "trace": "(line 675) AssertionError: Lists differ: [\"sys[139 chars] dog is a Labrador Retriever.\", \"system\\nYou a[155 chars]er.\"] != [\"sys[139 chars] dog appears to be a Labrador Retriever.\", \"sy[177 chars]er.\"]" },
        { "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_multiturn", "trace": "(line 868) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 1 has a total capacity of 22.18 GiB of which 16.50 MiB is free. Process 22400 has 22.16 GiB memory in use. Of the allocated memory 21.72 GiB is allocated by PyTorch, and 28.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)" },
        { "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_w_audio", "trace": "(line 199) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 1 has a total capacity of 22.18 GiB of which 18.50 MiB is free. Process 22400 has 22.16 GiB memory in use. Of the allocated memory 21.73 GiB is allocated by PyTorch, and 16.33 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)" }
      ],
      "single": [
        { "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence", "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[1, 1, 2, 1, 1, 2]" },
        { "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding", "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[1, 1, 2, 1, 1, 2]" },
        { "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch_flashatt2", "trace": "(line 675) AssertionError: Lists differ: [\"sys[139 chars] dog is a Labrador Retriever.\", \"system\\nYou a[155 chars]er.\"] != [\"sys[139 chars] dog appears to be a Labrador Retriever.\", \"sy[177 chars]er.\"]" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344876",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344779"
    }
  },
  "models_qwen2_5_vl": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 4, "multi": 4 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 329,
    "skipped": 123,
    "time_spent": [ 243.55, 239.93 ],
    "failures": {
      "multi": [
        { "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLModelTest::test_flash_attn_2_inference_equivalence", "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[2, 1, 1, 2, 1, 1]" },
        { "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLModelTest::test_flash_attn_2_inference_equivalence_right_padding", "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[2, 1, 1, 2, 1, 1]" },
        { "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_flashatt2", "trace": "(line 675) AssertionError: Lists differ: ['sys[176 chars] and energetic nature, which is evident in', '[218 chars] in'] != ['sys[176 chars] and intelligent nature, making them popular p[232 chars]ets']" },
        { "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_wo_image_flashatt2", "trace": "(line 675) AssertionError: Lists differ: ['sys[176 chars] and energetic nature, which is evident in', '[69 chars]aks'] != ['sys[176 chars] and intelligent nature, making them popular p[224 chars]ics']" }
      ],
      "single": [
        { "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLModelTest::test_flash_attn_2_inference_equivalence", "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[2, 1, 1, 2, 1, 1]" },
        { "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLModelTest::test_flash_attn_2_inference_equivalence_right_padding", "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[2, 1, 1, 2, 1, 1]" },
        { "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_flashatt2", "trace": "(line 675) AssertionError: Lists differ: ['sys[176 chars] and energetic nature, which is evident in', '[218 chars] in'] != ['sys[176 chars] and intelligent nature, making them popular p[232 chars]ets']" },
        { "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_wo_image_flashatt2", "trace": "(line 675) AssertionError: Lists differ: ['sys[176 chars] and energetic nature, which is evident in', '[69 chars]aks'] != ['sys[176 chars] and intelligent nature, making them popular p[224 chars]ics']" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344908",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344798"
    }
  },
  "models_qwen2_audio": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 4 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 1 }
    },
    "errors": 0,
    "success": 131,
    "skipped": 85,
    "time_spent": [ 161.76 ],
    "failures": {
      "multi": [
        { "line": "tests/models/qwen2_audio/test_modeling_qwen2_audio.py::Qwen2AudioForConditionalGenerationModelTest::test_eager_matches_fa2_generate", "trace": "(line 165) RuntimeError: cu_seqlens_q must have shape (batch_size + 1)" },
        { "line": "tests/models/qwen2_audio/test_modeling_qwen2_audio.py::Qwen2AudioForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch", "trace": "(line 675) AssertionError: Lists differ: [\"sys[164 chars]ant\\nI can hear the sound of glass shattering.[155 chars]n.\"'] != [\"sys[164 chars]ant\\ncough and throat clearing.\", \"system\\nYou[210 chars]l.'\"]" },
        { "line": "tests/models/qwen2_audio/test_modeling_qwen2_audio.py::Qwen2AudioForConditionalGenerationIntegrationTest::test_small_model_integration_test_multiturn", "trace": "(line 675) AssertionError: Lists differ: [\"sys[146 chars]t this one?\\nassistant\\nThis is the sound of liquid dripping.\"] != [\"sys[146 chars]t this one?\\nassistant\\nThroat clearing.\"]" },
        { "line": "tests/models/qwen2_audio/test_modeling_qwen2_audio.py::Qwen2AudioForConditionalGenerationIntegrationTest::test_small_model_integration_test_single", "trace": "(line 687) AssertionError: False is not true" },
        { "line": "tests/models/qwen2_audio/test_processing_qwen2_audio.py::Qwen2AudioProcessorTest::test_model_input_names", "trace": "(line 120) ValueError: Found 0 <|AUDIO|> token in provided text but received 1 audio" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344857"
    }
  },
  "models_smolvlm": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 539,
    "skipped": 249,
    "time_spent": [ 113.42, 111.27 ],
    "failures": {
      "multi": [
        { "line": "tests/models/smolvlm/test_modeling_smolvlm.py::SmolVLMForConditionalGenerationIntegrationTest::test_integration_test_video", "trace": "(line 675) AssertionError: 'User[310 chars]ideo depicts a large language model architectu[58 chars]ture' != 'User[310 chars]ideo showcases a large language model, specifi[56 chars] and'" }
      ],
      "single": [
        { "line": "tests/models/smolvlm/test_modeling_smolvlm.py::SmolVLMForConditionalGenerationIntegrationTest::test_integration_test_video", "trace": "(line 675) AssertionError: 'User[310 chars]ideo depicts a large language model architectu[58 chars]ture' != 'User[310 chars]ideo showcases a large language model, specifi[56 chars] and'" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344906",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344843"
    }
  },
  "models_t5": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 613,
    "skipped": 529,
    "time_spent": [ 189.1, 203.47 ],
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344849",
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344861"
    }
  },
  "models_table_transformer": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 122,
    "skipped": 264,
    "time_spent": [ 53.44, 50.21 ],
    "failures": {
      "multi": [
        { "line": "tests/models/table_transformer/test_modeling_table_transformer.py::TableTransformerModelIntegrationTests::test_table_detection", "trace": "(line 598) AssertionError: Tensor-likes are not close!" }
      ],
      "single": [
        { "line": "tests/models/table_transformer/test_modeling_table_transformer.py::TableTransformerModelIntegrationTests::test_table_detection", "trace": "(line 598) AssertionError: Tensor-likes are not close!" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344891",
      "single": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344896"
    }
  },
  "models_wav2vec2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 361,
    "skipped": 204,
    "time_spent": [ 343.77 ],
    "failures": {
      "multi": [
        { "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_inference_mms_1b_all", "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?" }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/17231951358/job/48888344901"
    }
  }
}