Upload 2025-11-25/runs/23971-19685364855/ci_results_run_models_gpu/model_results.json with huggingface_hub
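Commits with this message are typically produced programmatically rather than through the web UI. Below is a minimal sketch of how such a file could be pushed with huggingface_hub; the local path, repo_id, and token handling are illustrative assumptions — only the path_in_repo and commit message match what this page shows.

# Minimal sketch: pushing a CI results file to a dataset repo with huggingface_hub.
# repo_id is a hypothetical placeholder; path_in_repo matches the file on this page.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from HF_TOKEN or the local login cache
api.upload_file(
    path_or_fileobj="model_results.json",  # local report produced by the CI job
    path_in_repo="2025-11-25/runs/23971-19685364855/ci_results_run_models_gpu/model_results.json",
    repo_id="<org>/<ci-results-dataset>",  # hypothetical dataset repo id
    repo_type="dataset",
    commit_message="Upload 2025-11-25/runs/23971-19685364855/ci_results_run_models_gpu/model_results.json with huggingface_hub",
)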
2025-11-25/runs/23971-19685364855/ci_results_run_models_gpu/model_results.json
ADDED
@@ -0,0 +1,878 @@
{
    "models_bigbird_pegasus": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 0, "multi": 0},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 314,
        "skipped": 426,
        "time_spent": [129.4, 124.71],
        "error": false,
        "failures": {},
        "job_link": {
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042880",
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042955"
        },
        "captured_info": {}
    },
    "models_fsmt": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 5, "multi": 5},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 317,
        "skipped": 337,
        "time_spent": [57.88, 56.59],
        "error": false,
        "failures": {
            "multi": [
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_inference_no_head",
                    "trace": "(line 497) AssertionError: Tensor-likes are not close!"
                },
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_translation_direct_0_en_ru",
                    "trace": "(line 3223) RuntimeError: The weights trying to be saved contained shared tensors [{'model.decoder.embed_tokens.weight', 'model.decoder.output_projection.weight'}] which are not properly defined. We found `_tied_weights_keys` to be: set()."
                },
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_translation_direct_1_ru_en",
                    "trace": "(line 3223) RuntimeError: The weights trying to be saved contained shared tensors [{'model.decoder.embed_tokens.weight', 'model.decoder.output_projection.weight'}] which are not properly defined. We found `_tied_weights_keys` to be: set()."
                },
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_translation_pipeline_0_en_ru",
                    "trace": "(line 3223) RuntimeError: The weights trying to be saved contained shared tensors [{'model.decoder.embed_tokens.weight', 'model.decoder.output_projection.weight'}] which are not properly defined. We found `_tied_weights_keys` to be: set()."
                },
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_translation_pipeline_1_ru_en",
                    "trace": "(line 3223) RuntimeError: The weights trying to be saved contained shared tensors [{'model.decoder.embed_tokens.weight', 'model.decoder.output_projection.weight'}] which are not properly defined. We found `_tied_weights_keys` to be: set()."
                }
            ],
            "single": [
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_inference_no_head",
                    "trace": "(line 497) AssertionError: Tensor-likes are not close!"
                },
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_translation_direct_0_en_ru",
                    "trace": "(line 3223) RuntimeError: The weights trying to be saved contained shared tensors [{'model.decoder.embed_tokens.weight', 'model.decoder.output_projection.weight'}] which are not properly defined. We found `_tied_weights_keys` to be: set()."
                },
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_translation_direct_1_ru_en",
                    "trace": "(line 3223) RuntimeError: The weights trying to be saved contained shared tensors [{'model.decoder.embed_tokens.weight', 'model.decoder.output_projection.weight'}] which are not properly defined. We found `_tied_weights_keys` to be: set()."
                },
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_translation_pipeline_0_en_ru",
                    "trace": "(line 3223) RuntimeError: The weights trying to be saved contained shared tensors [{'model.decoder.embed_tokens.weight', 'model.decoder.output_projection.weight'}] which are not properly defined. We found `_tied_weights_keys` to be: set()."
                },
                {
                    "line": "tests/models/fsmt/test_modeling_fsmt.py::FSMTModelIntegrationTests::test_translation_pipeline_1_ru_en",
                    "trace": "(line 3223) RuntimeError: The weights trying to be saved contained shared tensors [{'model.decoder.embed_tokens.weight', 'model.decoder.output_projection.weight'}] which are not properly defined. We found `_tied_weights_keys` to be: set()."
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042967",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042913"
        },
        "captured_info": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042967#step:16:1",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042913#step:16:1"
        }
    },
    "models_internvl": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 0, "multi": 1},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 358,
        "skipped": 215,
        "time_spent": [235.73, 234.52],
        "error": false,
        "failures": {
            "multi": [
                {
                    "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_multi_gpu_data_parallel_forward",
                    "trace": "(line 769) StopIteration: Caught StopIteration in replica 1 on device 1."
                }
            ]
        },
        "job_link": {
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042894",
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390043000"
        },
        "captured_info": {}
    },
    "models_llava": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 9, "multi": 6},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 336,
        "skipped": 131,
        "time_spent": [466.72, 474.11],
        "error": false,
        "failures": {
            "multi": [
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_batched_generation",
                    "trace": "(line 555) AssertionError: Lists differ: [\"\\n [52 chars]ANT: In the two images, the primary difference[287 chars]ama'] != [\"\\n [52 chars]ANT: The difference between the two images is [304 chars]the']"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_generation_no_images",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 62.69 MiB is free. Process 22359 has 22.23 GiB memory in use. Of the allocated memory 21.73 GiB is allocated by PyTorch, and 14.09 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral_4bit",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 40.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 30.69 MiB is free. Process 22359 has 22.27 GiB memory in use. Of the allocated memory 21.78 GiB is allocated by PyTorch, and 3.87 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral_batched",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.25 GiB. GPU 0 has a total capacity of 22.30 GiB of which 704.00 KiB is free. Process 22359 has 22.29 GiB memory in use. Of the allocated memory 21.81 GiB is allocated by PyTorch, and 1.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 28.69 MiB is free. Process 22359 has 22.27 GiB memory in use. Of the allocated memory 21.75 GiB is allocated by PyTorch, and 28.53 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
                    "trace": "(line 1364) NotImplementedError: Cannot copy out of meta tensor; no data! Please use torch.nn.Module.to_empty() instead of torch.nn.Module.to() when moving module from meta to a different device."
                }
            ],
            "single": [
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_batched_generation",
                    "trace": "(line 184) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 148.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 24.69 MiB is free. Process 17921 has 22.27 GiB memory in use. Of the allocated memory 21.00 GiB is allocated by PyTorch, and 910.30 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_generation_no_images",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 252.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 172.69 MiB is free. Process 17921 has 22.13 GiB memory in use. Of the allocated memory 20.71 GiB is allocated by PyTorch, and 1.03 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral_4bit",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 40.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 16.69 MiB is free. Process 17921 has 22.28 GiB memory in use. Of the allocated memory 21.89 GiB is allocated by PyTorch, and 2.50 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral_batched",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.25 GiB. GPU 0 has a total capacity of 22.30 GiB of which 6.69 MiB is free. Process 17921 has 22.29 GiB memory in use. Of the allocated memory 21.90 GiB is allocated by PyTorch, and 2.18 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 40.69 MiB is free. Process 17921 has 22.26 GiB memory in use. Of the allocated memory 21.83 GiB is allocated by PyTorch, and 44.48 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 98.69 MiB is free. Process 17921 has 22.20 GiB memory in use. Of the allocated memory 21.75 GiB is allocated by PyTorch, and 68.16 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_batched",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 14.69 MiB is free. Process 17921 has 22.28 GiB memory in use. Of the allocated memory 21.88 GiB is allocated by PyTorch, and 21.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_batched_regression",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 16.69 MiB is free. Process 17921 has 22.28 GiB memory in use. Of the allocated memory 21.87 GiB is allocated by PyTorch, and 28.31 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_single",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 84.69 MiB is free. Process 17921 has 22.21 GiB memory in use. Of the allocated memory 21.81 GiB is allocated by PyTorch, and 24.34 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042958",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042889"
        },
        "captured_info": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042958#step:16:1"
        }
    },
    "models_llava_next": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 0, "multi": 0},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 339,
        "skipped": 133,
        "time_spent": [445.0, 447.58],
        "error": false,
        "failures": {},
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042953",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042898"
        },
        "captured_info": {}
    },
    "models_llava_next_video": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 0, "multi": 0},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 347,
        "skipped": 101,
        "time_spent": [218.85, 213.34],
        "error": false,
        "failures": {},
        "job_link": {
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042891",
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042940"
        },
        "captured_info": {}
    },
    "models_mistral": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 0, "multi": 0},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 285,
        "skipped": 179,
        "time_spent": [230.86, 230.22],
        "error": false,
        "failures": {},
        "job_link": {
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042927",
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042941"
        },
        "captured_info": {
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042927#step:16:1",
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042941#step:16:1"
        }
    },
    "models_mistral3": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 2, "multi": 2},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 279,
        "skipped": 239,
        "time_spent": [608.02, 626.28],
        "error": false,
        "failures": {
            "multi": [
                {
                    "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_batched_generate",
                    "trace": "(line 363) AssertionError: 'Calm waters reflect\\nWooden path to distant shore\\nSilence in the woods' != \"Wooden path to calm,\\nReflections whisper secrets,\\nNature's peace unfolds.\""
                },
                {
                    "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_batched_generate_multi_image",
                    "trace": "(line 439) AssertionError: \"Calm waters reflect\\nWooden path to distant shore\\nPeace in nature's hold\" != 'Calm waters reflect\\nWooden path to distant shore\\nSilence in the scene'"
                }
            ],
            "single": [
                {
                    "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_batched_generate",
                    "trace": "(line 363) AssertionError: 'Calm waters reflect\\nWooden path to distant shore\\nSilence in the woods' != \"Wooden path to calm,\\nReflections whisper secrets,\\nNature's peace unfolds.\""
                },
                {
                    "line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_batched_generate_multi_image",
                    "trace": "(line 439) AssertionError: \"Calm waters reflect\\nWooden path to distant shore\\nPeace in nature's hold\" != 'Calm waters reflect\\nWooden path to distant shore\\nSilence in the scene'"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042934",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042888"
        },
        "captured_info": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042934#step:16:1",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042888#step:16:1"
        }
    },
    "models_olmoe": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 0, "multi": 2},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 111,
        "skipped": 98,
        "time_spent": [92.35],
        "error": false,
        "failures": {
            "multi": [
                {
                    "line": "tests/models/olmoe/test_modeling_olmoe.py::OlmoeIntegrationTest::test_model_7b_greedy_generation",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 18.69 MiB is free. Process 22696 has 22.28 GiB memory in use. Of the allocated memory 21.45 GiB is allocated by PyTorch, and 375.02 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/olmoe/test_modeling_olmoe.py::OlmoeIntegrationTest::test_model_7b_logits",
                    "trace": "(line 401) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 1 has a total capacity of 22.30 GiB of which 2.69 MiB is free. Process 22696 has 22.29 GiB memory in use. Of the allocated memory 21.12 GiB is allocated by PyTorch, and 786.95 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042961"
        },
        "captured_info": {}
    },
    "models_phi3": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 5, "multi": 5},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 257,
        "skipped": 185,
        "time_spent": [151.4, 157.15],
        "error": false,
        "failures": {
            "multi": [
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_export_static_cache",
                    "trace": "(line 841) torch._dynamo.exc.Unsupported: Data-dependent branching"
                },
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_phi3_mini_128k_instruct_generation",
                    "trace": "(line 289) AttributeError"
                },
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_phi3_mini_128k_instruct_with_static_cache",
                    "trace": "(line 289) AttributeError"
                },
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_phi3_mini_4k_instruct_generation",
                    "trace": "(line 289) AttributeError"
                },
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_phi3_mini_4k_instruct_with_static_cache",
                    "trace": "(line 289) AttributeError"
                }
            ],
            "single": [
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_export_static_cache",
                    "trace": "(line 841) torch._dynamo.exc.Unsupported: Data-dependent branching"
                },
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_phi3_mini_128k_instruct_generation",
                    "trace": "(line 289) AttributeError"
                },
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_phi3_mini_128k_instruct_with_static_cache",
                    "trace": "(line 289) AttributeError"
                },
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_phi3_mini_4k_instruct_generation",
                    "trace": "(line 289) AttributeError"
                },
                {
                    "line": "tests/models/phi3/test_modeling_phi3.py::Phi3IntegrationTest::test_phi3_mini_4k_instruct_with_static_cache",
                    "trace": "(line 289) AttributeError"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042982",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042897"
        },
        "captured_info": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042982#step:16:1",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042897#step:16:1"
        }
    },
    "models_whisper": {
        "failed": {
            "PyTorch": {"unclassified": 0, "single": 3, "multi": 3},
            "Tokenizers": {"unclassified": 0, "single": 0, "multi": 0},
            "Pipelines": {"unclassified": 0, "single": 0, "multi": 0},
            "Trainer": {"unclassified": 0, "single": 0, "multi": 0},
            "ONNX": {"unclassified": 0, "single": 0, "multi": 0},
            "Auto": {"unclassified": 0, "single": 0, "multi": 0},
            "Quantization": {"unclassified": 0, "single": 0, "multi": 0},
            "Unclassified": {"unclassified": 0, "single": 0, "multi": 0}
        },
        "errors": 0,
        "success": 987,
        "skipped": 393,
        "time_spent": [743.34, 725.55],
        "error": false,
        "failures": {
            "multi": [
                {
                    "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_longform_timestamps_generation",
                    "trace": "(line 1893) AssertionError: Lists differ: [{'te[649 chars] 45.38)}, {'text': ' can discover in it but li[620 chars]16)}] != [{'te[649 chars] 45.36)}, {'text': ' can discover in it but li[620 chars]16)}]"
                },
                {
                    "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard",
                    "trace": "(line 2798) AssertionError: Lists differ: [\" Fo[272 chars]ting of classics, Sicilian, nade door variatio[8147 chars]le!'] != [\" Fo[272 chars]ting a classic Sicilian, nade door variation o[8150 chars]le!']"
                },
                {
                    "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_single_batch_prev_cond",
                    "trace": "(line 2567) AssertionError: Lists differ: [\" Fo[268 chars]ating, so soft, it would make JD power and her[196 chars]ke.\"] != [\" Fo[268 chars]ating so soft, it would make JD power and her [195 chars]ke.\"]"
                }
            ],
            "single": [
                {
                    "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_longform_timestamps_generation",
                    "trace": "(line 1893) AssertionError: Lists differ: [{'te[649 chars] 45.38)}, {'text': ' can discover in it but li[620 chars]16)}] != [{'te[649 chars] 45.36)}, {'text': ' can discover in it but li[620 chars]16)}]"
                },
                {
                    "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard",
                    "trace": "(line 2798) AssertionError: Lists differ: [\" Fo[272 chars]ting of classics, Sicilian, nade door variatio[8147 chars]le!'] != [\" Fo[272 chars]ting a classic Sicilian, nade door variation o[8150 chars]le!']"
                },
                {
                    "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_single_batch_prev_cond",
                    "trace": "(line 2567) AssertionError: Lists differ: [\" Fo[268 chars]ating, so soft, it would make JD power and her[196 chars]ke.\"] != [\" Fo[268 chars]ating so soft, it would make JD power and her [195 chars]ke.\"]"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042971",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042925"
        },
        "captured_info": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042971#step:16:1",
            "single": "https://github.com/huggingface/transformers/actions/runs/19685364855/job/56390042925#step:16:1"
        }
    }
}
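The per-model schema above is uniform: each entry carries failure counts bucketed by framework category and by single/multi GPU job, total success and skipped counts, per-job wall-clock time in seconds, the failing test ids with a one-line trace, and links to the GitHub Actions jobs. A minimal sketch of consuming the report, assuming the file has been downloaded locally as model_results.json:

import json

# Print one summary line per model, plus each failing test id grouped
# by the job flavor ("single" or "multi") it failed under.
with open("model_results.json") as f:
    results = json.load(f)

for model, report in results.items():
    failed_single = sum(v["single"] for v in report["failed"].values())
    failed_multi = sum(v["multi"] for v in report["failed"].values())
    print(
        f"{model}: {failed_single} single-GPU / {failed_multi} multi-GPU failures, "
        f"{report['success']} passed, {report['skipped']} skipped"
    )
    for job, entries in report["failures"].items():
        for entry in entries:
            print(f"  [{job}] {entry['line']}")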