hf-transformers-bot committed (verified)
Commit b3add7e · 1 Parent(s): a0ecc06

Upload 2026-01-05/runs/26167-20722104240/ci_results_run_models_gpu/model_results.json with huggingface_hub
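The commit message indicates the file was pushed with the huggingface_hub Python client. As a minimal sketch of what such an upload might look like: the call below uses the real HfApi.upload_file API, but the repo_id, repo_type, and local file path are illustrative assumptions, not taken from this commit.

from huggingface_hub import HfApi

api = HfApi()
# repo_id, repo_type, and the local path are hypothetical placeholders;
# only path_in_repo and the commit message are known from this commit.
api.upload_file(
    path_or_fileobj="model_results.json",
    path_in_repo="2026-01-05/runs/26167-20722104240/ci_results_run_models_gpu/model_results.json",
    repo_id="some-org/ci-reports",
    repo_type="dataset",
    commit_message="Upload 2026-01-05/runs/26167-20722104240/ci_results_run_models_gpu/model_results.json with huggingface_hub",
)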

2026-01-05/runs/26167-20722104240/ci_results_run_models_gpu/model_results.json ADDED
@@ -0,0 +1,223 @@
+ {
+     "models_qwen2": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 1,
+                 "multi": 1
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 0,
+         "success": 375,
+         "skipped": 203,
+         "time_spent": [
+             234.43,
+             237.14
+         ],
+         "error": false,
+         "failures": {
+             "single": [
+                 {
+                     "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_model_450m_logits",
+                     "trace": "(line 82) AssertionError: Tensor-likes are not close!"
+                 }
+             ],
+             "multi": [
+                 {
+                     "line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_model_450m_logits",
+                     "trace": "(line 82) AssertionError: Tensor-likes are not close!"
+                 }
+             ]
+         },
+         "job_link": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470048",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470009"
+         },
+         "captured_info": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470048#step:16:1",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470009#step:16:1"
+         }
+     },
+     "models_qwen2_5_omni": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 1
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 0,
+         "success": 304,
+         "skipped": 245,
+         "time_spent": [
+             253.69,
+             240.99
+         ],
+         "error": false,
+         "failures": {
+             "multi": [
+                 {
+                     "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_multi_gpu_data_parallel_forward",
+                     "trace": "(line 769) StopIteration: Caught StopIteration in replica 1 on device 1."
+                 }
+             ]
+         },
+         "job_link": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470034",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470036"
+         },
+         "captured_info": {}
+     },
+     "models_qwen2_5_vl": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 1,
+                 "multi": 3
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 0,
+         "success": 321,
+         "skipped": 123,
+         "time_spent": [
+             288.04,
+             284.71
+         ],
+         "error": false,
+         "failures": {
+             "single": [
+                 {
+                     "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
+                     "trace": "(line 587) AssertionError: 'syst[73 chars]ant\\nThe dog in the picture appears to be a La[94 chars]t in' != 'syst[73 chars]ant\\n addCriterion\\nThe dog in the picture app[95 chars]h is'"
+                 }
+             ],
+             "multi": [
+                 {
+                     "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
+                     "trace": "(line 587) AssertionError: 'syst[73 chars]ant\\nThe dog in the picture appears to be a La[94 chars]t in' != 'syst[73 chars]ant\\n addCriterion\\nThe dog in the picture app[95 chars]h is'"
+                 },
+                 {
+                     "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_wo_image",
+                     "trace": "(line 523) OSError: Qwen/Qwen2.5-VL-7B-Instruct does not appear to have a file named model-00001-of-00005.safetensors. Checkout 'https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct/tree/main' for available files."
+                 },
+                 {
+                     "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_with_video",
+                     "trace": "(line 711) AssertionError: Lists differ: ['sys[124 chars]h a person standing on the baseline, preparing[74 chars]irt'] != ['sys[124 chars]h a player standing on the baseline, preparing[61 chars]ts,']"
+                 }
+             ]
+         },
+         "job_link": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470040",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470000"
+         },
+         "captured_info": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470040#step:16:1",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20722104240/job/59488470000#step:16:1"
+         }
+     }
+ }
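
For reference, a minimal sketch of how a local copy of this report could be summarized, assuming it is saved as model_results.json (path is an assumption); the schema used below (per-model "success"/"skipped" counts plus "single"/"multi" failure lists under "failures") is exactly the one shown in the diff above.

import json

with open("model_results.json") as f:
    results = json.load(f)

# Print one summary line per model suite; "single" or "multi" may be
# absent from "failures" when that GPU setup had no failing tests
# (e.g. models_qwen2_5_omni above has only a "multi" list).
for model, report in results.items():
    failures = report.get("failures", {})
    print(
        f"{model}: success={report['success']} skipped={report['skipped']} "
        f"single_failures={len(failures.get('single', []))} "
        f"multi_failures={len(failures.get('multi', []))}"
    )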