hf-transformers-bot committed on
Commit a4bb744 · verified · 1 parent: dea1ed4

Upload 2025-12-13/runs/25388-20185602908/ci_results_run_models_gpu/model_results.json with huggingface_hub

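For reference, a file like this is typically pushed with the `huggingface_hub` client. Below is a minimal sketch of such an upload, assuming a local copy of the file; the `repo_id` is a placeholder, since the target repository is not shown on this page:

```python
# Minimal sketch: push a CI results file with huggingface_hub.
# repo_id is a hypothetical placeholder; path_in_repo mirrors this commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up a token from HF_TOKEN or the local HF cache
api.upload_file(
    path_or_fileobj="model_results.json",  # local file to upload
    path_in_repo="2025-12-13/runs/25388-20185602908/ci_results_run_models_gpu/model_results.json",
    repo_id="<owner>/<repo>",  # placeholder, not taken from this page
    repo_type="dataset",       # assumption: CI reports usually live in a dataset repo
    commit_message="Upload model_results.json with huggingface_hub",
)
```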
2025-12-13/runs/25388-20185602908/ci_results_run_models_gpu/model_results.json ADDED
@@ -0,0 +1,719 @@
+ {
+     "models_llava_next": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 6,
+                 "multi": 6
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 0,
+         "success": 345,
+         "skipped": 135,
+         "time_spent": [
+             953.14,
+             944.38
+         ],
+         "error": false,
+         "failures": {
+             "multi": [
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_matches_single",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_full_vision_state_selection",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_unk_token",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 }
+             ],
+             "single": [
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_matches_single",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_full_vision_state_selection",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 },
+                 {
+                     "line": "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationIntegrationTest::test_small_model_integration_test_unk_token",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::BFloat16 != c10::Half"
+                 }
+             ]
+         },
+         "job_link": {
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164287",
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164286"
+         },
+         "captured_info": {}
+     },
+     "models_llava_next_video": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 4,
+                 "multi": 4
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 0,
+         "success": 357,
+         "skipped": 103,
+         "time_spent": [
+             206.19,
+             196.8
+         ],
+         "error": false,
+         "failures": {
+             "single": [
+                 {
+                     "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_different_vision_types",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_matches_single",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 }
+             ],
+             "multi": [
+                 {
+                     "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_different_vision_types",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_matches_single",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 }
+             ]
+         },
+         "job_link": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164275",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164315"
+         },
+         "captured_info": {}
+     },
+     "models_llava_onevision": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 2,
+                 "multi": 2
+             }
+         },
+         "errors": 0,
+         "success": 405,
+         "skipped": 111,
+         "time_spent": [
+             176.39,
+             175.56
+         ],
+         "error": false,
+         "failures": {
+             "multi": [
+                 {
+                     "line": "tests/models/llava_onevision/test_processing_llava_onevision.py::LlavaOnevisionProcessorTest::test_apply_chat_template_video_0",
+                     "trace": "(line 707) TypeError: Incorrect format used for video. Should be an url linking to an video or a local path."
+                 },
+                 {
+                     "line": "tests/models/llava_onevision/test_processing_llava_onevision.py::LlavaOnevisionProcessorTest::test_apply_chat_template_video_1",
+                     "trace": "(line 707) TypeError: Incorrect format used for video. Should be an url linking to an video or a local path."
+                 }
+             ],
+             "single": [
+                 {
+                     "line": "tests/models/llava_onevision/test_processing_llava_onevision.py::LlavaOnevisionProcessorTest::test_apply_chat_template_video_0",
+                     "trace": "(line 707) TypeError: Incorrect format used for video. Should be an url linking to an video or a local path."
+                 },
+                 {
+                     "line": "tests/models/llava_onevision/test_processing_llava_onevision.py::LlavaOnevisionProcessorTest::test_apply_chat_template_video_1",
+                     "trace": "(line 707) TypeError: Incorrect format used for video. Should be an url linking to an video or a local path."
+                 }
+             ]
+         },
+         "job_link": {
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164301",
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164285"
+         },
+         "captured_info": {}
+     },
+     "models_qwen2_5_omni": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 3,
+                 "multi": 4
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 0,
+         "success": 292,
+         "skipped": 237,
+         "time_spent": [
+             234.53,
+             264.22
+         ],
+         "error": false,
+         "failures": {
+             "multi": [
+                 {
+                     "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_multi_gpu_data_parallel_forward",
+                     "trace": "(line 769) StopIteration: Caught StopIteration in replica 1 on device 1."
+                 },
+                 {
+                     "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 692) AssertionError: \"syst[108 chars]d is a glass shattering. The dog in the pictur[22 chars]ver.\" != \"syst[108 chars]d is glass shattering, and the dog is a Labrador Retriever.\""
+                 },
+                 {
+                     "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch",
+                     "trace": "(line 734) AssertionError: Lists differ: [\"sys[109 chars]d is a glass shattering. The dog in the pictur[211 chars]er.\"] != [\"sys[109 chars]d is glass shattering, and the dog is a Labrad[185 chars]er.\"]"
+                 },
+                 {
+                     "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_w_audio",
+                     "trace": "(line 2334) AttributeError: 'NoneType' object has no attribute 'shape'"
+                 }
+             ],
+             "single": [
+                 {
+                     "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 692) AssertionError: \"syst[108 chars]d is a glass shattering. The dog in the pictur[22 chars]ver.\" != \"syst[108 chars]d is glass shattering, and the dog is a Labrador Retriever.\""
+                 },
+                 {
+                     "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch",
+                     "trace": "(line 734) AssertionError: Lists differ: [\"sys[109 chars]d is a glass shattering. The dog in the pictur[211 chars]er.\"] != [\"sys[109 chars]d is glass shattering, and the dog is a Labrad[185 chars]er.\"]"
+                 },
+                 {
+                     "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_w_audio",
+                     "trace": "(line 2334) AttributeError: 'NoneType' object has no attribute 'shape'"
+                 }
+             ]
+         },
+         "job_link": {
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164306",
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164274"
+         },
+         "captured_info": {
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164306#step:16:1",
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164274#step:16:1"
+         }
+     },
+     "models_qwen3_omni_moe": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 4,
+                 "multi": 6
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 0,
+         "success": 227,
+         "skipped": 191,
+         "time_spent": [
+             1338.66,
+             379.8
+         ],
+         "error": false,
+         "failures": {
+             "single": [
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniModelIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 725) AttributeError: Qwen3OmniMoeTalkerForConditionalGeneration has no attribute `lm_head`"
+                 },
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniModelIntegrationTest::test_small_model_integration_test_batch",
+                     "trace": "(line 542) ValueError: The current `device_map` had weights offloaded to the disk, which needed to be re-saved. This is either because the weights are not in `safetensors` format, or because the model uses an internal weight format different than the one saved (i.e. most MoE models). Please provide an `offload_folder` for them in `from_pretrained`."
+                 },
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniModelIntegrationTest::test_small_model_integration_test_multiturn",
+                     "trace": "(line 725) AttributeError: Qwen3OmniMoeTalkerForConditionalGeneration has no attribute `lm_head`"
+                 },
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniModelIntegrationTest::test_small_model_integration_test_w_audio",
+                     "trace": "(line 542) ValueError: The current `device_map` had weights offloaded to the disk, which needed to be re-saved. This is either because the weights are not in `safetensors` format, or because the model uses an internal weight format different than the one saved (i.e. most MoE models). Please provide an `offload_folder` for them in `from_pretrained`."
+                 }
+             ],
+             "multi": [
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniMoeThinkerForConditionalGenerationModelTest::test_model_parallelism",
+                     "trace": "(line 708) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0!"
+                 },
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniMoeThinkerForConditionalGenerationModelTest::test_multi_gpu_data_parallel_forward",
+                     "trace": "(line 769) StopIteration: Caught StopIteration in replica 1 on device 1."
+                 },
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniModelIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 725) AttributeError: Qwen3OmniMoeTalkerForConditionalGeneration has no attribute `lm_head`"
+                 },
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniModelIntegrationTest::test_small_model_integration_test_batch",
+                     "trace": "(line 725) AttributeError: Qwen3OmniMoeTalkerForConditionalGeneration has no attribute `lm_head`"
+                 },
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniModelIntegrationTest::test_small_model_integration_test_multiturn",
+                     "trace": "(line 725) AttributeError: Qwen3OmniMoeTalkerForConditionalGeneration has no attribute `lm_head`"
+                 },
+                 {
+                     "line": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen3OmniModelIntegrationTest::test_small_model_integration_test_w_audio",
+                     "trace": "(line 725) AttributeError: Qwen3OmniMoeTalkerForConditionalGeneration has no attribute `lm_head`"
+                 }
+             ]
+         },
+         "job_link": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164277",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164300"
+         },
+         "captured_info": {}
+     },
+     "models_video_llava": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 4,
+                 "multi": 4
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 0,
+         "success": 275,
+         "skipped": 77,
+         "time_spent": [
+             183.86,
+             185.63
+         ],
+         "error": false,
+         "failures": {
+             "single": [
+                 {
+                     "line": "tests/models/video_llava/test_modeling_video_llava.py::VideoLlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/video_llava/test_modeling_video_llava.py::VideoLlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/video_llava/test_modeling_video_llava.py::VideoLlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_batched",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/video_llava/test_modeling_video_llava.py::VideoLlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_mixed_inputs",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 }
+             ],
+             "multi": [
+                 {
+                     "line": "tests/models/video_llava/test_modeling_video_llava.py::VideoLlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/video_llava/test_modeling_video_llava.py::VideoLlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/video_llava/test_modeling_video_llava.py::VideoLlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_batched",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 },
+                 {
+                     "line": "tests/models/video_llava/test_modeling_video_llava.py::VideoLlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_mixed_inputs",
+                     "trace": "(line 134) RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Half != c10::BFloat16"
+                 }
+             ]
+         },
+         "job_link": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164296",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164307"
+         },
+         "captured_info": {}
+     },
+     "models_zamba": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 2,
+                 "multi": 2
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 0,
+         "success": 147,
+         "skipped": 281,
+         "time_spent": [
+             70.21,
+             71.47
+         ],
+         "error": false,
+         "failures": {
+             "single": [
+                 {
+                     "line": "tests/models/zamba/test_modeling_zamba.py::ZambaModelIntegrationTest::test_simple_batched_generate_with_padding",
+                     "trace": "(line 536) AssertionError: '<s> [21 chars] on this lovely evening? I hope you are doing well. I am doing' != '<s> [21 chars] on this lovely evening? I hope you are all doing well. I am'"
+                 },
+                 {
+                     "line": "tests/models/zamba/test_modeling_zamba.py::ZambaModelIntegrationTest::test_simple_generate",
+                     "trace": "(line 505) AssertionError: '<s> [21 chars] on this lovely evening? I hope you are doing well. I am doing' != '<s> [21 chars] on this lovely evening? I hope you are all doing well. I am'"
+                 }
+             ],
+             "multi": [
+                 {
+                     "line": "tests/models/zamba/test_modeling_zamba.py::ZambaModelIntegrationTest::test_simple_batched_generate_with_padding",
+                     "trace": "(line 536) AssertionError: '<s> [21 chars] on this lovely evening? I hope you are doing well. I am doing' != '<s> [21 chars] on this lovely evening? I hope you are all doing well. I am'"
+                 },
+                 {
+                     "line": "tests/models/zamba/test_modeling_zamba.py::ZambaModelIntegrationTest::test_simple_generate",
+                     "trace": "(line 505) AssertionError: '<s> [21 chars] on this lovely evening? I hope you are doing well. I am doing' != '<s> [21 chars] on this lovely evening? I hope you are all doing well. I am'"
+                 }
+             ]
+         },
+         "job_link": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164288",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164308"
+         },
+         "captured_info": {
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164288#step:16:1",
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164308#step:16:1"
+         }
+     },
+     "models_zamba2": {
+         "failed": {
+             "PyTorch": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Tokenizers": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Pipelines": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Trainer": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "ONNX": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Auto": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Quantization": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             },
+             "Unclassified": {
+                 "unclassified": 0,
+                 "single": 0,
+                 "multi": 0
+             }
+         },
+         "errors": 8,
+         "success": 216,
+         "skipped": 220,
+         "time_spent": [
+             107.23,
+             107.88
+         ],
+         "error": false,
+         "failures": {},
+         "job_link": {
+             "multi": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164299",
+             "single": "https://github.com/huggingface/transformers/actions/runs/20185602908/job/57956164293"
+         },
+         "captured_info": {}
+     }
+ }
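Each `models_*` entry follows the same schema: per-framework failure counts under `failed`, aggregate `errors`/`success`/`skipped` counts, `time_spent` for the two jobs, the failing tests with truncated tracebacks under `failures`, and the GitHub Actions `job_link`s. A minimal sketch for summarizing the report, assuming a local copy saved as `model_results.json`:

```python
# Minimal sketch: summarize per-model results from model_results.json.
import json

with open("model_results.json") as f:
    results = json.load(f)

for model, report in results.items():
    # "failures" maps "single"/"multi" to lists of failing tests;
    # it is an empty dict when only collection errors occurred (e.g. models_zamba2).
    n_single = len(report["failures"].get("single", []))
    n_multi = len(report["failures"].get("multi", []))
    print(
        f"{model}: {n_single} single-GPU / {n_multi} multi-GPU failures, "
        f"{report['errors']} errors, {report['success']} passed, {report['skipped']} skipped"
    )
```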