hf-transformers-bot committed on
Commit
7936868
·
verified ·
1 Parent(s): 4d38ed4

Upload 2025-12-12/ci_results_run_models_gpu/new_failures.json with huggingface_hub
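The commit message indicates the report was pushed with the huggingface_hub client. A minimal sketch of how such an upload could look, assuming a Python environment with huggingface_hub installed; the repo ID and repo type below are illustrative assumptions, not taken from the commit:

from huggingface_hub import HfApi

api = HfApi()  # uses a token from the environment or a prior `huggingface-cli login`

# Push the local CI report to the path shown in this commit.
# repo_id="your-org/ci-results" and repo_type="dataset" are placeholders for illustration.
api.upload_file(
    path_or_fileobj="new_failures.json",
    path_in_repo="2025-12-12/ci_results_run_models_gpu/new_failures.json",
    repo_id="your-org/ci-results",
    repo_type="dataset",
    commit_message="Upload 2025-12-12/ci_results_run_models_gpu/new_failures.json with huggingface_hub",
)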

2025-12-12/ci_results_run_models_gpu/new_failures.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "albert": {
+     "single-gpu": [],
+     "multi-gpu": [
+       "tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+     ]
+   },
+   "bert": {
+     "single-gpu": [
+       "tests/models/bert/test_modeling_bert.py::BertModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+     ],
+     "multi-gpu": []
+   },
+   "electra": {
+     "single-gpu": [
+       "tests/models/electra/test_modeling_electra.py::ElectraModelTest::test_flash_attn_2_inference_equivalence"
+     ],
+     "multi-gpu": []
+   },
+   "idefics2": {
+     "single-gpu": [
+       "tests/models/idefics2/test_modeling_idefics2.py::Idefics2ForConditionalGenerationIntegrationTest::test_flash_attn_2_eager_equivalence"
+     ],
+     "multi-gpu": [
+       "tests/models/idefics2/test_modeling_idefics2.py::Idefics2ForConditionalGenerationIntegrationTest::test_flash_attn_2_eager_equivalence"
+     ]
+   },
+   "nemotron": {
+     "single-gpu": [],
+     "multi-gpu": [
+       "tests/models/nemotron/test_modeling_nemotron.py::NemotronModelTest::test_flash_attn_2_equivalence"
+     ]
+   },
+   "roberta": {
+     "single-gpu": [
+       "tests/models/roberta/test_modeling_roberta.py::RobertaModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+     ],
+     "multi-gpu": []
+   },
+   "roberta_prelayernorm": {
+     "single-gpu": [
+       "tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py::RobertaPreLayerNormModelTest::test_flash_attn_2_inference_equivalence"
+     ],
+     "multi-gpu": []
+   }
+ }
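Each entry maps a model name to the pytest node IDs that newly failed on the single-GPU and multi-GPU runners. A small sketch of how such a report could be consumed, for example to list the failures for local reproduction; the schema and file name are taken from the diff above, everything else is an assumption:

import json

# Load the report; schema: model name -> {"single-gpu": [...], "multi-gpu": [...]}.
with open("new_failures.json") as f:
    report = json.load(f)

# Flatten every failing pytest node ID across both GPU setups.
failed = sorted(
    {node_id for buckets in report.values() for ids in buckets.values() for node_id in ids}
)

print(f"{len(failed)} new failing tests")
for node_id in failed:
    print(node_id)

# Any node ID can be passed straight back to pytest to reproduce on a suitable machine,
# e.g. `pytest tests/models/electra/test_modeling_electra.py::ElectraModelTest::test_flash_attn_2_inference_equivalence`.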