hf-transformers-bot committed on
Commit
5fa01d6
·
verified ·
1 Parent(s): 760ab5c

Upload 2025-11-18/ci_results_run_models_gpu/new_failures.json with huggingface_hub

Browse files
2025-11-18/ci_results_run_models_gpu/new_failures.json ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "albert": {
3
+ "single-gpu": [],
4
+ "multi-gpu": [
5
+ "tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_flash_attn_2_inference_equivalence_right_padding"
6
+ ]
7
+ },
8
+ "bart": {
9
+ "single-gpu": [
10
+ "tests/models/bart/test_modeling_bart.py::BartModelTest::test_flash_attn_2_fp32_ln",
11
+ "tests/models/bart/test_modeling_bart.py::BartStandaloneDecoderModelTest::test_flash_attn_2_fp32_ln"
12
+ ],
13
+ "multi-gpu": []
14
+ },
15
+ "bert": {
16
+ "single-gpu": [
17
+ "tests/models/bert/test_modeling_bert.py::BertModelTest::test_flash_attn_2_fp32_ln",
18
+ "tests/models/bert/test_modeling_bert.py::BertModelTest::test_flash_attn_2_inference_equivalence"
19
+ ],
20
+ "multi-gpu": []
21
+ },
22
+ "bert_generation": {
23
+ "single-gpu": [
24
+ "tests/models/bert_generation/test_modeling_bert_generation.py::BertGenerationEncoderTest::test_flash_attn_2_fp32_ln"
25
+ ],
26
+ "multi-gpu": []
27
+ },
28
+ "data2vec": {
29
+ "single-gpu": [
30
+ "tests/models/data2vec/test_modeling_data2vec_text.py::Data2VecTextModelTest::test_flash_attn_2_inference_equivalence_right_padding"
31
+ ],
32
+ "multi-gpu": []
33
+ },
34
+ "distilbert": {
35
+ "single-gpu": [],
36
+ "multi-gpu": [
37
+ "tests/models/distilbert/test_modeling_distilbert.py::DistilBertModelTest::test_flash_attn_2_inference_equivalence_right_padding"
38
+ ]
39
+ },
40
+ "electra": {
41
+ "single-gpu": [
42
+ "tests/models/electra/test_modeling_electra.py::ElectraModelTest::test_flash_attn_2_inference_equivalence"
43
+ ],
44
+ "multi-gpu": []
45
+ },
46
+ "ernie": {
47
+ "single-gpu": [
48
+ "tests/models/ernie/test_modeling_ernie.py::ErnieModelTest::test_flash_attn_2_inference_equivalence_right_padding"
49
+ ],
50
+ "multi-gpu": []
51
+ },
52
+ "ernie4_5_moe": {
53
+ "single-gpu": [
54
+ "tests/models/ernie4_5_moe/test_modeling_ernie4_5_moe.py::Ernie4_5_MoeModelTest::test_flash_attn_2_equivalence"
55
+ ],
56
+ "multi-gpu": []
57
+ },
58
+ "flex_olmo": {
59
+ "single-gpu": [
60
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attn_2_equivalence"
61
+ ],
62
+ "multi-gpu": [
63
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attn_2_equivalence"
64
+ ]
65
+ },
66
+ "hunyuan_v1_moe": {
67
+ "single-gpu": [],
68
+ "multi-gpu": [
69
+ "tests/models/hunyuan_v1_moe/test_modeling_hunyuan_v1_moe.py::HunYuanMoEV1ModelTest::test_flash_attn_2_equivalence"
70
+ ]
71
+ },
72
+ "mixtral": {
73
+ "single-gpu": [
74
+ "tests/models/mixtral/test_modeling_mixtral.py::MixtralModelTest::test_flash_attn_2_equivalence",
75
+ "tests/models/mixtral/test_modeling_mixtral.py::MixtralModelTest::test_flash_attn_2_fp32_ln"
76
+ ],
77
+ "multi-gpu": [
78
+ "tests/models/mixtral/test_modeling_mixtral.py::MixtralModelTest::test_flash_attn_2_equivalence"
79
+ ]
80
+ },
81
+ "mllama": {
82
+ "single-gpu": [
83
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForCausalLMModelTest::test_flash_attn_2_fp32_ln",
84
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_eager_matches_fa2_generate",
85
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_flash_attn_2_fp32_ln",
86
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
87
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
88
+ ],
89
+ "multi-gpu": []
90
+ },
91
+ "mm_grounding_dino": {
92
+ "single-gpu": [
93
+ "tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py::MMGroundingDinoModelTest::test_flash_attn_2_inference_equivalence",
94
+ "tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py::MMGroundingDinoModelTest::test_flash_attn_2_inference_equivalence_right_padding"
95
+ ],
96
+ "multi-gpu": []
97
+ },
98
+ "modernbert_decoder": {
99
+ "single-gpu": [],
100
+ "multi-gpu": [
101
+ "tests/models/modernbert_decoder/test_modeling_modernbert_decoder.py::ModernBertDecoderModelTest::test_flash_attn_2_fp32_ln"
102
+ ]
103
+ },
104
+ "moshi": {
105
+ "single-gpu": [],
106
+ "multi-gpu": [
107
+ "tests/models/moshi/test_modeling_moshi.py::MoshiDecoderTest::test_flash_attn_2_fp32_ln",
108
+ "tests/models/moshi/test_modeling_moshi.py::MoshiDecoderTest::test_flash_attn_2_inference_equivalence",
109
+ "tests/models/moshi/test_modeling_moshi.py::MoshiDecoderTest::test_flash_attn_2_inference_equivalence_right_padding",
110
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_eager_matches_fa2_generate",
111
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_fp32_ln",
112
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_from_config",
113
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_inference_equivalence",
114
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_inference_equivalence_right_padding"
115
+ ]
116
+ },
117
+ "qwen2_5_vl": {
118
+ "single-gpu": [],
119
+ "multi-gpu": [
120
+ "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLModelTest::test_flash_attn_2_fp32_ln",
121
+ "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_wo_image_flashatt2"
122
+ ]
123
+ },
124
+ "roberta": {
125
+ "single-gpu": [],
126
+ "multi-gpu": [
127
+ "tests/models/roberta/test_modeling_roberta.py::RobertaModelTest::test_flash_attn_2_inference_equivalence_right_padding"
128
+ ]
129
+ },
130
+ "roberta_prelayernorm": {
131
+ "single-gpu": [
132
+ "tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py::RobertaPreLayerNormModelTest::test_flash_attn_2_inference_equivalence"
133
+ ],
134
+ "multi-gpu": []
135
+ },
136
+ "xmod": {
137
+ "single-gpu": [
138
+ "tests/models/xmod/test_modeling_xmod.py::XmodModelTest::test_flash_attn_2_inference_equivalence_right_padding"
139
+ ],
140
+ "multi-gpu": [
141
+ "tests/models/xmod/test_modeling_xmod.py::XmodModelTest::test_flash_attn_2_inference_equivalence"
142
+ ]
143
+ }
144
+ }