hf-transformers-bot committed on
Commit
2eb43ae
·
verified ·
1 Parent(s): 6d8bb8e

Upload 2025-12-06/ci_results_run_models_gpu/new_failures.json with huggingface_hub

Browse files
2025-12-06/ci_results_run_models_gpu/new_failures.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "electra": {
3
+ "single-gpu": [
4
+ "tests/models/electra/test_modeling_electra.py::ElectraModelTest::test_flash_attn_2_inference_equivalence_right_padding"
5
+ ],
6
+ "multi-gpu": []
7
+ },
8
+ "ernie": {
9
+ "single-gpu": [
10
+ "tests/models/ernie/test_modeling_ernie.py::ErnieModelTest::test_flash_attn_2_inference_equivalence_right_padding"
11
+ ],
12
+ "multi-gpu": []
13
+ },
14
+ "flex_olmo": {
15
+ "single-gpu": [
16
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attn_2_equivalence"
17
+ ],
18
+ "multi-gpu": [
19
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
20
+ ]
21
+ },
22
+ "gpt_oss": {
23
+ "single-gpu": [
24
+ "tests/models/gpt_oss/test_modeling_gpt_oss.py::GptOssModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
25
+ ],
26
+ "multi-gpu": []
27
+ },
28
+ "mixtral": {
29
+ "single-gpu": [
30
+ "tests/models/mixtral/test_modeling_mixtral.py::MixtralModelTest::test_flash_attn_2_equivalence"
31
+ ],
32
+ "multi-gpu": []
33
+ },
34
+ "roberta": {
35
+ "single-gpu": [],
36
+ "multi-gpu": [
37
+ "tests/models/roberta/test_modeling_roberta.py::RobertaModelTest::test_flash_attn_2_inference_equivalence_right_padding"
38
+ ]
39
+ },
40
+ "xmod": {
41
+ "single-gpu": [
42
+ "tests/models/xmod/test_modeling_xmod.py::XmodModelTest::test_flash_attn_2_inference_equivalence_right_padding"
43
+ ],
44
+ "multi-gpu": []
45
+ }
46
+ }