AndreasThinks committed on
Commit
4c4778a
·
verified ·
1 Parent(s): b9d3d72

Upload eval_standard_benchmarks.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. eval_standard_benchmarks.py +2 -0
eval_standard_benchmarks.py CHANGED
@@ -12,6 +12,8 @@ benchmarks (MMLU, HellaSwag, ARC, etc.) using lm-evaluation-harness.
12
  # "peft>=0.7.0",
13
  # "huggingface-hub>=0.20.0",
14
  # "accelerate>=0.20.0",
 
 
15
  # ]
16
  # ///
17
 
 
12
  # "peft>=0.7.0",
13
  # "huggingface-hub>=0.20.0",
14
  # "accelerate>=0.20.0",
15
+ # "protobuf>=3.20.0",
16
+ # "sentencepiece>=0.1.99",
17
  # ]
18
  # ///
19