DarshanScripts committed on
Commit
06bc894
·
verified ·
1 Parent(s): ec70901

Upload stratego\benchmarking\run_benchmark.py with huggingface_hub

Browse files
stratego/benchmarking/run_benchmark.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # stratego/benchmarking/run_benchmark.py
2
+
3
+ from .run_game import run_game
4
+ from .metrics import init_metrics, update_metrics, summarize
5
+ from .csv_logger import create_benchmark_csv, write_summary_csv
6
+
7
+
8
def run_benchmark(agent0, agent1, games=10, size=6):
    """Play a series of games between two agents, logging each game to CSV.

    Each game is seeded with its index so benchmark runs are reproducible.

    Args:
        agent0: first player; must expose a ``model_name`` attribute.
        agent1: second player; must expose a ``model_name`` attribute.
        games: number of games to play (seeds run from 0 to games - 1).
        size: board size, passed through to ``run_game``.

    Returns:
        Tuple of (summary produced by ``summarize``, path/name of the
        per-game benchmark CSV from ``create_benchmark_csv``).
    """
    metrics = init_metrics()

    f, writer, benchmark_csv = create_benchmark_csv(games)
    # try/finally guarantees the CSV file is closed even if a game or a
    # write raises — the original leaked the handle on any exception.
    try:
        for game_id in range(games):
            # Seed with the game index for deterministic, repeatable runs.
            result = run_game(agent0, agent1, size=size, seed=game_id)

            writer.writerow([
                game_id,
                agent0.model_name,
                agent1.model_name,
                size,
                result["winner"],
                result["turns"],
                result["invalid_moves_p0"],
                result["invalid_moves_p1"],
                result["repetitions"],
                result["flag_captured"],
                result["game_end_reason"],
            ])

            update_metrics(metrics, result)
    finally:
        f.close()

    summary = summarize(metrics)
    write_summary_csv(summary, benchmark_csv)

    return summary, benchmark_csv