# chessbenchmate/add_mating_data.py
"""Annotate a .bag file that stores (FEN, move, win-probability) triples with mate-in-N information produced by Stockfish.
Mate labels:
"#" – the move itself gives immediate checkmate
+N – mover can force mate in N moves (engine-reported mate distance)
-N – mover will be mated in N moves
"-" – no forced mate detected within engine depth / time limit
"""
import math
import os
import tempfile
from multiprocessing import Pool, cpu_count
from pathlib import Path
import chess
import chess.engine
import psutil
from tqdm import tqdm
from athena.datasets.chessbenchmate.utils.bagz import BagReader, BagWriter
from athena.datasets.chessbenchmate.utils.constants import CODERS
# Path to the Stockfish binary launched by each worker process.
ENGINE_PATH = "models/stockfish"
# Per-position analysis budget: 50 ms of wall-clock search per evaluation.
ENGINE_LIMIT = chess.engine.Limit(time=0.05)
# Global engine handle, one per worker process: set by init_worker() so each
# multiprocessing worker owns its own Stockfish subprocess.
engine = None
def init_worker():
    """Start a dedicated Stockfish engine for this worker process.

    Each pool worker stores its own engine subprocess in the module-global
    ``engine``; UCI engine handles cannot be shared across processes.
    """
    global engine
    engine = chess.engine.SimpleEngine.popen_uci(ENGINE_PATH)
    # One search thread per engine keeps CPU usage predictable when running
    # one engine per pool process.
    engine.configure({"Threads": 1})
    # Pool has no finalizer hook, so register close_worker() to run when the
    # worker interpreter exits. Best effort: atexit does not fire if the pool
    # is hard-terminated, but it covers normal close()/join() shutdown and
    # prevents orphaned engine subprocesses in that path.
    import atexit
    atexit.register(close_worker)
def close_worker():
    """Shut down this worker's Stockfish engine, swallowing shutdown errors."""
    global engine
    if engine is None:
        return
    try:
        engine.close()
    except Exception:
        # Best effort: a dead or unresponsive engine at teardown is not
        # actionable, so failures here are deliberately ignored.
        pass
def print_memory_usage(description=""):
    """Print this process's resident memory next to total system memory."""
    rss_mb = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024)
    total_mb = psutil.virtual_memory().total / (1024 * 1024)
    print(
        f"{description}Memory usage: {rss_mb:.2f} MB / {total_mb:.2f} MB total",
        flush=True,
    )
def annotate_single_record(record: bytes) -> bytes:
    """Re-encode one bag record with a fresh Stockfish evaluation.

    Decodes (fen, move, ...) from ``record``, evaluates the move with this
    worker's engine, and returns the record re-encoded with the new win
    probability and mate label.
    """
    global engine
    assert engine is not None, "Engine not initialized in worker"
    try:
        fen, move_str, _, _ = CODERS["action_value_with_mate"].decode(record)
        position = chess.Board(fen)
        win_prob, mate_label = stockfish_evaluate(
            position, chess.Move.from_uci(move_str), engine
        )
        assert 0.0 <= win_prob <= 1.0, f"Win probability should be between 0 and 1, got {win_prob}"
        # A label is either "#" (immediate mate), "-" (no forced mate found),
        # or a signed mate distance within a sane bound.
        is_mate_distance = isinstance(mate_label, int) and abs(mate_label) <= 100
        assert mate_label == "#" or mate_label == "-" or is_mate_distance, f"Invalid mate label: {mate_label}"
        return CODERS["action_value_with_mate"].encode((fen, move_str, win_prob, mate_label))
    except Exception as e:
        print(f"Error processing record: {e}")
        raise
def stockfish_evaluate(
    board: chess.Board, move: chess.Move, engine: chess.engine.SimpleEngine
) -> tuple[float, str | int]:
    """Evaluate `move` on `board` from the mover's point of view.

    The move is pushed temporarily and popped again before returning, so the
    caller's `board` is left in its input state (the original implementation
    left the move pushed, mutating the argument).

    Returns:
        win_prob: float in [0, 1], the mover's winning probability.
        mate_label: "#" for immediate checkmate; a signed int for a forced
            mate (positive = mover mates, negative = mover gets mated;
            distance as reported by python-chess's `Score.mate()`, i.e. in
            moves); or "-" when no forced mate was found within ENGINE_LIMIT.
    """
    mover = board.turn
    board.push(move)
    try:
        if board.is_checkmate():
            return 1.0, "#"
        info = engine.analyse(board, ENGINE_LIMIT)
        score = info.get("score")
        assert score is not None, "Engine analysis did not return a score"
        pov_score = score.pov(mover)
        if pov_score.is_mate():
            mate_val = pov_score.mate()
            if mate_val is not None:
                # Positive mate_val means mover can force mate, negative means opponent can
                win_prob = 1.0 if mate_val > 0 else 0.0
                mate_label = mate_val
            else:
                win_prob = 0.5  # Should not happen with is_mate(), but fallback
                mate_label = "-"
        else:
            cp = pov_score.score()
            assert cp is not None, "Centipawn score should not be None for non-mate scores"
            # Logistic centipawn -> win-probability mapping; 173.718 is the
            # tuned scale constant (origin not documented here — verify
            # against the dataset's original conversion).
            win_prob = 1 / (1 + math.exp(-cp / 173.718))
            win_prob = min(max(win_prob, 1e-6), 1 - 1e-6)  # clamp safely
            mate_label = "-"
        # Additional validation
        assert 0.0 <= win_prob <= 1.0, f"Win probability out of bounds: {win_prob}"
        return win_prob, mate_label
    finally:
        # Undo the trial move so the board is returned unmodified.
        board.pop()
def add_mate_annotations(bag_path: Path) -> None:
    """Annotate a .bag file with mate-in-N information in-place.

    Records are read fully into memory, annotated in parallel by a process
    pool (one Stockfish engine per worker, see init_worker), written to a
    temporary file in the same directory, and atomically moved over the
    original on success. On any failure the temp file is removed and the
    original is left untouched.
    """
    if not bag_path.exists():
        print(f"File not found: {bag_path}")
        return
    reader = BagReader(str(bag_path))
    records = list(reader)
    if len(records) == 0:
        print(f"No records found in {bag_path}")
        return
    # Create the temp file next to the original so os.replace() stays on one
    # filesystem and the final swap is atomic. Only the name is needed, so
    # the handle is closed immediately instead of being held during the run.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".bag", dir=bag_path.parent) as temp_file:
        temp_path = temp_file.name
    try:
        writer = BagWriter(temp_path)
        print(
            f"Processing {len(records)} records from {bag_path.name} using {cpu_count()} processes"
        )
        with Pool(processes=cpu_count(), initializer=init_worker) as pool:
            # imap preserves record order; chunksize > 1 reduces per-record
            # IPC overhead for these small payloads.
            for i, annotated_record in tqdm(
                enumerate(pool.imap(annotate_single_record, records, chunksize=64)),
                total=len(records),
                unit="record",
                desc=f"Annotating {bag_path.name}",
            ):
                writer.write(annotated_record)
                if i % 10000 == 0:
                    print_memory_usage(f"Processed {i} records. ")
        writer.close()
        # Replace original file with the temporary file
        os.replace(temp_path, bag_path)
        print(f"Successfully annotated {bag_path} with {len(records)} records")
    except Exception as e:
        # Clean up temporary file on error, then re-raise with the original
        # traceback (bare `raise`; `raise e` would rewrite it).
        if os.path.exists(temp_path):
            os.unlink(temp_path)
        print(f"Error processing {bag_path}: {e}")
        raise
def main():
    """Process train-split .bag files, optionally sharded by a SLURM array.

    Each SLURM array task (or a single local run, the default) takes one
    contiguous slice of the sorted file list, so tasks never overlap.
    """
    data_dir = Path("src/athena/datasets/chessbenchmate/data/train")
    # sorted() accepts any iterable; wrapping glob() in list() was redundant.
    bag_files = sorted(data_dir.glob("*.bag"))
    num_files = len(bag_files)
    print(f"Found {num_files} bag files in {data_dir}")
    # Detect SLURM environment variables; the defaults make a local run
    # behave like a 1-task array processing everything.
    slurm_task_id = int(os.environ.get("SLURM_ARRAY_TASK_ID", "0"))
    slurm_array_size = int(os.environ.get("SLURM_ARRAY_TASK_COUNT", "1"))
    # Ceiling division so every file lands in exactly one task's slice.
    chunk_size = (num_files + slurm_array_size - 1) // slurm_array_size
    start = slurm_task_id * chunk_size
    end = min(start + chunk_size, num_files)
    my_files = bag_files[start:end]
    print(
        f"[Job {slurm_task_id}/{slurm_array_size}] Processing {len(my_files)} files "
        f"from index {start} to {end - 1}"
    )
    for bag_file in tqdm(my_files, total=len(my_files), desc=f"Job {slurm_task_id}"):
        add_mate_annotations(bag_file)
    print(f"[Job {slurm_task_id}] Completed {len(my_files)} files successfully.")
# Script entry point: run directly (or as a SLURM array task) to annotate bags.
if __name__ == "__main__":
    main()