joshuakgao commited on
Commit
a452ee2
·
verified ·
1 Parent(s): 6e9f6fd

Upload folder using huggingface_hub

Browse files
__pycache__/dataset.cpython-313.pyc ADDED
Binary file (5.32 kB). View file
 
add_mating_data.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Annotate a .bag file that stores (FEN, move, win-probability) triples with mate-in-N information produced by Stockfish.
2
+
3
+ Mate labels:
4
+ "#" – the move itself gives immediate checkmate
5
+ +N – mover can force mate in N plies
6
+ -N – mover will be mated in N plies
7
+ "-" – no forced mate detected within engine depth / time limit
8
+ """
9
+
10
+ import argparse
11
+ import os
12
+ from multiprocessing import Pool, cpu_count
13
+
14
+ import chess
15
+ import chess.engine
16
+ from tqdm import tqdm
17
+
18
+ from athena.datasets.chessbenchmate.utils.bagz import BagReader, BagWriter
19
+ from athena.datasets.chessbenchmate.utils.constants import CODERS
20
+
21
+ ENGINE_PATH = "models/stockfish"
22
+ ENGINE_LIMIT = chess.engine.Limit(time=0.05)
23
+
24
+
25
def _probe_forced_mate(board, mover):
    """Ask Stockfish whether a forced mate exists in `board` for `mover`.

    Returns the number of plies (positive if `mover` mates, negative if
    `mover` gets mated) or "-" when no forced mate is reported.

    NOTE(review): this spawns a fresh Stockfish process per call, which is
    expensive when invoked per record from a multiprocessing pool — consider
    one persistent engine per worker process.
    """
    with chess.engine.SimpleEngine.popen_uci(ENGINE_PATH) as engine:
        analysis = engine.analyse(board, ENGINE_LIMIT)
    score = analysis.get("score")
    if not (score and score.is_mate()):
        return "-"
    plies = score.pov(mover).mate()
    return "-" if plies is None else plies


def annotate_single_record(record: bytes) -> bytes:
    """Annotate a single record with mate-in-N information.

    Decodes a (fen, move, win_prob) record, plays the move, and attaches a
    mate label: "#" for immediate checkmate, an int for a forced mate found
    by the engine, "-" otherwise. Engine analysis is only attempted when the
    stored win probability is exactly 1.0 or 0.0 (a decisive evaluation).
    """
    fen, uci_move, win_prob = CODERS["action_value"].decode(record)
    board = chess.Board(fen)
    side_to_move = board.turn
    board.push(chess.Move.from_uci(uci_move))

    label: str | int
    if board.is_checkmate():
        label = "#"
    elif win_prob in (1.0, 0.0):
        label = _probe_forced_mate(board, side_to_move)
    else:
        label = "-"

    return CODERS["action_value_with_mate"].encode((fen, uci_move, win_prob, label))
50
+
51
+
52
def add_mate_annotations(
    input_bag: str, output_bag: str, max_datapoints: int | None = None
) -> None:
    """Annotate a .bag file with mate-in-N information.

    Records are annotated in parallel (one worker per CPU core); output order
    matches input order because ``Pool.imap`` preserves ordering.

    Args:
        input_bag: Path to the source .bag file of (fen, move, win_prob) records.
        output_bag: Path of the annotated .bag file to create.
        max_datapoints: If not None, only the first ``max_datapoints`` records
            are processed (0 therefore means "process nothing", unlike the
            previous behavior where 0 silently processed everything).
    """
    reader = BagReader(input_bag)

    # Materialize up front so worker processes receive plain bytes records.
    records = list(reader)
    if max_datapoints is not None:
        records = records[:max_datapoints]

    writer = BagWriter(output_bag)
    try:
        print(cpu_count(), flush=True)
        with Pool(processes=cpu_count()) as pool:
            for annotated_record in tqdm(
                pool.imap(annotate_single_record, records),
                total=len(records),
                unit="record",
            ):
                writer.write(annotated_record)
    finally:
        # Always close the writer: close() merges the limits index into the
        # data file and removes the temporary "limits.*" sidecar, so skipping
        # it on error would leak open handles and a stray file.
        writer.close()
71
+
72
+
73
def main():
    """Main entry point to parse command line arguments to add mating data to chessbench."""
    parser = argparse.ArgumentParser(description="Annotate a .bag file with mate-in-N information.")
    parser.add_argument("--input_bag", required=True, help="Path to input .bag file")
    parser.add_argument("--output_bag", required=True, help="Path to output annotated .bag file")
    parser.add_argument(
        "--max_datapoints",
        type=int,
        default=None,
        help="Maximum number of positions to analyse",
    )
    args = parser.parse_args()

    # Ensure the output directory exists. os.path.dirname() returns "" for a
    # bare filename, and os.makedirs("") raises FileNotFoundError — so only
    # create the directory when there actually is one in the path.
    out_dir = os.path.dirname(args.output_bag)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    add_mate_annotations(args.input_bag, args.output_bag, max_datapoints=args.max_datapoints)


if __name__ == "__main__":
    main()
dataset.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Chessbench dataset of records with a fen board position, a uci move, a win probability after the uci move is made, and a checkmate status."""
2
+
3
+ import bisect
4
+ from pathlib import Path
5
+ from typing import Dict, List, Tuple
6
+
7
+ from torch.utils.data import Dataset
8
+
9
+ from athena.datasets.chessbenchmate.utils import constants
10
+ from athena.datasets.chessbenchmate.utils.bagz import BagReader
11
+
12
+
13
class ChessbenchDataset(Dataset):
    """Chessbench dataset of records with a fen board position, a uci move, a win probability after the uci move is made, and a checkmate status."""

    def __init__(self, dir: str, mode: str = "train"):
        """Initialize the ChessbenchDataset.

        Args:
            dir: Root directory containing train/test subdirectories
            mode: Either "train" or "test"

        Raises:
            ValueError: If no .bag files are found under ``dir/mode``.
        """
        self.dir = Path(dir)
        self.mode = mode
        self.data_dir = self.dir / mode

        # Collect and cache all bags with their lengths.
        self.bags: List[Tuple[Path, int]] = []
        # Running totals of record counts, parallel to self.bags; used for
        # global-index -> (bag, local-index) lookup via bisect.
        self._cumulative_lengths: List[int] = []
        self._open_readers: Dict[Path, BagReader] = {}  # Cache for open readers
        total_records = 0

        # Find all bag files in the specified directory (sorted so the global
        # index order is deterministic across runs).
        for bag_path in sorted(self.data_dir.glob("*.bag")):
            bag_reader = BagReader(str(bag_path))
            bag_length = len(bag_reader)
            self.bags.append((bag_path, bag_length))
            total_records += bag_length
            self._cumulative_lengths.append(total_records)
            self._open_readers[bag_path] = bag_reader

        self._total_length = total_records

        if len(self.bags) == 0:
            raise ValueError(f"No .bag files found in {self.data_dir}")

    def __len__(self):
        """Returns the total number of records in the dataset."""
        return self._total_length

    def __getitem__(self, idx) -> Tuple[str, str, float, str | int]:
        """Gets a record from the dataset.

        Args:
            idx: The index of the record to retrieve (0-based; negative
                indices are rejected).

        Returns:
            tuple: (fen_string, move_uci, win_probability, mate_label) where
                mate_label is an int (forced mate in N plies) or a
                one-character string ("#" for immediate mate, "-" for none).

        Raises:
            IndexError: If ``idx`` is outside [0, len(self)).
        """
        if idx < 0 or idx >= len(self):
            raise IndexError(f"Index {idx} out of range [0, {len(self)})")

        # Find which bag contains this index: first bag whose cumulative
        # length exceeds idx.
        bag_idx = bisect.bisect_right(self._cumulative_lengths, idx)

        # Calculate index within the specific bag.
        if bag_idx > 0:
            idx_in_bag = idx - self._cumulative_lengths[bag_idx - 1]
        else:
            idx_in_bag = idx

        # Get or create the reader (normally already cached from __init__).
        bag_path, _ = self.bags[bag_idx]
        if bag_path not in self._open_readers:
            self._open_readers[bag_path] = BagReader(str(bag_path))

        # Get and parse the record.
        record = self._open_readers[bag_path][idx_in_bag]
        fen, move, win_prob, mate = constants.CODERS["action_value_with_mate"].decode(record)
        return fen, move, win_prob, mate

    @property
    def num_bags(self) -> int:
        """Return the number of bag files in this dataset."""
        return len(self.bags)
87
+
88
+
89
if __name__ == "__main__":
    # Example usage: load the training split and dump every record.
    dataset = ChessbenchDataset(dir="src/athena/datasets/chessbenchmate/data", mode="train")
    total = len(dataset)
    print(f"Total records: {total}")
    print(f"Number of bags: {dataset.num_bags}")

    for i in range(total):
        fen, move, win_prob, mate = dataset[i]
        print(f"Record {i}: {fen}, {move}, {win_prob}, {mate}")
download.sh ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Download all shards of the chessbenchmate dataset, merge, and extract.
# Abort on the first failed download: a missing part would otherwise produce
# a silently corrupt merged tar below.
set -e

BASE_URL="https://huggingface.co/datasets/joshuakgao/chessbenchmate/resolve/main"

# Download parts 000 through 117.
# (The loop previously started at 038 — a leftover resume point — which would
# skip parts 000-037 and break the concatenation below.)
for i in $(seq -w 000 117); do
    URL="${BASE_URL}/chessbenchmate.tar.part${i}"
    echo "Downloading ${URL}"
    wget "${URL}"
done

# Merge parts into one tar file
cat chessbenchmate.tar.part* > chessbenchmate.tar

# Extract the merged tar
tar -xvf chessbenchmate.tar
utils/__pycache__/bagz.cpython-313.pyc ADDED
Binary file (15.1 kB). View file
 
utils/__pycache__/constants.cpython-313.pyc ADDED
Binary file (2.8 kB). View file
 
utils/bagz.py ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Bagz file reader/writer and PyGrain-compatible data source for POSIX systems.
2
+
3
+ Bagz is a file format for storing a sequence of string records, typically
4
+ serialised protocol buffers. It supports fast index based look-up.
5
+ """
6
+
7
+ import bisect
8
+ import itertools
9
+ import mmap
10
+ import os
11
+ import re
12
+ import shutil
13
+ import struct
14
+ from collections.abc import Sequence
15
+ from typing import Any, SupportsIndex
16
+
17
+ import zstandard as zstd
18
+ from typing_extensions import Self
19
+
20
+
21
+ class BagFileReader(Sequence[bytes]):
22
+ """Reader for single Bagz files."""
23
+
24
+ def __init__(
25
+ self,
26
+ filename: str,
27
+ *,
28
+ separate_limits: bool = False,
29
+ decompress: bool | None = None,
30
+ ) -> None:
31
+ """Creates a BagFileReader.
32
+
33
+ Args:
34
+ filename: The name of the single Bagz file to read.
35
+ separate_limits: Whether the limits are stored in a separate file.
36
+ decompress: Whether to decompress the records. If None, uses the file
37
+ extension to determine whether to decompress.
38
+ """
39
+ if decompress or (decompress is None and filename.endswith(".bagz")):
40
+ self._process = lambda x: zstd.decompress(x) if x else x
41
+ else:
42
+ self._process = lambda x: x
43
+ self._filename = filename
44
+ fd = os.open(filename, os.O_RDONLY)
45
+ try:
46
+ self._records = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
47
+ file_size = self._records.size()
48
+ except ValueError:
49
+ self._records = b""
50
+ file_size = 0
51
+ finally:
52
+ os.close(fd)
53
+ if separate_limits:
54
+ directory, name = os.path.split(filename)
55
+ fd = os.open(os.path.join(directory, "limits." + name), os.O_RDONLY)
56
+ try:
57
+ self._limits = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
58
+ index_size = self._limits.size()
59
+ except ValueError:
60
+ self._limits = b""
61
+ index_size = 0
62
+ finally:
63
+ os.close(fd)
64
+ index_start = 0
65
+ else:
66
+ if 0 < file_size < 8:
67
+ raise ValueError("Bagz file too small")
68
+ self._limits = self._records
69
+ if file_size:
70
+ (index_start,) = struct.unpack("<Q", self._records[-8:])
71
+ else:
72
+ index_start = 0
73
+ assert file_size >= index_start
74
+ index_size = file_size - index_start
75
+ assert index_size % 8 == 0
76
+ self._num_records = index_size // 8
77
+ self._limits_start = index_start
78
+
79
+ def __len__(self) -> int:
80
+ """Returns the number of records in the Bagz file."""
81
+ return self._num_records
82
+
83
+ def __getitem__(self, index: SupportsIndex) -> bytes:
84
+ """Returns a record from the Bagz file."""
85
+ i = index.__index__()
86
+ if not 0 <= i < self._num_records:
87
+ raise IndexError("bagz.BragReader index out of range")
88
+ end = i * 8 + self._limits_start
89
+ if i:
90
+ rec_range = struct.unpack("<2q", self._limits[end - 8 : end + 8])
91
+ else:
92
+ rec_range = (0, *struct.unpack("<q", self._limits[end : end + 8]))
93
+ return self._process(self._records[slice(*rec_range)])
94
+
95
+
96
class BagShardReader(Sequence[bytes]):
    """Reader for sharded Bagz files."""

    def __init__(
        self,
        filename: str,
        *,
        separate_limits: bool = False,
        decompress: bool | None = None,
    ) -> None:
        """Creates a BagShardReader.

        Args:
            filename: The name of the sharded Bagz file to read.
            separate_limits: Whether the limits are stored in a separate file.
            decompress: Whether to decompress the records. If None, uses the file
                extension to determine whether to decompress.
        """
        # Exactly one "@N" shard marker must appear in the filename.
        matches = re.findall(r"@(\d+)", filename)
        assert len(matches) == 1
        num_files = int(matches[0])
        # The "-{idx:05d}-of-{num:05d}" naming scheme caps shards at 5 digits.
        assert num_files < 100_000
        # Open one BagFileReader per shard, rewriting e.g.
        # "x@3.bag" -> "x-00000-of-00003.bag", "x-00001-of-00003.bag", ...
        self._bags = tuple(
            BagFileReader(
                filename=re.sub(r"@(\d+)", f"-{idx:05d}-of-{num_files:05d}", filename),
                separate_limits=separate_limits,
                decompress=decompress,
            )
            for idx in range(num_files)
        )
        # Cumulative record counts: _accum[i] is the total number of records
        # in shards 0..i; used to map a global index to a shard.
        self._accum = tuple(itertools.accumulate(map(len, self._bags)))

    def __len__(self) -> int:
        """Returns the number of records in the Bagz file."""
        return self._accum[-1]

    def __getitem__(self, index: int) -> bytes:
        """Returns a record from the Bagz file."""
        if index < 0:
            # Support negative indexing from the end of the combined sequence.
            index += self._accum[-1]
        # Find the first shard whose cumulative count exceeds `index`; when it
        # is not shard 0 (walrus result non-zero), rebase the index relative
        # to that shard's first record.
        if seqn := bisect.bisect_left(self._accum, index + 1):
            index -= self._accum[seqn - 1]
        return self._bags[seqn][index]
139
+
140
+
141
class BagReader(Sequence[bytes]):
    """Reader for Bagz files."""

    def __init__(
        self,
        filename: str,
        *,
        separate_limits: bool = False,
        decompress: bool | None = None,
    ) -> None:
        """Creates a BagReader.

        Args:
            filename: The name of the Bagz file to read. Supports the @N shard syntax
                (where @0 corresponds to the single file case). If the shard syntax does
                not parse, then `filename` is treated as a single file.
            separate_limits: Whether the limits are stored in a separate file.
            decompress: Whether to decompress the records. If None, uses the file
                extension to determine whether to decompress.
        """
        if matches := re.findall(r"@(\d+)", filename):
            assert len(matches) == 1
            # Bug fix: was `int(matches[0]) != "0"` — an int compared to a
            # str is always unequal, so the @0 single-file path was
            # unreachable and every @N name went to BagShardReader.
            if int(matches[0]) != 0:
                reader_class = BagShardReader
            else:
                # Bug fix: strip the whole "@0" marker. The old
                # `filename.replace(matches[0], "")` removed every "0" in the
                # path and left the "@" behind.
                filename = filename.replace(f"@{matches[0]}", "")
                reader_class = BagFileReader
        else:
            reader_class = BagFileReader

        self._reader = reader_class(
            filename=filename,
            separate_limits=separate_limits,
            decompress=decompress,
        )

    def __len__(self) -> int:
        """Returns the number of records in the Bagz file."""
        return len(self._reader)

    def __getitem__(self, index: SupportsIndex) -> bytes:
        """Returns a record from the Bagz file."""
        return self._reader[index]
184
+
185
+
186
+ class BagWriter:
187
+ """Writer for Bagz files."""
188
+
189
+ def __init__(
190
+ self,
191
+ filename: str,
192
+ *,
193
+ separate_limits: bool = False,
194
+ compress: bool | None = None,
195
+ compression_level: int = 0,
196
+ ) -> None:
197
+ """Creates a BagWriter.
198
+
199
+ Args:
200
+ filename: The name of the Bagz file to write.
201
+ separate_limits: Whether to keep the limits in a separate file.
202
+ compress: Whether to compress the records. If None, uses the file
203
+ extension to determine whether to compress.
204
+ compression_level: The compression level to use when compressing.
205
+ """
206
+ if compress or (compress is None and filename.endswith(".bagz")):
207
+ self._process = zstd.ZstdCompressor(level=compression_level).compress
208
+ else:
209
+ self._process = lambda x: x
210
+ self._separate_limits = separate_limits
211
+ directory, name = os.path.split(filename)
212
+ self._records = open(filename, "wb")
213
+ self._limits = open(os.path.join(directory, "limits." + name), "wb+")
214
+
215
+ def write(self, data: bytes) -> None:
216
+ """Writes a record to the Bagz file."""
217
+ if data:
218
+ self._records.write(self._process(data))
219
+ self._limits.write(struct.pack("<q", self._records.tell()))
220
+
221
+ def flush(self) -> None:
222
+ """Flushes the Bagz file."""
223
+ self._records.flush()
224
+ self._limits.flush()
225
+
226
+ def __enter__(self) -> Self:
227
+ """Enters the runtime context related to this object."""
228
+ return self
229
+
230
+ def __exit__(self, exc_type, exc_value, traceback) -> None:
231
+ """Ensures the Bagz file is closed when exiting a context."""
232
+ self.close()
233
+
234
+ def close(self) -> None:
235
+ """Concatenates the limits file to the end of the data file."""
236
+ if self._separate_limits:
237
+ self._records.close()
238
+ self._limits.close()
239
+ else:
240
+ self._limits.seek(0)
241
+ shutil.copyfileobj(self._limits, self._records)
242
+ self._records.close()
243
+ os.unlink(self._limits.name)
244
+ self._limits.close()
245
+
246
+
247
class BagDataSource:
    """PyGrain-compatible data source for bagz files."""

    def __init__(self, path) -> None:
        """Creates a new BagDataSource object.

        Args:
            path: The path to the bag file.
        """
        self._path = os.fspath(path)
        self._reader = BagReader(self._path)
        self._num_records = len(self._reader)

    def __len__(self) -> int:
        """Returns the number of records in the Bagz file."""
        return self._num_records

    def __getitem__(self, record_key: SupportsIndex) -> bytes:
        """Returns a record from the Bagz file."""
        return self._reader[record_key]

    def __getstate__(self) -> dict[str, Any]:
        """Returns picklable state; the mmap-backed reader is dropped."""
        return {key: value for key, value in self.__dict__.items() if key != "_reader"}

    def __setstate__(self, state) -> None:
        """Restores state and reopens the reader from the stored path."""
        self.__dict__.update(state)
        self._reader = BagReader(self._path)

    def __repr__(self) -> str:
        """Returns a string representation of the BagDataSource."""
        return f"BagDataSource(path={self._path!r})"
utils/constants.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Coder that can encode an int or a char."""
2
+
3
+ from apache_beam import coders
4
+
5
+
6
class IntOrCharCoder(coders.Coder):
    """A coder that can encode either an integer or a single character string.

    Wire format: a one-byte tag (0x00 = varint, 0x01 = utf-8 char) followed
    by the payload.
    """

    def encode(self, value):
        """Encode the given value."""
        if isinstance(value, int):
            return b"\x00" + coders.VarIntCoder().encode(value)
        if isinstance(value, str) and len(value) == 1:
            return b"\x01" + value.encode("utf-8")
        raise ValueError("Value must be an int or a single character string.")

    def decode(self, encoded):
        """Decode the given encoded value."""
        tag, payload = encoded[:1], encoded[1:]
        if tag == b"\x00":
            return coders.VarIntCoder().decode(payload)
        if tag == b"\x01":
            return payload.decode("utf-8")
        raise ValueError("Invalid encoding prefix.")

    def is_deterministic(self):
        """Check if the coder is deterministic."""
        return True
+
31
+
32
# Base coders for the individual fields of a chessbench record.
CODERS = {
    "fen": coders.StrUtf8Coder(),  # board position as a FEN string
    "move": coders.StrUtf8Coder(),  # move in UCI notation
    "count": coders.BigIntegerCoder(),
    "win_prob": coders.FloatCoder(),  # win probability after the move
    "mate": IntOrCharCoder(),  # Use the custom coder here: int plies or "#"/"-"
}

# Composite tuple coders for the record layouts used by the datasets.
CODERS["state_value"] = coders.TupleCoder(
    (
        CODERS["fen"],
        CODERS["win_prob"],
    )
)
CODERS["action_value"] = coders.TupleCoder(
    (
        CODERS["fen"],
        CODERS["move"],
        CODERS["win_prob"],
    )
)
# action_value extended with the mate-in-N annotation.
CODERS["action_value_with_mate"] = coders.TupleCoder(
    (
        CODERS["fen"],
        CODERS["move"],
        CODERS["win_prob"],
        CODERS["mate"],
    )
)
CODERS["behavioral_cloning"] = coders.TupleCoder(
    (
        CODERS["fen"],
        CODERS["move"],
    )
)