# monoids-100 / generate_data.py
# Provenance (Hugging Face page header): uploaded by jowenpetty,
# PR "jp-add-code (#1)", commit 957af3b (verified).
import json
import logging
import os
import random
from functools import partial
from itertools import accumulate
from logging import Logger
from pathlib import Path
from typing import Any
import fire
import numpy as np
import numpy.typing as npt
import pyrootutils
from abstract_algebra.finite_algebras import (
Group,
generate_cyclic_group,
generate_symmetric_group,
)
from dotenv import load_dotenv
from tqdm import tqdm
# Locate the repository root (marked by a ".project-root" file) so that all
# paths resolve correctly regardless of the current working directory.
PROJECT_ROOT: Path = pyrootutils.find_root(
    search_from=__file__, indicator=".project-root"
)
# All generated datasets are written under <repo>/data.
DATA_DIR: Path = PROJECT_ROOT / "data"
# Load environment variables (e.g. DATA_SEED, read below) from the repo .env.
load_dotenv(PROJECT_ROOT / ".env")
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    # Fixed: was "%Y-%d-%m" (day/month transposed); ISO order is year-month-day.
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
)
log: Logger = logging.getLogger(__name__)
def _set_all_seeds(seed: int) -> None:
    """Seed the stdlib and NumPy RNGs (and PYTHONHASHSEED) for reproducibility.

    Args:
        seed: Value applied to every random source.
    """
    # NOTE(review): setting PYTHONHASHSEED at runtime does NOT change hash
    # randomization of the already-running interpreter; it only affects
    # subprocesses that inherit this environment. Kept for that purpose.
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    random.seed(seed)
    # Lazy %-formatting: message is only built if INFO logging is enabled.
    log.info("Set all seeds to %d", seed)
def _create_group(g: tuple[str, int]) -> Group:
"""
Create a group based on the input string.
"""
if g[0] == "S":
return generate_symmetric_group(g[1])
elif g[0] == "A":
s_n: Group = generate_symmetric_group(g[1])
a_n: Group = s_n.commutator_subalgebra()
a_n.name = f"A{g[1]}"
return a_n
elif g[0] == "Z":
return generate_cyclic_group(g[1])
else:
raise ValueError(f"Unknown group type: {g[0]}")
def generate_data(
    max_sequence_length: int = 100,
    samples_per_group: int = 100_000,
    seed: int | None = None,
    # Immutable default instead of a mutable list (shared-default pitfall).
    groups: tuple[str, ...] = (
        "S2",
        "S3",
        "S4",
        "S5",
        "S6",  # symmetric groups
        "A3",
        "A4",
        "A5",
        "A6",  # alternating groups
        "Z6",
        "Z12",
        "Z24",
        "Z60",
        "Z120",
        "Z360",
        "Z720",  # cyclic groups
    ),
):
    """
    Generates datasets of sequences of group elements and their products.

    For each group a JSONL file "<group>.jsonl" is written under DATA_DIR, one
    sample per line with keys: "sequence" (element indices),
    "intermediate_products" (running products, seeded with element index 0),
    "product" (final product index), "sequence_length", and "group".

    By default, we generate 100,000 samples per group, with a maximum sequence
    length of 100. We generate samples from 5 symmetric groups (S2, S3, S4, S5,
    S6), 4 alternating groups (A3, A4, A5, A6), and 7 cyclic groups (Z6, Z12,
    Z24, Z60, Z120, Z360, Z720). The cyclic groups are chosen to correspond to
    the order of the symmetric and alternating groups:
    - S2: 2
    - S3: 6
    - S4: 24
    - S5: 120
    - S6: 720
    - A3: 3 == Z3
    - A4: 12
    - A5: 60
    - A6: 360
    - Z6: 6
    - Z12: 12
    - Z24: 24
    - Z60: 60
    - Z120: 120
    - Z360: 360
    - Z720: 720

    Args:
        max_sequence_length: Inclusive upper bound on sampled sequence length.
        samples_per_group: Number of samples to generate for each group.
        seed: RNG seed. If None, falls back to the DATA_SEED environment
            variable, then to a randomly drawn seed.
        groups: Group names of the form "<type><order>", type in {S, A, Z}.
    """

    def _group_reduce(lhs: str | int, rhs: int, G: Group) -> int:
        """Fold step: multiply the accumulator by the element at index rhs.

        lhs is either a group element (str, the previous step's product) or an
        element index (int, only the initial value); rhs is always an index.
        Returns the index of the resulting product.
        """
        if isinstance(lhs, str):
            prod: str = G.op(lhs, G.elements[rhs])
        else:
            prod = G.op(G.elements[lhs], G.elements[rhs])
        return G.elements.index(prod)

    # Resolve the seed: explicit argument > DATA_SEED env var > random draw.
    if seed is not None:
        log.info("Using seed from argument")
        _set_all_seeds(seed)
    elif os.environ.get("DATA_SEED") is not None:
        log.info("Using seed from environment variable `DATA_SEED`")
        seed = int(os.environ["DATA_SEED"])
        _set_all_seeds(seed)
    else:
        log.info("Using random seed")
        seed = np.random.randint(0, 2**32 - 1)
        _set_all_seeds(seed)
    for group_name in tqdm(groups, position=0):
        log.info(
            "Generating %d samples with a max sequence length of %d from %s",
            samples_per_group,
            max_sequence_length,
            group_name,
        )
        group_type: str = group_name[0]
        group_num: int = int(group_name[1:])
        G: Group = _create_group((group_type, group_num))
        num_elements: int = len(G.elements)
        samples: list[dict[str, Any]] = []
        for _ in tqdm(range(samples_per_group), position=1):
            # Sequence length is uniform on [1, max_sequence_length].
            s_len: int = np.random.randint(1, max_sequence_length + 1)
            # .tolist() yields plain ints, so this is a list[int]
            # (was mis-annotated as npt.ArrayLike[np.int_]).
            s_elts: list[int] = np.random.choice(
                num_elements, size=s_len, replace=True
            ).tolist()
            # Running product via scan; initial=0 seeds the fold with the
            # element at index 0 (presumably the identity — TODO confirm).
            scanned: list[int] = list(
                accumulate(s_elts, partial(_group_reduce, G=G), initial=0)
            )
            product: int = scanned[-1]
            samples.append(
                {
                    "sequence": s_elts,
                    "intermediate_products": scanned,
                    "product": product,
                    "sequence_length": s_len,
                    "group": group_name,
                }
            )
        dataset_path: Path = DATA_DIR / f"{group_name}.jsonl"
        # exist_ok=True makes the pre-existence check redundant and race-safe.
        dataset_path.parent.mkdir(parents=True, exist_ok=True)
        log.info("Writing dataset to %s", dataset_path)
        with open(dataset_path, "w", encoding="utf-8") as f:
            for sample in samples:
                f.write(json.dumps(sample, separators=(",", ":")) + "\n")
        # Release per-group buffers before building the next (large) group.
        del samples
        del G
if __name__ == "__main__":
    # Expose generate_data as a CLI: python-fire maps its keyword
    # arguments to command-line flags.
    fire.Fire(generate_data)