File size: 4,489 Bytes
4eb33e4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import json
import argparse
import random
from pathlib import Path
from tqdm import tqdm
import datasets
from huggingface_hub import HfApi, RepoCard
from transformers import HfArgumentParser

# Fixed seed so the sampled train/test splits are reproducible across runs.
random.seed(0)

def generate_unique_multiplication_data(a_max, b_max, n_train, n_test):
    """Generate disjoint train/test multiplication pair splits for each range.

    For every a in 1..a_max and b in 1..b_max, all factor pairs (x, y) with
    1 <= x <= a and 1 <= y <= b are enumerated. Test pairs are sampled first,
    then train pairs are sampled from the remainder, so the two splits can
    never overlap. Requested sizes are capped at the number of available pairs.

    Args:
        a_max: Largest first factor (inclusive).
        b_max: Largest second factor (inclusive).
        n_train: Desired train-split size per range.
        n_test: Desired test-split size per range.

    Returns:
        Dict mapping "AxB" -> {"train": [(x, y), ...], "test": [(x, y), ...]}.
    """
    # Renamed from `datasets` to avoid shadowing the imported `datasets` module.
    splits = {}

    for a in range(1, a_max + 1):
        for b in range(1, b_max + 1):
            all_pairs = [(x, y) for x in range(1, a + 1) for y in range(1, b + 1)]

            # `all_pairs` is already a list, so it can be sampled directly.
            test_data = set(random.sample(all_pairs, min(n_test, len(all_pairs))))
            # Compute the remaining pool once instead of twice.
            remaining = list(set(all_pairs) - test_data)
            train_data = set(random.sample(remaining, min(n_train, len(remaining))))

            splits[f"{a}x{b}"] = {"train": list(train_data), "test": list(test_data)}

    return splits

def save_to_jsonl(data, file_path):
    """Write (a, b) factor pairs to *file_path* as JSONL question/answer records."""
    records = (
        {"problem": f"What is {a} times {b}?", "answer": str(a * b)}
        for a, b in data
    )
    with open(file_path, "w") as out:
        for record in records:
            out.write(json.dumps(record))
            out.write("\n")

def prepare_datasets(output_dir):
    """Build disjoint train/test JSONL files for every table from 1x1 to 15x15.

    Creates *output_dir* if needed, writes one train and one test file per
    "AxB" range, and returns the two lists of created file paths.
    """
    out = Path(output_dir)
    out.mkdir(parents=True, exist_ok=True)

    splits = generate_unique_multiplication_data(a_max=15, b_max=15, n_train=1000, n_test=100)

    train_files = []
    test_files = []
    for name, split in splits.items():
        train_path = out / f"multiplication_train_{name}.jsonl"
        test_path = out / f"multiplication_test_{name}.jsonl"

        save_to_jsonl(split["train"], train_path)
        save_to_jsonl(split["test"], test_path)

        train_files.append(train_path)
        test_files.append(test_path)

    print(f"\n✅ Datasets saved to {out}")
    return train_files, test_files

def process_file(file_path):
    """Load a JSONL problem file and convert it into a Hugging Face Dataset.

    Each record becomes a two-turn chat (user question, assistant answer);
    the answer is duplicated into `ground_truth`, and every row is tagged
    with the constant dataset name "multiplication".
    """
    records = []
    with open(file_path, "r") as f:
        for raw_line in f:
            stripped = raw_line.strip()
            if stripped:
                records.append(json.loads(stripped))

    columns = {"messages": [], "ground_truth": [], "dataset": []}
    for item in records:
        columns["messages"].append([
            {"role": "user", "content": item["problem"]},
            {"role": "assistant", "content": item["answer"]},
        ])
        columns["ground_truth"].append(item["answer"])
        columns["dataset"].append("multiplication")
    return datasets.Dataset.from_dict(columns)

def push_to_huggingface(train_files, test_files, hf_entity):
    """Push each JSONL dataset to the Hugging Face Hub with a repo card.

    For every file: converts it to a Dataset, pushes it to the Hub, uploads
    this script alongside the data, and attaches a RepoCard linking back to
    the hosted dataset.

    Args:
        train_files: Paths of train JSONL files to upload.
        test_files: Paths of test JSONL files to upload.
        hf_entity: Hub namespace; falls back to the authenticated user.
    """
    api = HfApi()
    hf_entity = hf_entity or api.whoami()["name"]

    print("\n📤 Uploading datasets to Hugging Face...\n")

    for file in train_files + test_files:
        dataset = process_file(file)
        dataset_name = file.stem
        repo_id = f"{hf_entity}/{dataset_name}"
        hf_url = f"https://huggingface.co/datasets/{repo_id}"

        dataset.push_to_hub(repo_id)

        # Upload this script alongside the data for reproducibility.
        api.upload_file(
            path_or_fileobj=__file__,
            path_in_repo="create_dataset.py",
            repo_type="dataset",
            repo_id=repo_id,
        )

        # Add RepoCard with Hugging Face link
        repo_card = RepoCard(
            content=f"""\
# Multiplication Dataset - {dataset_name}

This dataset contains multiplication problems for numbers up to 15x15.

## Dataset Format

- `messages`: User question and assistant answer.
- `ground_truth`: Correct multiplication result.
- `dataset`: "multiplication"

## Hugging Face Dataset Link
➡️ [View dataset on Hugging Face]({hf_url})
"""
        )
        repo_card.push_to_hub(repo_id, repo_type="dataset")

        # BUGFIX: report success only after the upload actually completed
        # (previously printed before push_to_hub, misleading on failure).
        print(f"✅ Dataset uploaded: {dataset_name}")

def main():
    """Command-line entry point: build the datasets, then optionally upload them."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--output_dir", type=str, default="math_data", help="Output directory")
    cli.add_argument("--push_to_hub", action="store_true", help="Upload to Hugging Face")
    cli.add_argument("--hf_entity", type=str, default=None, help="Hugging Face entity")
    opts = cli.parse_args()

    train_files, test_files = prepare_datasets(opts.output_dir)
    if opts.push_to_hub:
        push_to_huggingface(train_files, test_files, opts.hf_entity)

if __name__ == "__main__":
    main()