# --- Hugging Face dataset page header (scrape residue, kept as a comment) ---
# Datasets | Modalities: Text | Formats: arrow | Libraries: Datasets
# File: VTCTrain / prepare_sft.py
# Last commit: "Update prepare_sft.py" (6a8f19f, verified) by z-hb
from datasets import Dataset
import sys
import os
import numpy as np
# DRY_RUN=1 makes the script print one formatted sample per split and skip
# the final save_to_disk, so the output format can be inspected safely.
DRY_RUN: bool = os.getenv("DRY_RUN", "0") == "1"
# The module docstring is assigned at runtime so the usage examples can embed
# the actual invoked script name from sys.argv[0].
__doc__ = rf"""
A simple preprocessing script for preparing SFT data.
Usage:
python {sys.argv[0]} /path/to/VTCTrain # results will be saved to /path/to/VTCTrain/sft
DRY_RUN=1 python {sys.argv[0]} /path/to/VTCTrain # to inspect the output format without saving
Output arrow schema looks like:
```json
{{
'instruction': 'str',
'problem': 'str',
'answer': 'str',
}}
```
You may customize instruction templates to mitigate overfitting,
or adjust problem/answer mildly to avoid exact matches.
"""
# normally this would be /path/to/VTCTrain
parent_folder = sys.argv[1]
# Bug fix: the original check `assert "VTC" or 'vtc' in parent_folder` was a
# no-op — the non-empty literal "VTC" is always truthy, so the assertion could
# never fire regardless of the path. Test both casings against the path itself.
assert "VTC" in parent_folder or "vtc" in parent_folder, "Please provide a VTC path."
# Instruction templates for the RULER-* splits. One template is sampled per
# example (seeded by row index in ruler_map) to vary prompt wording and reduce
# overfitting to a single instruction. List order matters: changing it changes
# which template a given seed selects.
# NOTE(review): some templates say "book snippet"/"text snippet" while others
# say "images" — presumably deliberate variety; confirm against training setup.
RULER_INSTRUCTION = [
    "Answer a question based on the above book snippet. Your answer should be short and based on either explicitly stated facts or strong, logical inferences. Return only the final answer with no additional explanation or reasoning. Question: ",
    "Using the provided book snippet, answer the question below briefly. Base your response on factual evidence or clear logic from the text. Provide only the final answer—no explanations or extra words. Question: ",
    "Provide a concise answer to the following question based solely on the images above. Use only explicit details or direct inferences. You must output the final answer alone, omitting any reasoning or commentary. Question: ",
    "Respond to the question using the text snippet above. Your answer must be short and derived from either stated facts or sound logical deductions. Do not include justifications or introductory text; return only the answer. Question: ",
    "Answer the question based on the preceding book fragment. Ensure the response is brief and grounded in textual facts or strong inferences. The output should consist exclusively of the final answer without any additional analysis. Question: ",
    "Based on the images above, answer the question shortly. Rely on facts or logical conclusions. Final answer only. No reasoning, no explanation. Question: "
]
# Instruction templates for the NoLiMa split; sampled per example in
# nolima_map with a row-index-seeded RNG. List order matters for the seeded
# choice, so do not reorder without intending to change sampled outputs.
NOLIMA_INSTRUCTION = [
    "Answer a question based on the above book snippet. Your answer should be short and based on either explicitly stated facts or strong, logical inferences. Return only the final answer with no additional explanation or reasoning. Question: ",
    "Refer to the book snippet images above to answer the question below. Provide a brief response based on facts or logical deductions. Return the answer only—no explanations. Question: ",
    "Based on the provided book excerpts, give a concise answer to the following question. Use only explicit information or strong logical inferences. Do not include any reasoning or preamble in your response. Question: ",
    "Answer the following question using the book images provided. Constraints: 1. Keep it short. 2. Base it on facts or clear logic. 3. Output ONLY the final answer without any additional text or commentary. Question: ",
    "Using the book snippet above as your only source, briefly answer this question. Ground your response in the text or what can be logically inferred from it. Please provide just the answer itself, skipping any explanation. Question: ",
    "Identify the answer to the question below from the attached book images. Ensure your response is succinct and evidence-based. Return only the final result, omitting any background reasoning. Question: "
]
def ruler_map(e: dict, idx: int) -> dict:
    """Convert one RULER example into the SFT (instruction, problem, answer) schema.

    Args:
        e: Dataset row; reads keys "problem", "answers" (non-empty list of
           str or int ground truths — TODO confirm upstream schema) and
           "images".
        idx: Row index; seeds the RNG so the sampled instruction is
           deterministic and reproducible per example.

    Returns:
        dict with "instruction", "problem" (one "<image>" tag per image,
        then the question), and "answer" (comma-joined ground truths).
    """
    # Per-row seed: re-running the script picks the same template for each row.
    rng = np.random.default_rng(idx)
    first_gt = e["answers"][0]
    if isinstance(first_gt, str) and not first_gt.isdigit():
        problem = f'What is one of the magic words for {e["problem"]}?'
    else:
        # isdigit or int, treat as number
        assert isinstance(first_gt, (str, int))
        problem = f'What is one of the magic numbers for {e["problem"]}?'
    return {
        # str() normalizes numpy.str_ from rng.choice to a plain Python str.
        "instruction": str(rng.choice(RULER_INSTRUCTION)),
        "problem": "<image>" * len(e["images"]) + problem,
        # Bug fix: the original ",".join(e["answers"]) raised TypeError when
        # the answers were ints — a case the assert above explicitly allows.
        "answer": ",".join(map(str, e["answers"])),
    }
def nolima_map(e: dict, idx: int) -> dict:
    """Map one NoLiMa row to the SFT (instruction, problem, answer) schema.

    The instruction template is drawn with an RNG seeded by the row index,
    so sampling is deterministic across runs. Reads keys "problem",
    "answers" and "images" from the row.
    """
    # Seeding with idx keeps the per-row template choice reproducible.
    rng = np.random.default_rng(idx)
    image_prefix = "<image>" * len(e["images"])
    return {
        "instruction": rng.choice(NOLIMA_INSTRUCTION),
        "problem": image_prefix + e["problem"],
        "answer": e["answers"][0],
    }
# Source splits, the per-example mapping function for each, and the cap on
# how many rows to keep from each split.
subfolders = ["NoLiMa", "RULER-MK", "RULER-MQ", "RULER-MV", "RULER-S"]
mapping_fns = [nolima_map, ruler_map, ruler_map, ruler_map, ruler_map]
max_samples = [4000, 2000, 2000, 2000, 2000]

for split_name, map_fn, cap in zip(subfolders, mapping_fns, max_samples):
    ds = Dataset.load_from_disk(f"{parent_folder}/{split_name}")
    # Keep at most `cap` rows (the whole split if it is smaller).
    ds = ds.select(range(min(len(ds), cap)))
    ds: Dataset = ds.map(map_fn, with_indices=True, batch_size=100, writer_batch_size=100, num_proc=50)
    # Show one converted example so the output format can be eyeballed.
    head = ds[0]
    print(
        f"""
instruction: {head["instruction"]}
problem: {head["problem"]}
answer: {head["answer"]}
"""
    )
    if DRY_RUN:
        continue
    ds.save_to_disk(f"{parent_folder}/sft/{split_name}", num_proc=50)