# Source: "ds.py" uploaded by Goekdeniz-Guelmez (revision 5c1e4b0, verified).
from datasets import load_dataset, concatenate_datasets
import json
import os
# Load the dataset
# Pulls unsloth/LaTeX_OCR (image + LaTeX "text" pairs, per the fields used
# below) from the Hugging Face Hub; cached locally after the first run.
print("Loading dataset...")
dataset = load_dataset("unsloth/LaTeX_OCR")
def add_question_and_rename(example):
    """Attach a fixed instruction under 'question' and move 'text' to 'answer'.

    Mutates and returns *example* (the shape `datasets.map` expects).
    Examples without a 'text' key are left without an 'answer'.
    """
    example["question"] = "Convert this into LaTeX code, only provide the LaTeX code."
    if "text" in example:
        # pop() renames the key in one step: copy the value, drop the original.
        example["answer"] = example.pop("text")
    return example
print("Modifying dataset...")
# Apply the transformation to all splits.
# FIX: `datasets.map` keeps every original column unless it is listed in
# `remove_columns` — deleting the key inside the mapping function does NOT
# drop the "text" column from the resulting dataset. Without this argument
# the saved dataset would carry both "text" and "answer".
# (remove_columns is applied after the function sees the input, so the
# function still reads example["text"] normally.)
modified_dataset = dataset.map(add_question_and_rename, remove_columns=["text"])
# Ensure both output locations exist before anything is written.
for out_dir in ("./parquet_files", "./modified_latex_ocr_dataset"):
    os.makedirs(out_dir, exist_ok=True)

# Arrow-on-disk copy of the full dataset (keeps the image column intact).
print("Saving dataset locally...")
modified_dataset.save_to_disk("./modified_latex_ocr_dataset")
# One Parquet file per split (train/test/...), named after the split.
print("Saving as Parquet files (per split)...")
for split_name, split_data in modified_dataset.items():
    target = f"./parquet_files/modified_latex_ocr_{split_name}.parquet"
    split_data.to_parquet(target)
    print(f"Saved {split_name} split to {target}")
# When there is more than one split, also emit a single Parquet file that
# contains every row, tagged with a "split" column so the origin survives
# the concatenation.
if len(modified_dataset) > 1:
    print("Saving as consolidated Parquet file...")
    tagged_splits = []
    for split_name, split_data in modified_dataset.items():
        # Bind split_name as a default arg so the lambda is self-contained
        # (map runs eagerly here, but the explicit binding is clearer).
        tagged = split_data.map(lambda ex, _name=split_name: {**ex, "split": _name})
        tagged_splits.append(tagged)
    consolidated_dataset = concatenate_datasets(tagged_splits)
    consolidated_dataset.to_parquet("./parquet_files/modified_latex_ocr_all.parquet")
    print("Saved consolidated dataset to ./parquet_files/modified_latex_ocr_all.parquet")
# Save just the text data as JSON for inspection (excluding images).
print("Saving text-only JSON files...")
for split_name, split_data in modified_dataset.items():
    text_only_data = []
    for i, sample in enumerate(split_data):
        text_sample = {
            "id": i,
            "question": sample["question"],
            "answer": sample["answer"],
        }
        # Carry over any extra scalar fields; hasattr(value, "save") screens
        # out PIL-style image objects without importing PIL here.
        for key, value in sample.items():
            if key not in ("image", "question", "answer") and not hasattr(value, "save"):
                text_sample[key] = value
        text_only_data.append(text_sample)
    output_file = f"modified_latex_ocr_{split_name}_text_only.json"
    # FIX: ensure_ascii=False writes raw Unicode, so the file must be opened
    # as UTF-8 explicitly — the platform default encoding (e.g. cp1252 on
    # Windows) would raise UnicodeEncodeError on non-ASCII LaTeX/answers.
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(text_only_data, f, indent=2, ensure_ascii=False)
    print(f"Saved {split_name} text data to {output_file}")
# Final report: banner, per-split counts, then the files produced with sizes.
banner = "=" * 50
print("\n" + banner)
print("DATASET MODIFICATION COMPLETE!")
print(banner)

print(f"\nDataset structure: {modified_dataset}")
for split_name in modified_dataset:
    print(f" {split_name} split: {len(modified_dataset[split_name])} samples")

print(f"\nOutput files created:")
print(f" 📁 ./modified_latex_ocr_dataset/ - Full dataset with images")
print(f" 📁 ./parquet_files/ - Parquet files")
if os.path.exists("./parquet_files"):
    for entry in sorted(os.listdir("./parquet_files")):
        if not entry.endswith(".parquet"):
            continue
        entry_path = os.path.join("./parquet_files", entry)
        size_mb = os.path.getsize(entry_path) / (1024 * 1024)
        print(f" 📄 {entry}: {size_mb:.2f} MB")

# Text-only JSON files live in the working directory, not ./parquet_files.
json_files = [
    name for name in os.listdir(".")
    if name.startswith("modified_latex_ocr") and name.endswith(".json")
]
if json_files:
    print(f" 📄 Text-only JSON files:")
    for name in sorted(json_files):
        size_kb = os.path.getsize(name) / 1024
        print(f" 📄 {name}: {size_kb:.2f} KB")
# Preview one record from the first split so the new schema is visible.
if len(modified_dataset) > 0:
    first_split = next(iter(modified_dataset.keys()))
    sample = modified_dataset[first_split][0]
    print(f"\n📋 Sample from {first_split} split:")
    print(f" Keys: {list(sample.keys())}")
    print(f" Question: {sample['question']}")
    if 'image' in sample:
        print(f" Image: {type(sample['image'])}")
    # Truncate long answers to 100 characters for the console.
    answer = sample['answer']
    answer_preview = answer if len(answer) <= 100 else answer[:100] + "..."
    print(f" Answer: {answer_preview}")