#!/usr/bin/env python3
"""
Script to split the lines dataset into train/validation/test sets (80/10/10)
and transform the data format.
"""

import json
import random
import shutil
from pathlib import Path

# Set random seed for reproducibility
random.seed(42)

# Define paths
BASE_DIR = Path("/Users/prasatee/Desktop/unsloth/DigitizePID_Dataset/lines_dataset")
TRAIN_DIR = BASE_DIR / "train"
VAL_DIR = BASE_DIR / "validation"
TEST_DIR = BASE_DIR / "test"
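# Note: BASE_DIR is a machine-specific absolute path; adjust it before running
# this script against another copy of the dataset.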

# Read current metadata
metadata_path = TRAIN_DIR / "metadata.jsonl"
data = []
print("Reading metadata...")
with open(metadata_path, "r") as f:
    for line in f:
        line = line.strip()
        if not line:  # tolerate stray blank lines in the JSONL
            continue
        entry = json.loads(line)
        # Transform to new format (flatten the "lines" field)
        new_entry = {
            "file_name": entry["file_name"],
            "source_image_idx": entry["source_image_idx"],
            "crop_idx": entry["crop_idx"],
            "width": entry["width"],
            "height": entry["height"],
            "segments": entry["lines"]["segments"],
            "line_types": entry["lines"]["line_types"],
            "pipelines": entry["lines"]["pipelines"],
        }
        data.append(new_entry)

print(f"Total entries: {len(data)}")

# Shuffle data
random.shuffle(data)

# Calculate split sizes (80/10/10)
total = len(data)
train_size = int(0.8 * total)
val_size = int(0.1 * total)
test_size = total - train_size - val_size

train_data = data[:train_size]
val_data = data[train_size:train_size + val_size]
test_data = data[train_size + val_size:]
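# test_data takes whatever remains after the two int() truncations, so the
# three slices partition `data` exactly: train + val + test == total.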
print(f"Train: {len(train_data)}, Validation: {len(val_data)}, Test: {len(test_data)}")

# Create directories
VAL_DIR.mkdir(exist_ok=True)
TEST_DIR.mkdir(exist_ok=True)
print("\nMoving files...")
# Move validation files
print("Processing validation set...")
for entry in val_data:
src = TRAIN_DIR / entry["file_name"]
dst = VAL_DIR / entry["file_name"]
if src.exists():
shutil.move(str(src), str(dst))
# Move test files
print("Processing test set...")
for entry in test_data:
src = TRAIN_DIR / entry["file_name"]
dst = TEST_DIR / entry["file_name"]
if src.exists():
shutil.move(str(src), str(dst))
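# Entries whose image is already missing from train/ are skipped silently
# here, but their rows are still written to the new metadata files below; a
# loader that checks file_name against the folder contents may flag them.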

# Write new metadata files
print("\nWriting metadata files...")

# Train metadata
train_metadata_path = TRAIN_DIR / "metadata.jsonl"
with open(train_metadata_path, "w") as f:
    for entry in train_data:
        f.write(json.dumps(entry) + "\n")

# Validation metadata
val_metadata_path = VAL_DIR / "metadata.jsonl"
with open(val_metadata_path, "w") as f:
    for entry in val_data:
        f.write(json.dumps(entry) + "\n")

# Test metadata
test_metadata_path = TEST_DIR / "metadata.jsonl"
with open(test_metadata_path, "w") as f:
    for entry in test_data:
        f.write(json.dumps(entry) + "\n")
print("\nDone!")
print(f"Train set: {len(train_data)} samples in {TRAIN_DIR}")
print(f"Validation set: {len(val_data)} samples in {VAL_DIR}")
print(f"Test set: {len(test_data)} samples in {TEST_DIR}")
# Verify first entry format
print("\nSample entry format:")
print(json.dumps(train_data[0], indent=2))
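
# With this layout (train/validation/test folders, each containing the images
# plus a metadata.jsonl), the result can typically be loaded with the Hugging
# Face `datasets` library's imagefolder builder, e.g.:
#   from datasets import load_dataset
#   ds = load_dataset("imagefolder", data_dir=str(BASE_DIR))
# (Sketch only; assumes `datasets` is installed and the images sit next to
# their metadata.jsonl as this script arranges them.)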