|
|
|
|
|
""" |
|
|
Script to split the lines dataset into train/validation/test sets (80/10/10) |
|
|
and transform the data format. |
|
|
""" |
|
|
|
|
|
import json |
|
|
import os |
|
|
import shutil |
|
|
import random |
|
|
from pathlib import Path |
|
|
|
|
|
|
|
|
# Seed the RNG so the shuffle below (and therefore the split) is
# reproducible across runs.
random.seed(42)
|
|
|
|
|
|
|
|
# Dataset layout: images and metadata.jsonl start out under train/;
# validation/ and test/ are populated by this script.
# NOTE(review): BASE_DIR is hard-coded to one user's machine — presumably
# this should come from a CLI argument or env var; confirm before reuse.
BASE_DIR = Path("/Users/prasatee/Desktop/unsloth/DigitizePID_Dataset/lines_dataset")
TRAIN_DIR = BASE_DIR / "train"
VAL_DIR = BASE_DIR / "validation"
TEST_DIR = BASE_DIR / "test"
|
|
|
|
|
|
|
|
# Load every record from the training metadata file and flatten the nested
# "lines" dict into top-level keys (segments / line_types / pipelines).
metadata_path = TRAIN_DIR / "metadata.jsonl"
data = []

print("Reading metadata...")
with open(metadata_path, "r") as f:
    for line in f:
        line = line.strip()
        if not line:
            # Skip blank lines so a trailing newline (or accidental empty
            # line) in the JSONL file does not crash json.loads.
            continue
        entry = json.loads(line)
        new_entry = {
            "file_name": entry["file_name"],
            "source_image_idx": entry["source_image_idx"],
            "crop_idx": entry["crop_idx"],
            "width": entry["width"],
            "height": entry["height"],
            # Flatten the nested "lines" payload into top-level keys.
            "segments": entry["lines"]["segments"],
            "line_types": entry["lines"]["line_types"],
            "pipelines": entry["lines"]["pipelines"],
        }
        data.append(new_entry)

print(f"Total entries: {len(data)}")
|
|
|
|
|
|
|
|
# Shuffle once (seeded above) and carve out an 80/10/10 split; the test
# split absorbs any rounding remainder so every entry lands somewhere.
random.shuffle(data)

n_total = len(data)
n_train = int(0.8 * n_total)
n_val = int(0.1 * n_total)
n_test = n_total - n_train - n_val

cut = n_train + n_val
train_data = data[:n_train]
val_data = data[n_train:cut]
test_data = data[cut:]

print(f"Train: {len(train_data)}, Validation: {len(val_data)}, Test: {len(test_data)}")
|
|
|
|
|
|
|
|
def _move_split(entries, dest_dir):
    """Move each entry's image file from TRAIN_DIR into *dest_dir*.

    Files that no longer exist under TRAIN_DIR (e.g. on a re-run after a
    previous split) are skipped silently, matching the original behavior.
    """
    for entry in entries:
        src = TRAIN_DIR / entry["file_name"]
        dst = dest_dir / entry["file_name"]
        if src.exists():
            shutil.move(str(src), str(dst))


# Create the split directories (no-op if they already exist).
VAL_DIR.mkdir(exist_ok=True)
TEST_DIR.mkdir(exist_ok=True)

print("\nMoving files...")

print("Processing validation set...")
_move_split(val_data, VAL_DIR)

print("Processing test set...")
_move_split(test_data, TEST_DIR)
|
|
|
|
|
|
|
|
def _write_metadata(entries, path):
    """Write *entries* to *path* as JSON Lines (one object per line)."""
    with open(path, "w") as f:
        for entry in entries:
            f.write(json.dumps(entry) + "\n")


print("\nWriting metadata files...")

# Rewrite train/metadata.jsonl so it only lists entries kept in train/,
# and create fresh metadata files for the new validation and test splits.
_write_metadata(train_data, TRAIN_DIR / "metadata.jsonl")
_write_metadata(val_data, VAL_DIR / "metadata.jsonl")
_write_metadata(test_data, TEST_DIR / "metadata.jsonl")
|
|
|
|
|
# Final summary of where each split landed.
print("\nDone!")
print(f"Train set: {len(train_data)} samples in {TRAIN_DIR}")
print(f"Validation set: {len(val_data)} samples in {VAL_DIR}")
print(f"Test set: {len(test_data)} samples in {TEST_DIR}")

print("\nSample entry format:")
if train_data:
    print(json.dumps(train_data[0], indent=2))
else:
    # Guard against an empty dataset so the script does not end with an
    # IndexError after otherwise completing successfully.
    print("(no training samples)")
|
|
|
|
|
|