#!/usr/bin/env python3
"""
Script to split the lines dataset into train/validation/test sets (80/10/10)
and transform the data format.
"""

import json
import random
import shutil
from pathlib import Path

# Set random seed for reproducibility
random.seed(42)

# Define paths
BASE_DIR = Path("/Users/prasatee/Desktop/unsloth/DigitizePID_Dataset/lines_dataset")
TRAIN_DIR = BASE_DIR / "train"
VAL_DIR = BASE_DIR / "validation"
TEST_DIR = BASE_DIR / "test"

# Read current metadata
metadata_path = TRAIN_DIR / "metadata.jsonl"
data = []

print("Reading metadata...")
with open(metadata_path, "r") as f:
    for line in f:
        entry = json.loads(line.strip())
        # Transform to new format (flatten the "lines" field)
        new_entry = {
            "file_name": entry["file_name"],
            "source_image_idx": entry["source_image_idx"],
            "crop_idx": entry["crop_idx"],
            "width": entry["width"],
            "height": entry["height"],
            "segments": entry["lines"]["segments"],
            "line_types": entry["lines"]["line_types"],
            "pipelines": entry["lines"]["pipelines"],
        }
        data.append(new_entry)

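# Illustrative sketch of the transformation above. The concrete values are
# hypothetical; only the key layout is taken from the code:
#
#   input line:   {"file_name": "crop_0001.png", "source_image_idx": 3,
#                  "crop_idx": 1, "width": 512, "height": 512,
#                  "lines": {"segments": [[10, 20, 110, 20]],
#                            "line_types": ["solid"],
#                            "pipelines": [0]}}
#
#   output entry: same scalar fields, with "segments", "line_types", and
#                 "pipelines" hoisted out of the nested "lines" dict to
#                 the top level.
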
print(f"Total entries: {len(data)}")

# Shuffle data
random.shuffle(data)

# Calculate split sizes
total = len(data)
train_size = int(0.8 * total)
val_size = int(0.1 * total)
test_size = total - train_size - val_size
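# int() floors both counts, so train_size + val_size can fall short of
# total; handing the remainder to the test split guarantees every entry
# lands in exactly one split (test may carry one or two extra samples).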

train_data = data[:train_size]
val_data = data[train_size:train_size + val_size]
test_data = data[train_size + val_size:]

print(f"Train: {len(train_data)}, Validation: {len(val_data)}, Test: {len(test_data)}")

# Create directories
VAL_DIR.mkdir(exist_ok=True)
TEST_DIR.mkdir(exist_ok=True)
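# TRAIN_DIR already exists (the metadata was read from it), so only the
# two new split directories need to be created here.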

print("\nMoving files...")

# Move validation files
print("Processing validation set...")
missing = 0
for entry in val_data:
    src = TRAIN_DIR / entry["file_name"]
    dst = VAL_DIR / entry["file_name"]
    if src.exists():
        shutil.move(str(src), str(dst))
    else:
        missing += 1

# Move test files
print("Processing test set...")
for entry in test_data:
    src = TRAIN_DIR / entry["file_name"]
    dst = TEST_DIR / entry["file_name"]
    if src.exists():
        shutil.move(str(src), str(dst))
    else:
        missing += 1

# A file referenced by the metadata but absent on disk would leave a broken
# split entry, so surface it instead of failing silently.
if missing:
    print(f"Warning: {missing} files listed in the metadata were missing from {TRAIN_DIR}")

# Write new metadata files
print("\nWriting metadata files...")

# Train metadata
train_metadata_path = TRAIN_DIR / "metadata.jsonl"
with open(train_metadata_path, "w") as f:
    for entry in train_data:
        f.write(json.dumps(entry) + "\n")

# Validation metadata
val_metadata_path = VAL_DIR / "metadata.jsonl"
with open(val_metadata_path, "w") as f:
    for entry in val_data:
        f.write(json.dumps(entry) + "\n")

# Test metadata
test_metadata_path = TEST_DIR / "metadata.jsonl"
with open(test_metadata_path, "w") as f:
    for entry in test_data:
        f.write(json.dumps(entry) + "\n")

print("\nDone!")
print(f"Train set: {len(train_data)} samples in {TRAIN_DIR}")
print(f"Validation set: {len(val_data)} samples in {VAL_DIR}")
print(f"Test set: {len(test_data)} samples in {TEST_DIR}")

# Verify the first entry's format (guard against an empty split)
if train_data:
    print("\nSample entry format:")
    print(json.dumps(train_data[0], indent=2))
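
# The resulting layout (train/validation/test directories, each holding its
# images plus a metadata.jsonl) matches the Hugging Face "imagefolder"
# convention. Assuming that is the intended consumer (the script itself does
# not say), the splits could then be loaded like this:
#
#   from datasets import load_dataset
#   ds = load_dataset("imagefolder", data_dir=str(BASE_DIR))
#   print(ds)  # DatasetDict with train/validation/test splits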