#!/usr/bin/env python3
"""
Dataset Validation and Analysis Tool

Validates and analyzes all n8n workflow training datasets,
supporting both JSON array and JSON Lines (JSONL) formats.
"""

import json
import os
from pathlib import Path
from typing import Dict, List, Any
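
# Expected record shape, sketched from the required-field check in
# validate_dataset() below; the example values are illustrative assumptions,
# not taken from the actual datasets:
#
#   {"prompt": "<natural-language task description>",
#    "thinking": "<reasoning trace>",
#    "json": "<n8n workflow definition>"}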


def load_json_array(filepath: Path) -> List[Dict[str, Any]]:
    """Load standard JSON array format."""
    with open(filepath, 'r', encoding='utf-8') as f:
        return json.load(f)


def load_jsonl(filepath: Path) -> List[Dict[str, Any]]:
    """Load JSON Lines (JSONL) format."""
    examples = []
    with open(filepath, 'r', encoding='utf-8') as f:
        for line_num, line in enumerate(f, 1):
            line = line.strip()
            if line:  # Skip empty lines
                try:
                    examples.append(json.loads(line))
                except json.JSONDecodeError as e:
                    print(f"  ⚠️  Line {line_num}: JSON decode error - {e}")
    return examples


def detect_format(filepath: Path) -> str:
    """Detect if file is JSON array or JSONL format."""
    with open(filepath, 'r', encoding='utf-8') as f:
        first_char = f.read(1).strip()
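        # Heuristic: '[' marks a JSON array, '{' is treated as JSONL (this also
        # matches a file containing a single top-level JSON object).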
        if first_char == '[':
            return 'json_array'
        elif first_char == '{':
            return 'jsonl'
        else:
            return 'unknown'


def validate_dataset(filepath: Path) -> Dict[str, Any]:
    """Validate and analyze a single dataset file."""
    print(f"\n📊 Analyzing: {filepath.name}")
    print(f"   Size: {filepath.stat().st_size:,} bytes")
    
    # Detect format
    fmt = detect_format(filepath)
    print(f"   Format: {fmt.upper().replace('_', ' ')}")
    
    result = {
        'filename': filepath.name,
        'size_bytes': filepath.stat().st_size,
        'format': fmt,
        'valid': False,
        'example_count': 0,
        'errors': []
    }
    
    # Load based on format
    try:
        if fmt == 'json_array':
            examples = load_json_array(filepath)
        elif fmt == 'jsonl':
            examples = load_jsonl(filepath)
        else:
            result['errors'].append(f"Unknown format: {fmt}")
            return result
        
        result['valid'] = True
        result['example_count'] = len(examples)
        
        # Validate structure of first example
        if examples:
            first = examples[0]
            required_fields = {'prompt', 'json', 'thinking'}
            missing = required_fields - set(first.keys())
            if missing:
                result['errors'].append(f"Missing fields in examples: {missing}")
        
        print(f"   ✅ Valid: {len(examples):,} examples")
        
    except Exception as e:
        result['errors'].append(str(e))
        print(f"   ❌ Error: {e}")
    
    return result


def main():
    """Main validation and analysis."""
    print("=" * 60)
    print("N8N DATASET VALIDATION & ANALYSIS")
    print("=" * 60)
    
    # Find all dataset files (pick up .jsonl as well as .json, since JSONL is supported)
    datasets_dir = Path(__file__).parent
    dataset_files = sorted([*datasets_dir.glob('dataset_*.json'),
                            *datasets_dir.glob('dataset_*.jsonl')])
    
    if not dataset_files:
        print("⚠️  No dataset files found!")
        return
    
    results = []
    for filepath in dataset_files:
        result = validate_dataset(filepath)
        results.append(result)
    
    # Summary
    print("\n" + "=" * 60)
    print("SUMMARY")
    print("=" * 60)
    
    total_examples = sum(r['example_count'] for r in results)
    total_size = sum(r['size_bytes'] for r in results)
    valid_count = sum(1 for r in results if r['valid'])
    
    print(f"\n📁 Total Datasets: {len(results)}")
    print(f"✅ Valid: {valid_count}")
    print(f"❌ Invalid: {len(results) - valid_count}")
    print(f"📝 Total Examples: {total_examples:,}")
    print(f"💾 Total Size: {total_size / (1024 * 1024):.2f} MB")
    
    # Detailed breakdown
    print("\n" + "-" * 60)
    print(f"{'Dataset':<20} {'Format':<12} {'Examples':>10} {'Size':>12}")
    print("-" * 60)
    
    for r in results:
        size_mb = r['size_bytes'] / (1024 * 1024)
        status = "✅" if r['valid'] else "❌"
        fmt = r['format'].replace('_', ' ').title()
        print(f"{status} {r['filename']:<18} {fmt:<12} {r['example_count']:>10,} {size_mb:>10.2f} MB")
    
    # Errors
    errors = [r for r in results if r['errors']]
    if errors:
        print("\n" + "=" * 60)
        print("ERRORS")
        print("=" * 60)
        for r in errors:
            print(f"\n❌ {r['filename']}:")
            for err in r['errors']:
                print(f"   • {err}")
    
    print("\n" + "=" * 60)


if __name__ == '__main__':
    main()
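
# Usage sketch (assumption: this script sits in the same directory as the
# dataset_* files it scans, since it globs Path(__file__).parent):
#
#   $ python3 <this_script>.py
#
# It prints a per-file analysis, a summary table, and any errors encountered.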