Datasets:
Add validation script to verify Parquet files against original CoNLL-U
- Loads dataset from HuggingFace Hub
- Reconstructs CoNLL-U format from Parquet data
- Compares with original files field-by-field
- Handles UPOS ClassLabel conversion
- Converts feats/misc between Python dict and CoNLL-U format
- Skips sentences with empty nodes (expected limitation)
- Tested on fr_gsd, en_ewt, it_isdt: 47,131 sentences, 0 errors
Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
- tools/05_validate_parquet.py +524 -0
tools/05_validate_parquet.py
ADDED
|
@@ -0,0 +1,524 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Validate Parquet files by comparing with original CoNLL-U data.
|
| 4 |
+
|
| 5 |
+
This script downloads the dataset from HuggingFace Hub, reconstructs CoNLL-U
|
| 6 |
+
format, and compares with the original files to ensure data integrity.
|
| 7 |
+
|
| 8 |
+
Repository: commul/universal_dependencies
|
| 9 |
+
|
| 10 |
+
Usage:
|
| 11 |
+
python 05_validate_parquet.py [--test] [--treebanks NAMES] [--revision BRANCH]
|
| 12 |
+
|
| 13 |
+
--test: Only validate 3 test treebanks (fr_gsd, en_ewt, it_isdt)
|
| 14 |
+
--treebanks: Comma-separated list of treebank names to validate
|
| 15 |
+
--revision: HuggingFace Hub revision/branch (default: 2.17)
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
import argparse
import json
import os
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Set

import conllu
from datasets import load_dataset
from dotenv import load_dotenv
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Load environment variables from a local .env file, if one is present.
load_dotenv()
# Universal Dependencies release being validated (overridable via the UD_VER env var).
UD_VER = os.getenv("UD_VER", "2.17")

# Base paths: original treebank checkouts and the per-release metadata file
# are expected to live next to this script.
SCRIPT_DIR = Path(__file__).parent
UD_REPOS_DIR = SCRIPT_DIR / "UD_repos"
METADATA_FILE = SCRIPT_DIR / f"metadata-{UD_VER}.json"
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def dict_str_to_conllu_format(dict_str: str) -> str:
    """
    Convert a Python dict string representation to CoNLL-U feats/misc format.

    Args:
        dict_str: String like "{'Key': 'Value', 'Key2': 'Value2'}" or "None" or "_"

    Returns:
        CoNLL-U format like "Key=Value|Key2=Value2", or "_" for empty/absent
        values. Strings that do not look like a dict literal pass through
        unchanged.
    """
    import ast

    if not dict_str or dict_str in ('None', '_'):
        return '_'

    # Only attempt parsing for strings shaped like a dict literal.
    if dict_str.startswith('{') and dict_str.endswith('}'):
        try:
            parsed = ast.literal_eval(dict_str)
            if isinstance(parsed, dict):
                # Empty dict or the {'None': ...} placeholder both mean "no value".
                if not parsed or (len(parsed) == 1 and 'None' in parsed):
                    return '_'
                # Convert to CoNLL-U format: Key=Value|Key2=Value2.
                # A None value denotes a bare flag (CoNLL-U MISC allows "Key"
                # with no "=Value"); render it without "=None" so the line
                # round-trips through conllu.parse, which stores such entries
                # with a None value.
                items = [
                    str(k) if v is None else f"{k}={v}"
                    for k, v in sorted(parsed.items())
                ]
                return '|'.join(items)
        except (ValueError, SyntaxError):
            pass

    return dict_str
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def reconstruct_conllu_line(token_idx: int, token_data: Dict[str, Any], is_mwt: bool = False) -> str:
    """
    Build one tab-separated CoNLL-U line from token data.

    Args:
        token_idx: Token index (1-based); ignored for MWT lines, which carry
            their own range id such as "1-2" in token_data['id'].
        token_data: Mapping holding the token's fields.
        is_mwt: True when this is a Multi-Word Token range line.

    Returns:
        A single CoNLL-U formatted line (10 tab-separated columns).
    """
    # feats/misc are stored as Python dict strings in Parquet and must be
    # rendered back to "Key=Value|..." form for CoNLL-U output.
    misc = dict_str_to_conllu_format(str(token_data.get('misc', '_')))

    if is_mwt:
        # MWT lines only carry id, form and misc; the seven columns in
        # between (lemma..deps) are always "_".
        return '\t'.join(
            [str(token_data['id']), str(token_data['form'])] + ['_'] * 7 + [misc]
        )

    # Regular word line. Every value is stringified because upos may arrive
    # as a ClassLabel integer index.
    def col(key: str) -> str:
        return str(token_data.get(key, '_'))

    columns = (
        str(token_idx),
        col('form'),
        col('lemma'),
        col('upos'),
        col('xpos'),
        dict_str_to_conllu_format(col('feats')),
        col('head'),
        col('deprel'),
        col('deps'),
        misc,
    )
    return '\t'.join(columns)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def reconstruct_conllu_from_example(example: Dict[str, Any], upos_names: Optional[List[str]] = None) -> str:
    """
    Reconstruct full CoNLL-U format from a Parquet example.

    Args:
        example: Dataset example with parallel per-token lists (tokens, lemmas,
            upos, xpos, feats, head, deprel, deps, misc) plus 'idx', 'text'
            and an optional 'mwt' list of multi-word-token entries.
        upos_names: UPOS label names used to map ClassLabel indices back to
            tag strings; when None, upos values are emitted as-is.

    Returns:
        CoNLL-U formatted string for this sentence: metadata comments,
        optional MWT lines, one line per token, and a trailing blank line.
    """
    lines = [
        f"# sent_id = {example['idx']}",
        f"# text = {example['text']}",
    ]

    # Index MWT range lines by their starting token id so each can be emitted
    # immediately before the first word it covers (e.g. "1-2" before token 1).
    mwt_ranges: Dict[int, Any] = {}
    for mwt in example.get('mwt', []):
        mwt_id = mwt['id']  # e.g., "1-2"
        if '-' in mwt_id:
            start = mwt_id.split('-', 1)[0]
            mwt_ranges[int(start)] = mwt

    for i, form in enumerate(example['tokens']):
        token_idx = i + 1  # CoNLL-U token ids are 1-based

        # Insert the MWT line (if any) that starts at this token.
        if token_idx in mwt_ranges:
            lines.append(reconstruct_conllu_line(0, mwt_ranges[token_idx], is_mwt=True))

        # Convert UPOS from a ClassLabel index back to its tag string.
        upos_value = example['upos'][i]
        if isinstance(upos_value, int) and upos_names:
            upos_value = upos_names[upos_value]

        # Add regular token line.
        token_data = {
            'form': form,
            'lemma': example['lemmas'][i],
            'upos': upos_value,
            'xpos': example['xpos'][i],
            'feats': example['feats'][i],
            'head': example['head'][i],
            'deprel': example['deprel'][i],
            'deps': example['deps'][i],
            'misc': example['misc'][i],
        }
        lines.append(reconstruct_conllu_line(token_idx, token_data))

    lines.append('')  # Sentences are terminated by an empty line
    return '\n'.join(lines)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def normalize_conllu_field(value: Any) -> str:
    """
    Normalize a CoNLL-U field value to a canonical string for comparison.

    Dicts (and strings that parse as dict literals) are rendered as the repr
    of a key-sorted dict; absent values (None, '_', 'None', empty dicts and
    the single-key {'None': ...} placeholder) all collapse to '_'. Anything
    else is returned as its plain string form.
    """
    import ast

    def render(mapping: Dict[str, Any]) -> str:
        # Empty mapping or the single-key 'None' placeholder mean "no value".
        if not mapping or (len(mapping) == 1 and 'None' in mapping):
            return '_'
        # Key-sorted repr gives a stable form regardless of insertion order.
        return str(dict(sorted(mapping.items())))

    if value is None:
        return '_'

    # OrderedDict or dict from the conllu library.
    if isinstance(value, dict):
        return render(value)

    text = str(value)
    if text in ('None', '_'):
        return '_'

    # A string shaped like a dict literal is parsed and normalized the same way.
    if text.startswith('{') and text.endswith('}'):
        try:
            parsed = ast.literal_eval(text)
        except (ValueError, SyntaxError):
            return text
        if isinstance(parsed, dict):
            return render(parsed)

    return text
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def compare_sentences(original: conllu.TokenList, reconstructed_str: str, verbose: bool = False) -> List[str]:
    """
    Compare an original sentence with its reconstructed CoNLL-U text.

    Args:
        original: Original sentence as a conllu TokenList.
        reconstructed_str: Reconstructed CoNLL-U string.
        verbose: Accepted for API compatibility; currently unused.

    Returns:
        A list of error messages (empty when everything matches), or None to
        signal that this sentence should be skipped (it contains empty nodes
        whose decimal IDs make the reconstruction unparseable).
    """
    # Parse the reconstructed text back into a TokenList.
    try:
        reconstructed = conllu.parse(reconstructed_str)[0]
    except Exception as e:
        # Empty nodes (decimal IDs like 22.1) cause ID misalignment and are
        # an expected limitation: tell the caller to skip such sentences.
        if "is not a valid ID" in str(e):
            return None
        return [f"Failed to parse reconstructed CoNLL-U: {e}"]

    errors: List[str] = []

    # Only syntactic words (integer IDs) take part in the field comparison.
    original_words = original.filter(id=lambda x: type(x) is int)
    reconstructed_words = reconstructed.filter(id=lambda x: type(x) is int)

    if len(original_words) != len(reconstructed_words):
        errors.append(f"Token count mismatch: original={len(original_words)}, reconstructed={len(reconstructed_words)}")
        return errors

    # Field-by-field comparison, normalized so dict ordering and None/'_'
    # representations cannot cause spurious mismatches.
    fields = ('form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc')
    for i, (orig_tok, recon_tok) in enumerate(zip(original_words, reconstructed_words)):
        for field in fields:
            orig_val = normalize_conllu_field(orig_tok[field])
            recon_val = normalize_conllu_field(recon_tok[field])
            if orig_val != recon_val:
                errors.append(f"  Token {i+1} field '{field}': '{orig_val}' != '{recon_val}'")

    # Multi-word tokens (tuple IDs) are compared by count and surface form.
    original_mwts = [tok for tok in original if isinstance(tok['id'], tuple)]
    reconstructed_mwts = [tok for tok in reconstructed if isinstance(tok['id'], tuple)]

    if len(original_mwts) != len(reconstructed_mwts):
        errors.append(f"MWT count mismatch: original={len(original_mwts)}, reconstructed={len(reconstructed_mwts)}")
    else:
        for orig_mwt, recon_mwt in zip(original_mwts, reconstructed_mwts):
            if orig_mwt['form'] != recon_mwt['form']:
                errors.append(f"MWT form mismatch: '{orig_mwt['form']}' != '{recon_mwt['form']}'")

    return errors
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def validate_treebank(
    name: str,
    metadata: Dict[str, Any],
    revision: str = "2.17",
    verbose: bool = True
) -> Dict[str, Any]:
    """
    Validate a single treebank by comparing HF Hub data with original CoNLL-U.

    Args:
        name: Treebank name (e.g., "fr_gsd")
        metadata: Treebank metadata; must contain "splits" (split name ->
            {"files": [...]}) and "dirname" (directory under UD_REPOS_DIR).
        revision: HuggingFace Hub revision
        verbose: Print progress messages

    Returns:
        Validation results dictionary with keys 'name', 'splits' (per-split
        sentence/error counts and error details), 'total_sentences',
        'total_errors', 'success', and — only on a load failure — 'error'.
    """
    if verbose:
        print(f"Validating {name}...")

    results = {
        'name': name,
        'splits': {},
        'total_sentences': 0,
        'total_errors': 0,
        'success': True
    }

    try:
        # Load from HuggingFace Hub
        if verbose:
            print(f"  Loading from HuggingFace Hub (revision={revision})...")

        # Load from parquet files directly (more reliable than dataset builder)
        parquet_files = {}
        for split in metadata.get("splits", {}).keys():
            parquet_path = f"hf://datasets/commul/universal_dependencies@{revision}/parquet/{name}/{split}.parquet"
            parquet_files[split] = parquet_path

        ds = load_dataset("parquet", data_files=parquet_files)

        if verbose:
            print(f"  Loaded: {', '.join([f'{k}={len(v)}' for k, v in ds.items()])} examples")

    except Exception as e:
        # A load failure fails the whole treebank but not the whole run.
        results['success'] = False
        results['error'] = f"Failed to load from HF Hub: {e}"
        return results

    # Validate each split
    for split_name, split_data in metadata.get("splits", {}).items():
        # Skip splits that are in the metadata but missing from the Hub data.
        if split_name not in ds:
            continue

        files = split_data.get("files", [])
        if not files:
            continue

        if verbose:
            print(f"  Validating {split_name} split...")

        split_results = {
            'sentences': 0,
            'errors': 0,
            'error_details': []
        }

        # Load original CoNLL-U files (a split may span several files).
        original_sentences = []
        for file_path in files:
            filename = Path(file_path).name
            full_path = UD_REPOS_DIR / metadata["dirname"] / filename

            if not full_path.exists():
                # Recorded as a detail only; the sentence-count mismatch
                # below will surface it as an error.
                split_results['error_details'].append(f"Original file not found: {full_path}")
                continue

            with open(full_path, "r", encoding="utf-8") as f:
                original_sentences.extend(list(conllu.parse_incr(f)))

        # Compare with HF Hub data
        dataset = ds[split_name]

        if len(original_sentences) != len(dataset):
            split_results['error_details'].append(
                f"Sentence count mismatch: original={len(original_sentences)}, hub={len(dataset)}"
            )
            split_results['errors'] += 1

        # Extract UPOS label names from dataset features (only present when
        # the column was stored as a Sequence of ClassLabel).
        upos_names = None
        if hasattr(dataset.features['upos'], 'feature') and hasattr(dataset.features['upos'].feature, 'names'):
            upos_names = dataset.features['upos'].feature.names

        # Compare sentence by sentence; zip truncates to the shorter side if
        # the counts differ (already reported above).
        skipped_sentences = 0
        for i, (original_sent, hub_example) in enumerate(zip(original_sentences, dataset)):
            split_results['sentences'] += 1

            # Reconstruct CoNLL-U from Hub data
            reconstructed_str = reconstruct_conllu_from_example(hub_example, upos_names=upos_names)

            # Compare
            errors = compare_sentences(original_sent, reconstructed_str, verbose=False)

            if errors is None:
                # Sentence skipped (e.g., contains empty nodes causing ID misalignment)
                skipped_sentences += 1
                continue

            if errors:
                split_results['errors'] += len(errors)
                if split_results['errors'] <= 10:  # Limit error details
                    split_results['error_details'].append(f"Sentence {i} (idx={hub_example['idx']}):")
                    split_results['error_details'].extend(errors)

        if skipped_sentences > 0 and verbose:
            print(f"  (Skipped {skipped_sentences} sentences with empty nodes)")

        results['splits'][split_name] = split_results
        results['total_sentences'] += split_results['sentences']
        results['total_errors'] += split_results['errors']

        if verbose:
            if split_results['errors'] == 0:
                print(f"    ✓ {split_results['sentences']} sentences validated")
            else:
                print(f"    ✗ {split_results['errors']} errors in {split_results['sentences']} sentences")

    if results['total_errors'] > 0:
        results['success'] = False

    if verbose:
        print()

    return results
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def main():
    """Main entry point for validation.

    Parses CLI arguments, loads the per-release metadata file, selects which
    treebanks to check, validates each against the HF Hub Parquet data, and
    prints a summary.

    Returns:
        Process exit code: 0 when every treebank validated, 1 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Validate Parquet files against original CoNLL-U data"
    )
    parser.add_argument(
        "--test",
        action="store_true",
        help="Only validate 3 test treebanks (fr_gsd, en_ewt, it_isdt)"
    )
    parser.add_argument(
        "--treebanks",
        type=str,
        help="Comma-separated list of treebank names to validate"
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=UD_VER,
        help=f"HuggingFace Hub revision (default: {UD_VER})"
    )
    # NOTE(review): with default=True this flag is effectively a no-op —
    # output is verbose unless -q/--quiet is given. Kept as-is for CLI
    # compatibility.
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        default=True,
        help="Print progress messages (default: True)"
    )
    parser.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="Suppress progress messages"
    )

    args = parser.parse_args()
    # --quiet always wins over --verbose.
    verbose = args.verbose and not args.quiet

    # Load metadata
    if not METADATA_FILE.exists():
        print(f"Error: Metadata file not found: {METADATA_FILE}", file=sys.stderr)
        return 1

    with open(METADATA_FILE, "r", encoding="utf-8") as f:
        metadata = json.load(f)

    if verbose:
        print(f"Loaded metadata for {len(metadata)} treebanks")
        print(f"HuggingFace Hub: commul/universal_dependencies (revision={args.revision})")
        print()

    # Determine which treebanks to validate. Names not present in the
    # metadata are silently dropped.
    if args.test:
        treebanks_to_validate = ["fr_gsd", "en_ewt", "it_isdt"]
        treebanks_to_validate = [t for t in treebanks_to_validate if t in metadata]
        if verbose:
            print(f"TEST MODE: Validating {len(treebanks_to_validate)} treebanks")
    elif args.treebanks:
        treebanks_to_validate = [t.strip() for t in args.treebanks.split(",")]
        treebanks_to_validate = [t for t in treebanks_to_validate if t in metadata]
        if verbose:
            print(f"Validating {len(treebanks_to_validate)} specified treebanks")
    else:
        treebanks_to_validate = sorted(metadata.keys())
        if verbose:
            print(f"Validating all {len(treebanks_to_validate)} treebanks")

    if verbose:
        print()

    # Validate treebanks
    all_results = []
    success_count = 0
    fail_count = 0

    for i, name in enumerate(treebanks_to_validate, 1):
        if verbose:
            print(f"[{i}/{len(treebanks_to_validate)}] {name}")

        try:
            results = validate_treebank(
                name,
                metadata[name],
                revision=args.revision,
                verbose=verbose
            )

            all_results.append(results)

            if results['success']:
                success_count += 1
            else:
                fail_count += 1
                # Only the train split's errors are echoed here; full details
                # remain available in the results dictionary.
                if verbose and 'error_details' in results.get('splits', {}).get('train', {}):
                    # Print first few errors
                    for detail in results['splits']['train']['error_details'][:20]:
                        print(f"    {detail}")

        except Exception as e:
            # An unexpected failure in one treebank must not abort the run.
            print(f"  Error: {e}", file=sys.stderr)
            fail_count += 1

    # Summary
    if verbose:
        print("=" * 60)
        print(f"Validation complete: {success_count} passed, {fail_count} failed")

        total_sentences = sum(r['total_sentences'] for r in all_results)
        total_errors = sum(r['total_errors'] for r in all_results)

        print(f"Total sentences validated: {total_sentences:,}")
        print(f"Total errors found: {total_errors:,}")

        if total_errors == 0:
            print()
            print("✅ All data matches! Parquet files are correct.")

    return 0 if fail_count == 0 else 1
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
# Script entry point: exit with main()'s status code (0 on success, 1 on failure).
if __name__ == "__main__":
    sys.exit(main())
|