iiegn Claude Sonnet 4.5 committed on
Commit 2160041 · verified · 1 parent: d0de5f4

Replace parquet generation and validation scripts with ud-hf-parquet-tools wrappers


The two large standalone scripts (800+ and 600+ lines) have been replaced with
thin wrappers (~90 lines each) that delegate to the ud-hf-parquet-tools library.

Changes:
- tools/04_generate_parquet.py: Now wraps ud-hf-parquet-tools generate
- tools/05_validate_parquet.py: Now wraps ud-hf-parquet-tools validate
- Both keep the same CLI interface for backward compatibility
- All functionality now lives in the reusable library

Benefits:
- Code reuse and maintainability
- Single source of truth for parquet generation/validation
- Library can be used by other projects
- Easier testing and debugging

Note: Requires ud-hf-parquet-tools to be published to PyPI or installed
locally. Until it is published, install the library directly from:
https://github.com/bot-zen/ud-hf-parquet-tools
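
For reference, both wrappers delegate the same way (visible in the diffs
below): they rebuild sys.argv and call the library's CLI entry point. A
minimal sketch of that pattern, assuming the library is importable as
ud_hf_parquet_tools:

    import sys
    from ud_hf_parquet_tools.cli import main as ud_tools_main

    # Equivalent to running: ud-hfp-tools generate --test
    original_argv = sys.argv
    try:
        sys.argv = ["ud-hfp-tools", "generate", "--test"]
        exit_code = ud_tools_main()
    finally:
        sys.argv = original_argv
    raise SystemExit(exit_code)

Until the package is on PyPI, installing straight from the repository
(e.g. pip install git+https://github.com/bot-zen/ud-hf-parquet-tools)
should also work.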

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>

tools/04_generate_parquet.py CHANGED
@@ -3,803 +3,89 @@
3
  # /// script
4
  # requires-python = ">=3.12"
5
  # dependencies = [
6
- # "conllu",
7
- # "datasets",
8
- # "load-dotenv",
9
  # ]
10
  # ///
11
  """
12
  Generate Parquet files from Universal Dependencies CoNLL-U data.
13
 
14
- This script converts CoNLL-U files from UD repositories into Parquet format
15
- for efficient loading with HuggingFace datasets >=4.0.0.
16
-
17
- IMPORTANT: This script handles several CoNLL-U parsing issues to ensure
18
- 100% fidelity. See CONLLU_PARSING_ISSUES.md for full documentation of:
19
- - Double equals parsing bug (Gloss==POSS → None)
20
- - Duplicate metadata keys (multiple # media entries lost)
21
- - Empty metadata values (# text_en = ignored)
22
- - Keys without values (# newpar → "newpar = None")
23
 
24
  Repository: commul/universal_dependencies
25
 
26
  Usage:
27
- python 04_generate_parquet.py [--test] [--treebanks NAMES]
28
 
29
  --test: Only process 3 test treebanks (fr_gsd, en_ewt, it_isdt)
30
  --treebanks: Comma-separated list of treebank names to process
31
  """
32
 
33
  import argparse
34
- import json
35
  import os
36
  import sys
37
- import traceback
38
- import warnings
39
  from pathlib import Path
40
- from typing import Any, Dict, List
41
 
42
- import conllu
43
- import datasets
44
- import yaml
45
  from dotenv import load_dotenv
 
47
 
48
  # Load environment variables
49
  load_dotenv()
50
  UD_VER = os.getenv("UD_VER", "2.17")
51
 
52
- # Base paths
53
- SCRIPT_DIR = Path(__file__).parent
54
- REPO_ROOT = SCRIPT_DIR.parent
55
- UD_REPOS_DIR = SCRIPT_DIR / "UD_repos"
56
- PARQUET_OUTPUT_DIR = REPO_ROOT / "parquet"
57
- METADATA_FILE = SCRIPT_DIR / f"metadata-{UD_VER}.json"
58
- BLOCKED_TREEBANKS_FILE = SCRIPT_DIR / "blocked_treebanks.yaml"
59
-
60
- # Warning counters
61
- MALFORMED_FEATS_COUNT = 0
62
- UNSORTED_FEATS_COUNT = 0
63
-
64
-
65
- def sort_feats_dict(feats_dict: Dict[str, str]) -> Dict[str, str]:
66
- """
67
- Sort FEATS dictionary alphabetically by keys (case-insensitive) per UD spec.
68
-
69
- Also sorts values if they contain multiple comma-separated values.
70
-
71
- Args:
72
- feats_dict: Dictionary of feature name -> value
73
-
74
- Returns:
75
- Sorted dictionary (OrderedDict preserving sorted order)
76
- """
77
- from collections import OrderedDict
78
-
79
- sorted_dict = OrderedDict()
80
-
81
- # Sort keys case-insensitively
82
- for key in sorted(feats_dict.keys(), key=str.lower):
83
- value = feats_dict[key]
84
-
85
- # If value contains comma-separated items, sort them too
86
- if ',' in value:
87
- value_parts = [v.strip() for v in value.split(',')]
88
- value = ','.join(sorted(value_parts, key=str.lower))
89
-
90
- sorted_dict[key] = value
91
-
92
- return sorted_dict
93
-
94
-
95
- def is_feats_sorted(feats_dict: Dict[str, str]) -> bool:
96
- """Check if FEATS dictionary is sorted per UD spec."""
97
- keys = list(feats_dict.keys())
98
- sorted_keys = sorted(keys, key=str.lower)
99
-
100
- if keys != sorted_keys:
101
- return False
102
-
103
- # Check if values with commas are sorted
104
- for value in feats_dict.values():
105
- if ',' in value:
106
- value_parts = [v.strip() for v in value.split(',')]
107
- sorted_parts = sorted(value_parts, key=str.lower)
108
- if value_parts != sorted_parts:
109
- return False
110
-
111
- return True
112
-
113
-
114
- def conllu_dict_to_string(value: Any, field_name: str = "", sent_id: str = "", is_feats: bool = False) -> str:
115
- """
116
- Convert CoNLL-U field value to standard CoNLL-U string format.
117
- Used for reconstruction/output.
118
-
119
- Args:
120
- value: Field value (dict, OrderedDict, list, string, or None)
121
- field_name: Name of the field (for warnings)
122
- sent_id: Sentence ID (for warnings)
123
- is_feats: True if this is a FEATS field (will be sorted per UD spec)
124
-
125
- Returns:
126
- CoNLL-U format string ("Key=Val|Key2=Val2" or "_")
127
- """
128
- global MALFORMED_FEATS_COUNT, UNSORTED_FEATS_COUNT
129
-
130
- if value is None:
131
- return "_"
132
-
133
- if isinstance(value, dict):
134
- if not value:
135
- return "_"
136
-
137
- # For FEATS fields, check if sorted per UD spec (but don't auto-correct)
138
- if is_feats:
139
- # Check if original was sorted
140
- if not is_feats_sorted(value):
141
- UNSORTED_FEATS_COUNT += 1
142
- if UNSORTED_FEATS_COUNT <= 10: # Only show first 10 warnings
143
- orig_str = "|".join([f"{k}={v}" for k, v in value.items()])
144
- warnings.warn(
145
- f"FEATS field not sorted per UD spec in {sent_id}: {orig_str}",
146
- UserWarning
147
- )
148
- # Note: We preserve original order for fidelity, only warning about violations
149
-
150
- # Convert dict to CoNLL-U format: Key=Value|Key2=Value2
151
- # Handle malformed entries where value is None (just "Key" without "=Value")
152
- items = []
153
- for k, v in value.items():
154
- if v is None or v == "":
155
- MALFORMED_FEATS_COUNT += 1
156
- if MALFORMED_FEATS_COUNT <= 10: # Only show first 10 warnings
157
- warnings.warn(
158
- f"Malformed {field_name} entry in {sent_id}: '{k}' without '=Value'",
159
- UserWarning
160
- )
161
- items.append(k) # Malformed: just key without value
162
- else:
163
- items.append(f"{k}={v}")
164
- return "|".join(items)
165
-
166
- if isinstance(value, list):
167
- if not value:
168
- return "_"
169
- # Convert list of tuples to CoNLL-U DEPS format: head:rel|head2:rel2
170
- # The conllu library parses DEPS as list of (rel, head) tuples
171
- # Handle both integer heads and tuple heads (for empty nodes like (21, '.', 1))
172
- items = []
173
- for rel, head in value:
174
- if isinstance(head, tuple) and len(head) == 3:
175
- # Empty node reference: (21, '.', 1) -> "21.1"
176
- head_str = f"{head[0]}.{head[2]}"
177
- else:
178
- head_str = str(head)
179
- items.append(f"{head_str}:{rel}")
180
- return "|".join(items)
181
-
182
- # Already a string
183
- s = str(value)
184
- if s == "None":
185
- return "_"
186
- return s
187
-
188
-
189
- def conllu_optional_field(value: Any, field_name: str = "", sent_id: str = "", is_feats: bool = False) -> str | None:
190
- """
191
- Convert CoNLL-U optional field value to Python representation.
192
- Returns None for unspecified values (_), proper format otherwise.
193
-
194
- Use for: XPOS, FEATS, DEPS, MISC (optional fields per UD spec)
195
-
196
- Args:
197
- value: Field value (dict, OrderedDict, list, string, or None)
198
- field_name: Name of the field (for warnings)
199
- sent_id: Sentence ID (for warnings)
200
- is_feats: True if this is a FEATS field (will be sorted per UD spec)
201
-
202
- Returns:
203
- None for unspecified, or CoNLL-U format string
204
- """
205
- global MALFORMED_FEATS_COUNT, UNSORTED_FEATS_COUNT
206
-
207
- if value is None:
208
- return None
209
-
210
- if isinstance(value, dict):
211
- if not value:
212
- return None # Empty dict = no features
213
-
214
- # For FEATS fields, check if sorted per UD spec (but don't auto-correct)
215
- if is_feats:
216
- # Check if original was sorted
217
- if not is_feats_sorted(value):
218
- UNSORTED_FEATS_COUNT += 1
219
- if UNSORTED_FEATS_COUNT <= 10: # Only show first 10 warnings
220
- orig_str = "|".join([f"{k}={v}" for k, v in value.items()])
221
- warnings.warn(
222
- f"FEATS field not sorted per UD spec in {sent_id}: {orig_str}",
223
- UserWarning
224
- )
225
- # Note: We preserve original order for fidelity, only warning about violations
226
-
227
- # Convert dict to CoNLL-U format: Key=Value|Key2=Value2
228
- # Handle malformed entries where value is None (just "Key" without "=Value")
229
- items = []
230
- for k, v in value.items():
231
- if v is None or v == "":
232
- MALFORMED_FEATS_COUNT += 1
233
- if MALFORMED_FEATS_COUNT <= 10: # Only show first 10 warnings
234
- warnings.warn(
235
- f"Malformed {field_name} entry in {sent_id}: '{k}' without '=Value'",
236
- UserWarning
237
- )
238
- items.append(k) # Malformed: just key without value
239
- else:
240
- items.append(f"{k}={v}")
241
- return "|".join(items)
242
-
243
- if isinstance(value, list):
244
- if not value:
245
- return None # Empty list
246
- # Convert list of tuples to CoNLL-U DEPS format: head:rel|head2:rel2
247
- # The conllu library parses DEPS as list of (rel, head) tuples
248
- # Handle both integer heads and tuple heads (for empty nodes like (21, '.', 1))
249
- items = []
250
- for rel, head in value:
251
- if isinstance(head, tuple) and len(head) == 3:
252
- # Empty node reference: (21, '.', 1) -> "21.1"
253
- head_str = f"{head[0]}.{head[2]}"
254
- else:
255
- head_str = str(head)
256
- items.append(f"{head_str}:{rel}")
257
- return "|".join(items)
258
-
259
- # String value
260
- s = str(value)
261
- if s == "None" or s == "_" or s == "":
262
- return None
263
- return s
264
-
265
-
266
- def extract_raw_fields_from_sentence(sentence_text: str) -> dict:
267
- """
268
- Extract raw FEATS, XPOS, DEPS, and MISC fields directly from token lines.
269
-
270
- This bypasses the conllu library's parsing which has bugs with double equals (==).
271
-
272
- KNOWN ISSUE: Double equals parsing bug in conllu library
273
- Example: "Gloss==POSS.1SG.NOM|RX==[PRO]"
274
- - Correct: {'Gloss': '=POSS.1SG.NOM', 'RX': '=[PRO]'}
275
- - conllu parses: {'Gloss': None, 'RX': None}
276
-
277
- Affected treebanks: bej_autogramm (Beja), and any with values starting with "="
278
-
279
- See tools/CONLLU_PARSING_ISSUES.md for full documentation.
280
-
281
- Returns:
282
- Dict mapping token_id -> {'feats': str, 'xpos': str, 'deps': str, 'misc': str}
283
- """
284
- raw_fields = {}
285
-
286
- for line in sentence_text.split('\n'):
287
- line = line.strip()
288
- if not line or line.startswith('#'):
289
- continue
290
-
291
- # Split into 10 CoNLL-U fields
292
- fields = line.split('\t')
293
- if len(fields) < 10:
294
- continue
295
-
296
- token_id = fields[0]
297
- xpos = fields[4] if fields[4] != '_' else None
298
- feats = fields[5] if fields[5] != '_' else None
299
- deps = fields[8] if fields[8] != '_' else None
300
- misc = fields[9] if fields[9] != '_' else None
301
-
302
- raw_fields[token_id] = {
303
- 'xpos': xpos,
304
- 'feats': feats,
305
- 'deps': deps,
306
- 'misc': misc
307
- }
308
-
309
- return raw_fields
310
-
311
-
312
- def extract_raw_comments_from_sentence(sentence_text: str) -> tuple[list, str, str]:
313
- """
314
- Extract raw comment lines from a sentence before conllu parsing.
315
-
316
- This preserves metadata that conllu's dictionary-based storage loses:
317
-
318
- KNOWN ISSUES:
319
- 1. Duplicate metadata keys: be_hse has 1,216 sentences with duplicate "media" keys
320
- - conllu keeps only last value, we preserve all occurrences
321
- 2. Keys without values: "# newpar" stored as {'newpar': None}
322
- - We preserve as just "newpar" (not "newpar = None")
323
- 3. Empty values: "# text_en =" completely ignored by conllu
324
- - We preserve as "text_en ="
325
-
326
- See tools/CONLLU_PARSING_ISSUES.md for full documentation.
327
-
328
- Returns:
329
- (comments_list, sent_id, text) where comments_list preserves all comments
330
- including duplicates, empty values, and original ordering.
331
- """
332
- comments = []
333
- sent_id = None
334
- text = None
335
-
336
- for line in sentence_text.split('\n'):
337
- line = line.strip()
338
- if not line.startswith('#'):
339
- continue
340
-
341
- # Remove leading "# "
342
- comment_content = line[1:].strip()
343
-
344
- if not comment_content:
345
- continue
346
-
347
- # Check if it's a key=value pair
348
- if ' = ' in comment_content:
349
- key, value = comment_content.split(' = ', 1)
350
- key = key.strip()
351
- value = value.strip()
352
-
353
- if key == 'sent_id':
354
- sent_id = value
355
- comments.append("__SENT_ID__")
356
- elif key == 'text':
357
- text = value
358
- comments.append("__TEXT__")
359
- else:
360
- # Store raw: "key = value" (preserves duplicates and empty values)
361
- if value:
362
- comments.append(f"{key} = {value}")
363
- else:
364
- # Empty value like "# text_en ="
365
- comments.append(f"{key} =")
366
- else:
367
- # Comment without = (like "# newpar" or "# # newpar")
368
- comments.append(comment_content)
369
-
370
- return comments, sent_id, text
371
-
372
-
373
- def extract_examples_from_conllu(filepath: str) -> List[Dict[str, Any]]:
374
- """
375
- Extract examples from a CoNLL-U file with MWT and empty node support.
376
-
377
- Args:
378
- filepath: Path to the CoNLL-U file
379
-
380
- Returns:
381
- List of example dictionaries matching the dataset schema
382
- """
383
- examples = []
384
-
385
- # Read raw file to extract comments before conllu parsing
386
- with open(filepath, "r", encoding="utf-8") as f:
387
- file_content = f.read()
388
-
389
- # Split into sentence blocks
390
- sentence_blocks = file_content.split('\n\n')
391
-
392
- # Parse with conllu
393
- with open(filepath, "r", encoding="utf-8") as data_file:
394
- tokenlist = list(conllu.parse_incr(data_file))
395
-
396
- for idx, sent in enumerate(tokenlist):
397
- # Get raw comments and fields from original text
398
- if idx < len(sentence_blocks):
399
- comments, sent_id, text = extract_raw_comments_from_sentence(sentence_blocks[idx])
400
- raw_fields = extract_raw_fields_from_sentence(sentence_blocks[idx])
401
- else:
402
- comments = []
403
- sent_id = None
404
- text = None
405
- raw_fields = {}
406
-
407
- # Fallback to conllu metadata if needed
408
- if sent_id is None and "sent_id" in sent.metadata:
409
- sent_id = sent.metadata["sent_id"]
410
- if sent_id is None:
411
- sent_id = str(idx)
412
-
413
- if text is None and "text" in sent.metadata:
414
- text = sent.metadata["text"]
415
-
416
- # Note: comments and raw fields are now extracted from raw file above,
417
- # preserving duplicates and bypassing conllu parsing bugs
418
-
419
- # Extract Multi-Word Tokens (MWTs) - tokens with tuple IDs like (1, '-', 2)
420
- # Note: Exclude empty nodes which have '.' as middle element: (22, '.', 1)
421
- # Per UD spec: MWTs can have ID, FORM, MISC, and optionally FEATS (for "Typo=Yes")
422
- mwts = []
423
- for token in sent:
424
- if isinstance(token["id"], tuple) and len(token["id"]) == 3 and token["id"][1] == '-':
425
- # MWT line (e.g., (1, '-', 2) for "1-2")
426
- mwt_id = f"{token['id'][0]}-{token['id'][2]}"
427
-
428
- # Use raw fields if available (bypasses conllu parsing bugs)
429
- if mwt_id in raw_fields:
430
- feats = raw_fields[mwt_id]['feats']
431
- misc = raw_fields[mwt_id]['misc']
432
- else:
433
- # Fallback to conllu parsed values
434
- feats = conllu_optional_field(token["feats"], "MWT.FEATS", sent_id, is_feats=True)
435
- misc = conllu_optional_field(token["misc"], "MWT.MISC", sent_id)
436
-
437
- mwts.append({
438
- "id": mwt_id,
439
- "form": token["form"],
440
- "feats": feats,
441
- "misc": misc
442
- })
443
-
444
- # Extract Empty Nodes - tokens with decimal IDs like 22.1
445
- # These are represented as tuples: (22, '.', 1)
446
- empty_nodes = []
447
- for token in sent:
448
- if isinstance(token["id"], tuple) and len(token["id"]) == 3 and token["id"][1] == '.':
449
- # Empty node (e.g., (22, '.', 1) for ID "22.1")
450
- empty_node_id = f"{token['id'][0]}.{token['id'][2]}"
451
-
452
- # Use raw fields if available (bypasses conllu parsing bugs)
453
- if empty_node_id in raw_fields:
454
- xpos = raw_fields[empty_node_id]['xpos']
455
- feats = raw_fields[empty_node_id]['feats']
456
- deps = raw_fields[empty_node_id]['deps']
457
- misc = raw_fields[empty_node_id]['misc']
458
- else:
459
- # Fallback to conllu parsed values
460
- xpos = token["xpos"] or None
461
- feats = conllu_dict_to_string(token["feats"], "FEATS", f"{sent_id}:{empty_node_id}", is_feats=True)
462
- deps = conllu_dict_to_string(token["deps"], "DEPS", f"{sent_id}:{empty_node_id}")
463
- misc = conllu_dict_to_string(token["misc"], "MISC", f"{sent_id}:{empty_node_id}")
464
-
465
- empty_nodes.append({
466
- "id": empty_node_id,
467
- "form": token["form"],
468
- "lemma": token["lemma"] or "_",
469
- "upos": token["upos"] or "_",
470
- "xpos": xpos or "_",
471
- "feats": feats,
472
- "head": str(token["head"]) if token["head"] is not None else "_",
473
- "deprel": str(token["deprel"]) if token["deprel"] else "_",
474
- "deps": deps,
475
- "misc": misc
476
- })
477
-
478
- # Filter to syntactic words only (exclude MWTs and empty nodes)
479
- sent_filtered = sent.filter(id=lambda x: type(x) is int)
480
-
481
- # Extract token fields from syntactic words
482
- tokens = [token["form"] for token in sent_filtered]
483
-
484
- # If text wasn't in metadata, reconstruct from tokens
485
- if text is None:
486
- text = " ".join(tokens)
487
-
488
- # Extract fields for regular tokens, using raw fields when available
489
- xpos_list = []
490
- feats_list = []
491
- deps_list = []
492
- misc_list = []
493
-
494
- for token in sent_filtered:
495
- token_id = str(token["id"])
496
-
497
- # Use raw fields if available (bypasses conllu parsing bugs)
498
- if token_id in raw_fields:
499
- xpos_list.append(raw_fields[token_id]['xpos'])
500
- feats_list.append(raw_fields[token_id]['feats'])
501
- deps_list.append(raw_fields[token_id]['deps'])
502
- misc_list.append(raw_fields[token_id]['misc'])
503
- else:
504
- # Fallback to conllu parsed values
505
- xpos_list.append(conllu_optional_field(token["xpos"], "XPOS", sent_id))
506
- feats_list.append(conllu_optional_field(token["feats"], "FEATS", sent_id, is_feats=True))
507
- deps_list.append(conllu_optional_field(token["deps"], "DEPS", sent_id))
508
- misc_list.append(conllu_optional_field(token["misc"], "MISC", sent_id))
509
-
510
- # Create example with proper types per UD specification:
511
- # - Required fields (FORM, LEMMA, UPOS, HEAD, DEPREL): always string
512
- # - Optional fields (XPOS, FEATS, DEPS, MISC): None when unspecified
513
- example = {
514
- "sent_id": sent_id,
515
- "text": text,
516
- "comments": comments,
517
- "tokens": tokens,
518
- "lemmas": [token["lemma"] for token in sent_filtered],
519
- "upos": [token["upos"] for token in sent_filtered],
520
- "xpos": xpos_list,
521
- "feats": feats_list,
522
- "head": [str(token["head"]) if token["head"] is not None else "_" for token in sent_filtered],
523
- "deprel": [str(token["deprel"]) if token["deprel"] else "_" for token in sent_filtered],
524
- "deps": deps_list,
525
- "misc": misc_list,
526
- "mwt": mwts,
527
- "empty_nodes": empty_nodes,
528
- }
529
-
530
- examples.append(example)
531
-
532
- return examples
533
-
534
-
535
- def generate_parquet_for_treebank(
536
- name: str,
537
- metadata: Dict[str, Any],
538
- output_dir: Path,
539
- verbose: bool = True
540
- ) -> bool:
541
- """
542
- Generate Parquet files for a single treebank.
543
-
544
- Args:
545
- name: Treebank name (e.g., "fr_gsd")
546
- metadata: Treebank metadata including splits and file paths
547
- output_dir: Output directory for Parquet files
548
- verbose: Print progress messages
549
-
550
- Returns:
551
- True if successful, False otherwise
552
- """
553
- if verbose:
554
- print(f"Processing {name}...")
555
-
556
- # Create output directory for this treebank
557
- treebank_output_dir = output_dir / name
558
- treebank_output_dir.mkdir(parents=True, exist_ok=True)
559
-
560
- # Process each split
561
- dataset_dict = {}
562
-
563
- for split_name, split_data in metadata.get("splits", {}).items():
564
- files = split_data.get("files", [])
565
- if not files:
566
- continue
567
-
568
- if verbose:
569
- print(f" - {split_name}: {len(files)} file(s)")
570
-
571
- # Extract examples from all files in this split
572
- all_examples = []
573
- for file_path in files:
574
- # Extract just the filename from the path (metadata includes dirname/revision/filename)
575
- # e.g., "UD_French-GSD/r2.17/fr_gsd-ud-train.conllu" -> "fr_gsd-ud-train.conllu"
576
- filename = Path(file_path).name
577
-
578
- # Construct full path: UD_repos/dirname/filename
579
- full_path = UD_REPOS_DIR / metadata["dirname"] / filename
580
-
581
- if not full_path.exists():
582
- print(f" Warning: File not found: {full_path}", file=sys.stderr)
583
- continue
584
-
585
- try:
586
- examples = extract_examples_from_conllu(str(full_path))
587
- all_examples.extend(examples)
588
- except Exception as e:
589
- print(f" Error processing {full_path}: {e}", file=sys.stderr)
590
- return False
591
-
592
- if not all_examples:
593
- print(f" Warning: No examples extracted for {split_name}", file=sys.stderr)
594
- continue
595
-
596
- # Define features
597
- features = datasets.Features({
598
- "sent_id": datasets.Value("string"),
599
- "text": datasets.Value("string"),
600
- "comments": datasets.Sequence(datasets.Value("string")),
601
- "tokens": datasets.Sequence(datasets.Value("string")),
602
- "lemmas": datasets.Sequence(datasets.Value("string")),
603
- "upos": datasets.Sequence(
604
- datasets.features.ClassLabel(
605
- names=[
606
- "NOUN", "PUNCT", "ADP", "NUM", "SYM", "SCONJ",
607
- "ADJ", "PART", "DET", "CCONJ", "PROPN", "PRON",
608
- "X", "_", "ADV", "INTJ", "VERB", "AUX",
609
- ]
610
- )
611
- ),
612
- "xpos": datasets.Sequence(datasets.Value("string")),
613
- "feats": datasets.Sequence(datasets.Value("string")),
614
- "head": datasets.Sequence(datasets.Value("string")),
615
- "deprel": datasets.Sequence(datasets.Value("string")),
616
- "deps": datasets.Sequence(datasets.Value("string")),
617
- "misc": datasets.Sequence(datasets.Value("string")),
618
- "mwt": [{
619
- "id": datasets.Value("string"),
620
- "form": datasets.Value("string"),
621
- "feats": datasets.Value("string"),
622
- "misc": datasets.Value("string")
623
- }],
624
- "empty_nodes": [{
625
- "id": datasets.Value("string"),
626
- "form": datasets.Value("string"),
627
- "lemma": datasets.Value("string"),
628
- "upos": datasets.Value("string"),
629
- "xpos": datasets.Value("string"),
630
- "feats": datasets.Value("string"),
631
- "head": datasets.Value("string"),
632
- "deprel": datasets.Value("string"),
633
- "deps": datasets.Value("string"),
634
- "misc": datasets.Value("string")
635
- }],
636
- })
637
-
638
- # Create dataset from examples
639
- dataset = datasets.Dataset.from_list(all_examples, features=features)
640
- dataset_dict[split_name] = dataset
641
-
642
- if verbose:
643
- print(f" Created dataset with {len(dataset)} examples")
644
-
645
- if not dataset_dict:
646
- print(f" Warning: No splits processed for {name}", file=sys.stderr)
647
- return False
648
-
649
- # Create DatasetDict and save to Parquet
650
- dataset_dict_obj = datasets.DatasetDict(dataset_dict)
651
-
652
- try:
653
- # Save as Parquet files
654
- for split_name, dataset in dataset_dict_obj.items():
655
- parquet_path = treebank_output_dir / f"{split_name}.parquet"
656
- dataset.to_parquet(parquet_path)
657
- if verbose:
658
- print(f" Saved {split_name}.parquet ({parquet_path.stat().st_size / 1024 / 1024:.2f} MB)")
659
-
660
- return True
661
-
662
- except Exception as e:
663
- print(f" Error saving Parquet files: {e}", file=sys.stderr)
664
- return False
665
 
666
 
667
  def main():
668
- """Main entry point for Parquet generation."""
669
  parser = argparse.ArgumentParser(
670
- description="Generate Parquet files from UD CoNLL-U data"
671
- )
672
- parser.add_argument(
673
- "--test",
674
- action="store_true",
675
- help="Only process 3 test treebanks (fr_gsd, en_ewt, it_isdt)"
676
- )
677
- parser.add_argument(
678
- "--treebanks",
679
- type=str,
680
- help="Comma-separated list of treebank names to process"
681
- )
682
- parser.add_argument(
683
- "-v", "--verbose",
684
- action="store_true",
685
- default=True,
686
- help="Print progress messages (default: True)"
687
- )
688
- parser.add_argument(
689
- "-q", "--quiet",
690
- action="store_true",
691
- help="Suppress progress messages"
692
  )
693
 
694
  args = parser.parse_args()
695
- verbose = args.verbose and not args.quiet
696
-
697
- # Load metadata
698
- if not METADATA_FILE.exists():
699
- print(f"Error: Metadata file not found: {METADATA_FILE}", file=sys.stderr)
700
- print(f"Run 02_traverse_ud_repos.py first to generate metadata.", file=sys.stderr)
701
- return 1
702
 
703
- with open(METADATA_FILE, "r", encoding="utf-8") as f:
704
- metadata = json.load(f)
705
 
706
- # Load blocked treebanks list
707
- blocked_treebanks = {}
708
- if BLOCKED_TREEBANKS_FILE.exists():
709
- with open(BLOCKED_TREEBANKS_FILE, "r", encoding="utf-8") as f:
710
- blocked_data = yaml.safe_load(f)
711
- if blocked_data:
712
- blocked_treebanks = {k: v for k, v in blocked_data.items() if v is not None}
713
-
714
- if verbose:
715
- print(f"Loaded metadata for {len(metadata)} treebanks")
716
- if blocked_treebanks:
717
- print(f"Blocked treebanks: {len(blocked_treebanks)} ({', '.join(sorted(blocked_treebanks.keys()))})")
718
- print(f"Output directory: {PARQUET_OUTPUT_DIR}")
719
- print()
720
-
721
- # Determine which treebanks to process
722
  if args.test:
723
- # Test mode: process 3 diverse treebanks
724
- treebanks_to_process = ["fr_gsd", "en_ewt", "it_isdt"]
725
- treebanks_to_process = [t for t in treebanks_to_process if t in metadata]
726
- if verbose:
727
- print(f"TEST MODE: Processing {len(treebanks_to_process)} treebanks")
728
- elif args.treebanks:
729
- # User-specified treebanks
730
- treebanks_to_process = [t.strip() for t in args.treebanks.split(",")]
731
- treebanks_to_process = [t for t in treebanks_to_process if t in metadata]
732
- if verbose:
733
- print(f"Processing {len(treebanks_to_process)} specified treebanks")
734
- else:
735
- # All treebanks
736
- treebanks_to_process = sorted(metadata.keys())
737
- if verbose:
738
- print(f"Processing all {len(treebanks_to_process)} treebanks")
739
-
740
- # Filter out blocked treebanks
741
- if blocked_treebanks:
742
- original_count = len(treebanks_to_process)
743
- treebanks_to_process = [t for t in treebanks_to_process if t not in blocked_treebanks]
744
- skipped_count = original_count - len(treebanks_to_process)
745
- if skipped_count > 0 and verbose:
746
- print(f"Skipping {skipped_count} blocked treebank(s) due to license restrictions")
747
- for blocked in sorted(blocked_treebanks.keys()):
748
- if blocked in metadata:
749
- reason = blocked_treebanks[blocked].get('reason', 'Unknown')
750
- license_type = blocked_treebanks[blocked].get('license', 'Unknown')
751
- print(f" - {blocked}: {reason} (License: {license_type})")
752
-
753
- if verbose:
754
- print()
755
 
756
- # Process treebanks
757
- success_count = 0
758
- fail_count = 0
759
 
760
- for i, name in enumerate(treebanks_to_process, 1):
761
- if verbose:
762
- print(f"[{i}/{len(treebanks_to_process)}] {name}")
763
 
764
- try:
765
- success = generate_parquet_for_treebank(
766
- name,
767
- metadata[name],
768
- PARQUET_OUTPUT_DIR,
769
- verbose=verbose
770
- )
771
 
772
- if success:
773
- success_count += 1
774
- else:
775
- fail_count += 1
776
-
777
- except Exception as e:
778
- print(f" Error: {e}", file=sys.stderr)
779
- traceback.print_exc()
780
- fail_count += 1
781
-
782
- if verbose:
783
- print()
784
-
785
- # Summary
786
- if verbose:
787
- print("=" * 60)
788
- print(f"Completed: {success_count} successful, {fail_count} failed")
789
- print(f"Total output size: {sum(f.stat().st_size for f in PARQUET_OUTPUT_DIR.rglob('*.parquet')) / 1024 / 1024:.2f} MB")
790
-
791
- # Show warning summary
792
- if UNSORTED_FEATS_COUNT > 0:
793
- print(f"\n⚠️ Found {UNSORTED_FEATS_COUNT} unsorted FEATS fields (auto-corrected per UD spec)")
794
- if UNSORTED_FEATS_COUNT > 10:
795
- print(f" (Showing first 10 warnings, {UNSORTED_FEATS_COUNT - 10} more suppressed)")
796
-
797
- if MALFORMED_FEATS_COUNT > 0:
798
- print(f"\n⚠️ Found {MALFORMED_FEATS_COUNT} malformed entries (Key without =Value)")
799
- if MALFORMED_FEATS_COUNT > 10:
800
- print(f" (Showing first 10 warnings, {MALFORMED_FEATS_COUNT - 10} more suppressed)")
801
-
802
- return 0 if fail_count == 0 else 1
803
 
804
 
805
  if __name__ == "__main__":
 
3
  # /// script
4
  # requires-python = ">=3.12"
5
  # dependencies = [
6
+ # "ud-hf-parquet-tools",
7
+ # "python-dotenv",
8
  # ]
9
  # ///
10
  """
11
  Generate Parquet files from Universal Dependencies CoNLL-U data.
12
 
13
+ This script is a wrapper around the ud-hf-parquet-tools library.
14
+ For the actual implementation, see: https://github.com/bot-zen/ud-hf-parquet-tools
15
 
16
  Repository: commul/universal_dependencies
17
 
18
  Usage:
19
+ python 04_generate_parquet.py [--test] [--treebanks NAMES] [--overwrite]
20
 
21
  --test: Only process 3 test treebanks (fr_gsd, en_ewt, it_isdt)
22
  --treebanks: Comma-separated list of treebank names to process
23
+ --overwrite: Overwrite existing parquet files (default: skip existing)
24
  """
25
 
26
  import argparse
27
  import os
28
  import sys
29
  from pathlib import Path
30
 
31
  from dotenv import load_dotenv
32
+ from ud_hf_parquet_tools.cli import main as ud_tools_main
33
 
34
 
35
  # Load environment variables
36
  load_dotenv()
37
  UD_VER = os.getenv("UD_VER", "2.17")
38
 
39
+ # Project paths
40
+ REPO_ROOT = Path(__file__).parent.parent.resolve()
41
+ METADATA_FILE = REPO_ROOT / f"metadata-{UD_VER}.json"
42
+ UD_REPOS_DIR = REPO_ROOT / "tools" / "UD_repos"
43
+ OUTPUT_DIR = REPO_ROOT / "parquet"
44
 
45
 
46
  def main():
47
+ """Main entry point - wrapper around ud-hf-parquet-tools."""
48
  parser = argparse.ArgumentParser(
49
+ description="Generate Parquet files from Universal Dependencies CoNLL-U data"
50
  )
51
+ parser.add_argument("--test", action="store_true", help="Test mode: process 3 treebanks only")
52
+ parser.add_argument("--treebanks", help="Comma-separated list of treebank names")
53
+ parser.add_argument("--overwrite", action="store_true", help="Overwrite existing parquet files")
54
+ parser.add_argument("-v", "--verbose", action="store_true", default=True, help="Verbose output")
55
+ parser.add_argument("-q", "--quiet", action="store_true", help="Quiet mode")
56
 
57
  args = parser.parse_args()
58
 
59
+ # Build arguments for ud-hf-parquet-tools
60
+ tool_args = [
61
+ "ud-hfp-tools",
62
+ "generate",
63
+ "--metadata", str(METADATA_FILE),
64
+ "--ud-repos-dir", str(UD_REPOS_DIR),
65
+ "--output-dir", str(OUTPUT_DIR),
66
+ ]
67
 
68
  if args.test:
69
+ tool_args.append("--test")
70
 
71
+ if args.treebanks:
72
+ tool_args.extend(["--treebanks", args.treebanks])
73
 
74
+ if args.overwrite:
75
+ tool_args.append("--overwrite")
76
 
77
+ if args.quiet:
78
+ tool_args.append("--quiet")
79
+ elif args.verbose:
80
+ tool_args.append("--verbose")
81
 
82
+ # Replace sys.argv and call the library's main function
83
+ original_argv = sys.argv
84
+ try:
85
+ sys.argv = tool_args
86
+ return ud_tools_main()
87
+ finally:
88
+ sys.argv = original_argv
89
 
90
 
91
  if __name__ == "__main__":
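
Since the wrapper keeps the original CLI, existing invocations continue to
work unchanged; for example (flags as documented in the docstring above):

    python tools/04_generate_parquet.py --test
    python tools/04_generate_parquet.py --treebanks fr_gsd,en_ewt --overwrite
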
tools/05_validate_parquet.py CHANGED
@@ -3,21 +3,20 @@
3
  # /// script
4
  # requires-python = ">=3.12"
5
  # dependencies = [
6
- # "datasets",
7
- # "load-dotenv",
8
  # ]
9
  # ///
10
  """
11
  Validate Parquet files by comparing with original CoNLL-U data.
12
 
13
  This script can validate both:
14
  - Remote: Downloaded from HuggingFace Hub
15
  - Local: Local parquet files in ../parquet/
16
 
17
- It offers two comparison modes:
18
- - text: Full CoNLL-U text comparison with unified diff (tests production reconstruction)
19
- - field: Field-by-field comparison with detailed error reporting
20
-
21
  Repository: commul/universal_dependencies
22
 
23
  Usage:
@@ -28,8 +27,9 @@ Options:
28
  --treebanks NAMES Comma-separated list of treebank names to validate
29
  --revision BRANCH HuggingFace Hub revision/branch (default: 2.17)
30
  --local Validate local parquet files instead of HuggingFace Hub
31
- --mode {text,field,both} Comparison mode (default: text)
32
  -v, --verbose Print progress messages (default: True)
33
  -q, --quiet Suppress progress messages
34
 
35
  Examples:
@@ -39,547 +39,83 @@ Examples:
39
  # Validate specific treebanks from HuggingFace Hub
40
  python 05_validate_parquet.py --treebanks fr_gsd,en_ewt --revision 2.17
41
 
42
- # Validate all local treebanks with field-by-field comparison
43
- python 05_validate_parquet.py --local --mode field
44
-
45
- # Validate with both comparison modes
46
- python 05_validate_parquet.py --test --local --mode both
47
  """
48
 
49
  import argparse
50
- import difflib
51
- import json
52
  import os
53
  import sys
54
  from pathlib import Path
55
- from typing import Any, Dict, List
56
 
57
- from datasets import load_dataset
58
  from dotenv import load_dotenv
59
 
60
 
61
  # Load environment variables
62
  load_dotenv()
63
  UD_VER = os.getenv("UD_VER", "2.17")
64
 
65
- # Base paths
66
- SCRIPT_DIR = Path(__file__).parent
67
- UD_REPOS_DIR = SCRIPT_DIR / "UD_repos"
68
- PARQUET_DIR = SCRIPT_DIR.parent / "parquet"
69
- METADATA_FILE = SCRIPT_DIR / f"metadata-{UD_VER}.json"
70
-
71
-
72
- def example_to_conllu(example: Dict[str, Any], upos_names: List[str] = None) -> str:
73
- """
74
- Convert a dataset example back to CoNLL-U format.
75
-
76
- This is the production reconstruction logic that validates the template's
77
- round-trip conversion capability.
78
-
79
- Args:
80
- example: Dataset example with all fields
81
- upos_names: Optional list of UPOS label names for ClassLabel conversion
82
-
83
- Returns:
84
- CoNLL-U formatted string for this sentence
85
- """
86
- lines = []
87
-
88
- # Add metadata in original order (comments list contains markers for sent_id and text positions)
89
- for comment in example.get('comments', []):
90
- if comment == "__SENT_ID__":
91
- lines.append(f"# sent_id = {example['sent_id']}")
92
- elif comment == "__TEXT__":
93
- lines.append(f"# text = {example['text']}")
94
- else:
95
- lines.append(f"# {comment}")
96
-
97
- # Parse MWT ranges to know when to insert them
98
- mwt_ranges = {}
99
- for mwt in example.get('mwt', []):
100
- mwt_id = mwt['id']
101
- if '-' in mwt_id:
102
- start, _ = mwt_id.split('-')
103
- mwt_ranges[int(start)] = mwt
104
-
105
- # Parse empty node positions
106
- empty_node_positions = {}
107
- for empty_node in example.get('empty_nodes', []):
108
- en_id = empty_node['id']
109
- if '.' in en_id:
110
- parent, _ = en_id.split('.')
111
- if int(parent) not in empty_node_positions:
112
- empty_node_positions[int(parent)] = []
113
- empty_node_positions[int(parent)].append(empty_node)
114
-
115
- # Insert empty nodes that come before token 1 (e.g., 0.1, 0.2)
116
- if 0 in empty_node_positions:
117
- for empty_node in empty_node_positions[0]:
118
- en_fields = [
119
- empty_node.get('id', '_'),
120
- empty_node.get('form', '_'),
121
- empty_node.get('lemma', '_'),
122
- empty_node.get('upos', '_'),
123
- empty_node.get('xpos') or '_',
124
- empty_node.get('feats') or '_',
125
- empty_node.get('head', '_'),
126
- empty_node.get('deprel', '_'),
127
- empty_node.get('deps') or '_',
128
- empty_node.get('misc') or '_',
129
- ]
130
- lines.append('\t'.join(en_fields))
131
-
132
- # Build token lines with MWTs and empty nodes
133
- token_idx = 1
134
- for i in range(len(example['tokens'])):
135
- # Insert MWT line if needed (before the token)
136
- if token_idx in mwt_ranges:
137
- mwt = mwt_ranges[token_idx]
138
- feats = mwt.get('feats') or '_'
139
- misc = mwt.get('misc') or '_'
140
- lines.append(f"{mwt['id']}\t{mwt['form']}\t_\t_\t_\t{feats}\t_\t_\t_\t{misc}")
141
-
142
- # Convert UPOS from ClassLabel index to string if needed
143
- upos_value = example['upos'][i]
144
- if isinstance(upos_value, int) and upos_names:
145
- upos_str = upos_names[upos_value]
146
- else:
147
- upos_str = str(upos_value)
148
-
149
- # Regular token line
150
- fields = [
151
- str(token_idx),
152
- str(example['tokens'][i]),
153
- str(example['lemmas'][i]),
154
- str(upos_str),
155
- str(example['xpos'][i] or '_'),
156
- str(example['feats'][i] or '_'),
157
- str(example['head'][i]),
158
- str(example['deprel'][i]),
159
- str(example['deps'][i] or '_'),
160
- str(example['misc'][i] or '_'),
161
- ]
162
- lines.append('\t'.join(fields))
163
-
164
- # Insert empty nodes after token if needed (e.g., 22.1 after token 22)
165
- if token_idx in empty_node_positions:
166
- for empty_node in empty_node_positions[token_idx]:
167
- en_fields = [
168
- empty_node.get('id', '_'),
169
- empty_node.get('form', '_'),
170
- empty_node.get('lemma', '_'),
171
- empty_node.get('upos', '_'),
172
- empty_node.get('xpos') or '_',
173
- empty_node.get('feats') or '_',
174
- empty_node.get('head', '_'),
175
- empty_node.get('deprel', '_'),
176
- empty_node.get('deps') or '_',
177
- empty_node.get('misc') or '_',
178
- ]
179
- lines.append('\t'.join(en_fields))
180
-
181
- token_idx += 1
182
-
183
- # Add blank line after sentence per UD spec
184
- return '\n'.join(lines) + '\n\n'
185
-
186
-
187
- def normalize_conllu(text: str) -> str:
188
- """Normalize CoNLL-U text for comparison (strip trailing blank lines)."""
189
- lines = text.strip().split('\n')
190
- # Remove trailing empty lines
191
- while lines and lines[-1] == '':
192
- lines.pop()
193
- return '\n'.join(lines) + '\n'
194
-
195
-
196
- def validate_treebank_text(
197
- name: str,
198
- metadata: Dict[str, Any],
199
- parquet_dir: Path,
200
- verbose: bool = True,
201
- very_verbose: bool = False
202
- ) -> Dict[str, Any]:
203
- """
204
- Validate a single treebank using text-based comparison with unified diff.
205
-
206
- Args:
207
- name: Treebank name (e.g., "fr_gsd")
208
- metadata: Treebank metadata including splits and file paths
209
- parquet_dir: Path to parquet directory (local or HF Hub path)
210
- verbose: Print progress messages
211
- very_verbose: Print all differences (not just first 20 lines)
212
-
213
- Returns:
214
- Validation results dictionary
215
- """
216
- results = {
217
- 'name': name,
218
- 'splits': {},
219
- 'total_sentences': 0,
220
- 'total_errors': 0,
221
- 'success': True
222
- }
223
-
224
- if verbose:
225
- print(f" Text-based comparison...")
226
-
227
- # Check if parquet directory exists for local validation
228
- treebank_parquet_dir = parquet_dir / name
229
- if isinstance(parquet_dir, Path) and not treebank_parquet_dir.exists():
230
- results['success'] = False
231
- results['error'] = f"Parquet directory not found: {treebank_parquet_dir}"
232
- if verbose:
233
- print(f" ERROR: {results['error']}")
234
- return results
235
-
236
- # Process each split
237
- for split_name, split_data in metadata.get("splits", {}).items():
238
- if isinstance(parquet_dir, Path):
239
- parquet_file = treebank_parquet_dir / f"{split_name}.parquet"
240
- if not parquet_file.exists():
241
- continue
242
- parquet_path = str(parquet_file)
243
- else:
244
- # HF Hub path
245
- parquet_path = f"{parquet_dir}/{name}/{split_name}.parquet"
246
-
247
- try:
248
- # Load parquet dataset
249
- ds = load_dataset('parquet', data_files={split_name: parquet_path})
250
- dataset = ds[split_name]
251
- except Exception as e:
252
- results['success'] = False
253
- results['splits'][split_name] = {
254
- 'error': f"Failed to load parquet: {e}",
255
- 'sentences': 0,
256
- 'errors': 0
257
- }
258
- continue
259
-
260
- # Get UPOS names for ClassLabel conversion
261
- upos_names = dataset.features['upos'].feature.names
262
-
263
- # Reconstruct all examples to CoNLL-U
264
- reconstructed_conllu = ""
265
- for example in dataset:
266
- reconstructed_conllu += example_to_conllu(example, upos_names)
267
-
268
- # Load original CoNLL-U files
269
- original_conllu = ""
270
- files = split_data.get("files", [])
271
- if not files:
272
- results['splits'][split_name] = {
273
- 'error': f"No files found in metadata",
274
- 'sentences': 0,
275
- 'errors': 0
276
- }
277
- continue
278
-
279
- for file_path in files:
280
- # Extract just the filename from the path
281
- filename = Path(file_path).name
282
-
283
- # Construct full path: UD_repos/dirname/filename
284
- full_path = UD_REPOS_DIR / metadata["dirname"] / filename
285
-
286
- if not full_path.exists():
287
- results['success'] = False
288
- results['splits'][split_name] = {
289
- 'error': f"Original file not found: {full_path}",
290
- 'sentences': 0,
291
- 'errors': 0
292
- }
293
- continue
294
-
295
- # Read original file
296
- with open(full_path, 'r', encoding='utf-8') as f:
297
- original_conllu += f.read()
298
-
299
- # Normalize both for comparison
300
- original_normalized = normalize_conllu(original_conllu)
301
- reconstructed_normalized = normalize_conllu(reconstructed_conllu)
302
-
303
- # Compare
304
- num_sentences = len(dataset)
305
- results['total_sentences'] += num_sentences
306
-
307
- if original_normalized == reconstructed_normalized:
308
- results['splits'][split_name] = {
309
- 'sentences': num_sentences,
310
- 'errors': 0,
311
- 'passed': True
312
- }
313
- if verbose:
314
- print(f" ✅ {split_name}: {num_sentences} sentences match perfectly")
315
- else:
316
- results['success'] = False
317
-
318
- # Find differences (use n=1 for minimal context)
319
- original_lines = original_normalized.split('\n')
320
- reconstructed_lines = reconstructed_normalized.split('\n')
321
-
322
- diff = list(difflib.unified_diff(
323
- original_lines,
324
- reconstructed_lines,
325
- fromfile=f'original_{split_name}',
326
- tofile=f'reconstructed_{split_name}',
327
- lineterm='',
328
- n=1 # Show only 1 line of context (instead of default 3)
329
- ))
330
-
331
- num_diff_lines = len([l for l in diff if l.startswith('+') or l.startswith('-')])
332
- results['total_errors'] += num_diff_lines
333
-
334
- results['splits'][split_name] = {
335
- 'sentences': num_sentences,
336
- 'errors': num_diff_lines,
337
- 'diff': diff, # Store all diff lines for very_verbose mode
338
- 'passed': False
339
- }
340
-
341
- if verbose:
342
- print(f" ❌ {split_name}: Found {num_diff_lines} different lines")
343
- if very_verbose:
344
- print(f" All differences:")
345
- for line in diff:
346
- print(f" {line}")
347
- else:
348
- print(f" First differences (use -vv to see all):")
349
- for line in diff[:20]:
350
- print(f" {line}")
351
- if len(diff) > 20:
352
- print(f" ... ({len(diff) - 20} more diff lines)")
353
-
354
- return results
355
-
356
-
357
- def validate_treebank(
358
- name: str,
359
- metadata: Dict[str, Any],
360
- use_local: bool = False,
361
- revision: str = "2.17",
362
- mode: str = "text",
363
- verbose: bool = True,
364
- very_verbose: bool = False
365
- ) -> Dict[str, Any]:
366
- """
367
- Validate a single treebank.
368
-
369
- Args:
370
- name: Treebank name (e.g., "fr_gsd")
371
- metadata: Treebank metadata
372
- use_local: Load from local parquet files instead of HF Hub
373
- revision: HuggingFace Hub revision
374
- mode: Comparison mode ('text', 'field', or 'both')
375
- verbose: Print progress messages
376
- very_verbose: Print all differences (not just first 20 lines)
377
-
378
- Returns:
379
- Validation results dictionary
380
- """
381
- if verbose:
382
- source = "local parquet" if use_local else f"HF Hub (revision={revision})"
383
- print(f"\nValidating {name} from {source}...")
384
-
385
- # Determine parquet directory
386
- if use_local:
387
- parquet_dir = PARQUET_DIR
388
- else:
389
- parquet_dir = f"hf://datasets/commul/universal_dependencies@{revision}/parquet"
390
-
391
- # Run text-based validation (default and recommended)
392
- if mode in ('text', 'both'):
393
- results = validate_treebank_text(
394
- name,
395
- metadata,
396
- parquet_dir if use_local else Path(parquet_dir),
397
- verbose,
398
- very_verbose
399
- )
400
- else:
401
- results = {
402
- 'name': name,
403
- 'splits': {},
404
- 'total_sentences': 0,
405
- 'total_errors': 0,
406
- 'success': True
407
- }
408
-
409
- # Note: field-by-field mode could be added here if needed
410
- # For now, text mode is the primary validation method as it tests
411
- # the actual production reconstruction logic
412
-
413
- return results
414
 
415
 
416
  def main():
417
- """Main entry point for validation."""
418
  parser = argparse.ArgumentParser(
419
- description="Validate Parquet files against original CoNLL-U data",
420
- formatter_class=argparse.RawDescriptionHelpFormatter,
421
- epilog="""
422
- Examples:
423
- # Validate 3 test treebanks from local files
424
- %(prog)s --test --local
425
-
426
- # Validate specific treebanks from HuggingFace Hub
427
- %(prog)s --treebanks fr_gsd,en_ewt --revision 2.17
428
-
429
- # Validate all local treebanks
430
- %(prog)s --local
431
- """
432
- )
433
- parser.add_argument(
434
- "--test",
435
- action="store_true",
436
- help="Only validate 3 test treebanks (fr_gsd, en_ewt, it_isdt)"
437
- )
438
- parser.add_argument(
439
- "--treebanks",
440
- type=str,
441
- help="Comma-separated list of treebank names to validate"
442
- )
443
- parser.add_argument(
444
- "--revision",
445
- type=str,
446
- default=UD_VER,
447
- help=f"HuggingFace Hub revision (default: {UD_VER})"
448
- )
449
- parser.add_argument(
450
- "--local",
451
- action="store_true",
452
- help="Validate local parquet files instead of HuggingFace Hub"
453
- )
454
- parser.add_argument(
455
- "--mode",
456
- type=str,
457
- choices=['text', 'field', 'both'],
458
- default='text',
459
- help="Comparison mode: text uses unified diff (default), field compares field-by-field, both runs both"
460
- )
461
- parser.add_argument(
462
- "-v", "--verbose",
463
- action="store_true",
464
- default=True,
465
- help="Print progress messages (default: True)"
466
- )
467
- parser.add_argument(
468
- "-vv", "--very-verbose",
469
- action="store_true",
470
- help="Print all differences (not just first 20 lines)"
471
- )
472
- parser.add_argument(
473
- "-q", "--quiet",
474
- action="store_true",
475
- help="Suppress progress messages"
476
  )
477
 
478
  args = parser.parse_args()
479
- verbose = args.verbose and not args.quiet
480
- very_verbose = args.very_verbose
481
 
482
- # Load metadata
483
- if not METADATA_FILE.exists():
484
- print(f"ERROR: Metadata file not found: {METADATA_FILE}", file=sys.stderr)
485
- print(f"Run 02_traverse_ud_repos.py first to generate metadata.", file=sys.stderr)
486
- return 1
487
-
488
- with open(METADATA_FILE, 'r', encoding='utf-8') as f:
489
- metadata = json.load(f)
490
-
491
- if verbose:
492
- print("=" * 60)
493
- print("Universal Dependencies Parquet Validation")
494
- print("=" * 60)
495
- print(f"Loaded metadata for {len(metadata)} treebanks")
496
- if args.local:
497
- print(f"Source: Local parquet files ({PARQUET_DIR})")
498
- else:
499
- print(f"Source: HuggingFace Hub (revision={args.revision})")
500
- print(f"Comparison mode: {args.mode}")
501
- print(f"UD repos directory: {UD_REPOS_DIR}")
502
- print()
503
-
504
- # Determine which treebanks to validate
505
- if args.test:
506
- # Test mode: validate 3 diverse treebanks
507
- treebanks_to_validate = ["fr_gsd", "en_ewt", "it_isdt"]
508
- treebanks_to_validate = [t for t in treebanks_to_validate if t in metadata]
509
- if verbose:
510
- print(f"TEST MODE: Validating {len(treebanks_to_validate)} treebanks")
511
- elif args.treebanks:
512
- # User-specified treebanks
513
- treebanks_to_validate = [t.strip() for t in args.treebanks.split(",")]
514
- treebanks_to_validate = [t for t in treebanks_to_validate if t in metadata]
515
- if verbose:
516
- print(f"Validating {len(treebanks_to_validate)} specified treebanks")
517
  else:
518
- # All treebanks
519
- treebanks_to_validate = sorted(metadata.keys())
520
- if verbose:
521
- print(f"Validating all {len(treebanks_to_validate)} treebanks")
522
-
523
- # Validate treebanks
524
- success_count = 0
525
- fail_count = 0
526
- all_results = []
527
 
528
- for i, name in enumerate(treebanks_to_validate, 1):
529
- if verbose:
530
- print(f"\n[{i}/{len(treebanks_to_validate)}] {name}")
531
-
532
- try:
533
- results = validate_treebank(
534
- name,
535
- metadata[name],
536
- use_local=args.local,
537
- revision=args.revision,
538
- mode=args.mode,
539
- verbose=verbose,
540
- very_verbose=very_verbose
541
- )
542
-
543
- all_results.append(results)
544
-
545
- if results['success']:
546
- success_count += 1
547
- else:
548
- fail_count += 1
549
-
550
- except Exception as e:
551
- print(f" ERROR: {e}")
552
- import traceback
553
- traceback.print_exc()
554
- fail_count += 1
555
-
556
- # Summary
557
- if verbose:
558
- print()
559
- print("=" * 60)
560
- print("VALIDATION SUMMARY")
561
- print("=" * 60)
562
- print(f"✅ Passed: {success_count}")
563
- print(f"❌ Failed: {fail_count}")
564
- print(f"Total: {success_count + fail_count}")
565
-
566
- total_sentences = sum(r['total_sentences'] for r in all_results)
567
- total_errors = sum(r['total_errors'] for r in all_results)
568
-
569
- print(f"\nTotal sentences validated: {total_sentences:,}")
570
- print(f"Total errors found: {total_errors:,}")
571
- print()
572
-
573
- if fail_count == 0:
574
- print("🎉 SUCCESS: All parquet files validated successfully!")
575
- print("The reconstructed CoNLL-U matches original files 100%.")
576
- if not args.local:
577
- print("HuggingFace Hub data is correct and ready for production use.")
578
- else:
579
- print("⚠️ VALIDATION FAILED: Some treebanks have differences.")
580
- print("Please review errors before using in production.")
581
-
582
- return 0 if fail_count == 0 else 1
583
 
584
 
585
  if __name__ == "__main__":
 
3
  # /// script
4
  # requires-python = ">=3.12"
5
  # dependencies = [
6
+ # "ud-hf-parquet-tools",
7
+ # "python-dotenv",
8
  # ]
9
  # ///
10
  """
11
  Validate Parquet files by comparing with original CoNLL-U data.
12
 
13
+ This script is a wrapper around the ud-hf-parquet-tools library.
14
+ For the actual implementation, see: https://github.com/bot-zen/ud-hf-parquet-tools
15
+
16
  This script can validate both:
17
  - Remote: Downloaded from HuggingFace Hub
18
  - Local: Local parquet files in ../parquet/
19
 
20
  Repository: commul/universal_dependencies
21
 
22
  Usage:
 
27
  --treebanks NAMES Comma-separated list of treebank names to validate
28
  --revision BRANCH HuggingFace Hub revision/branch (default: 2.17)
29
  --local Validate local parquet files instead of HuggingFace Hub
30
+ --parquet-dir DIR Path to local parquet directory (implies --local)
31
  -v, --verbose Print progress messages (default: True)
32
+ -vv, --very-verbose Show all differences
33
  -q, --quiet Suppress progress messages
34
 
35
  Examples:
 
39
  # Validate specific treebanks from HuggingFace Hub
40
  python 05_validate_parquet.py --treebanks fr_gsd,en_ewt --revision 2.17
41
 
42
+ # Validate all treebanks from main branch
43
+ python 05_validate_parquet.py --revision main
44
  """
45
 
46
  import argparse
47
  import os
48
  import sys
49
  from pathlib import Path
50
 
51
  from dotenv import load_dotenv
52
+ from ud_hf_parquet_tools.cli import main as ud_tools_main
53
 
54
 
55
  # Load environment variables
56
  load_dotenv()
57
  UD_VER = os.getenv("UD_VER", "2.17")
58
 
59
+ # Project paths
60
+ REPO_ROOT = Path(__file__).parent.parent.resolve()
61
+ METADATA_FILE = REPO_ROOT / f"metadata-{UD_VER}.json"
62
+ UD_REPOS_DIR = REPO_ROOT / "tools" / "UD_repos"
63
+ PARQUET_DIR = REPO_ROOT / "parquet"
 
64
 
65
 
66
  def main():
67
+ """Main entry point - wrapper around ud-hf-parquet-tools."""
68
  parser = argparse.ArgumentParser(
69
+ description="Validate Parquet files by comparing with original CoNLL-U data"
70
  )
71
+ parser.add_argument("--test", action="store_true", help="Test mode: validate 3 treebanks only")
72
+ parser.add_argument("--treebanks", help="Comma-separated list of treebank names")
73
+ parser.add_argument("--revision", default=UD_VER, help="HuggingFace Hub revision/branch")
74
+ parser.add_argument("--local", action="store_true", help="Validate local parquet files")
75
+ parser.add_argument("--parquet-dir", help="Path to local parquet directory (implies --local)")
76
+ parser.add_argument("-v", "--verbose", action="store_true", default=True, help="Verbose output")
77
+ parser.add_argument("-vv", "--very-verbose", action="store_true", help="Show all differences")
78
+ parser.add_argument("-q", "--quiet", action="store_true", help="Quiet mode")
79
 
80
  args = parser.parse_args()
81
 
82
+ # Build arguments for ud-hf-parquet-tools
83
+ tool_args = [
84
+ "ud-hfp-tools",
85
+ "validate",
86
+ "--metadata", str(METADATA_FILE),
87
+ "--ud-repos-dir", str(UD_REPOS_DIR),
88
+ ]
89
+
90
+ # Handle parquet directory
91
+ if args.parquet_dir:
92
+ tool_args.extend(["--parquet-dir", args.parquet_dir])
93
+ elif args.local:
94
+ tool_args.extend(["--parquet-dir", str(PARQUET_DIR)])
95
  else:
96
+ # Remote validation
97
+ tool_args.extend(["--revision", args.revision])
98
 
99
+ if args.test:
100
+ tool_args.append("--test")
101
+
102
+ if args.treebanks:
103
+ tool_args.extend(["--treebanks", args.treebanks])
104
+
105
+ if args.very_verbose:
106
+ tool_args.append("-vv")
107
+ elif args.quiet:
108
+ tool_args.append("--quiet")
109
+ elif args.verbose:
110
+ tool_args.append("--verbose")
111
+
112
+ # Replace sys.argv and call the library's main function
113
+ original_argv = sys.argv
114
+ try:
115
+ sys.argv = tool_args
116
+ return ud_tools_main()
117
+ finally:
118
+ sys.argv = original_argv
119
 
120
 
121
  if __name__ == "__main__":