| |
| |
| |
| |
| |
| |
| |
| |
| |
"""
Validate Parquet files by comparing with original CoNLL-U data.

This script can validate both:
- Remote: Downloaded from HuggingFace Hub
- Local: Local parquet files in ../parquet/

It offers two comparison modes:
- text: Full CoNLL-U text comparison with unified diff (tests production reconstruction)
- field: Field-by-field comparison with detailed error reporting

Repository: commul/universal_dependencies

Usage:
    python 05_validate_parquet.py [OPTIONS]

Options:
    --test                    Only validate 3 test treebanks (fr_gsd, en_ewt, it_isdt)
    --treebanks NAMES         Comma-separated list of treebank names to validate
    --revision BRANCH         HuggingFace Hub revision/branch (default: 2.17)
    --local                   Validate local parquet files instead of HuggingFace Hub
    --mode {text,field,both}  Comparison mode (default: text)
    -v, --verbose             Print progress messages (default: True)
    -q, --quiet               Suppress progress messages

Examples:
    # Validate 3 test treebanks from local files
    python 05_validate_parquet.py --test --local

    # Validate specific treebanks from HuggingFace Hub
    python 05_validate_parquet.py --treebanks fr_gsd,en_ewt --revision 2.17

    # Validate all local treebanks with field-by-field comparison
    python 05_validate_parquet.py --local --mode field

    # Validate with both comparison modes
    python 05_validate_parquet.py --test --local --mode both
"""
|
|
import argparse
import difflib
import json
import os
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional

from datasets import load_dataset
from dotenv import load_dotenv
|
|
|
|
| |
# Load configuration (e.g. HF credentials, UD_VER) from a local .env file.
load_dotenv()
# UD release version; overridable via the UD_VER environment variable.
UD_VER = os.getenv("UD_VER", "2.17")

# Directory layout relative to this script: cloned UD repositories live in
# ./UD_repos, converted parquet output one level up in ../parquet/.
SCRIPT_DIR = Path(__file__).parent
UD_REPOS_DIR = SCRIPT_DIR / "UD_repos"
PARQUET_DIR = SCRIPT_DIR.parent / "parquet"
# Metadata file produced by 02_traverse_ud_repos.py (see main()'s error hint).
METADATA_FILE = SCRIPT_DIR / f"metadata-{UD_VER}.json"
|
|
|
|
def example_to_conllu(example: Dict[str, Any], upos_names: Optional[List[str]] = None) -> str:
    """
    Convert a dataset example back to CoNLL-U format.

    This is the production reconstruction logic that validates the template's
    round-trip conversion capability.

    Args:
        example: Dataset example with per-token columns (tokens, lemmas, upos,
            xpos, feats, head, deprel, deps, misc) plus optional 'comments',
            'mwt' and 'empty_nodes' metadata.
        upos_names: Optional list of UPOS label names used to map ClassLabel
            integer ids back to their string labels.

    Returns:
        CoNLL-U formatted string for this sentence, terminated by the blank
        line that separates sentences.
    """
    lines: List[str] = []

    # Comments: sentinel markers record where sent_id/text were pulled out
    # into dedicated columns; all other comments are reproduced verbatim.
    for comment in example.get('comments', []):
        if comment == "__SENT_ID__":
            lines.append(f"# sent_id = {example['sent_id']}")
        elif comment == "__TEXT__":
            lines.append(f"# text = {example['text']}")
        else:
            lines.append(f"# {comment}")

    # Multi-word tokens keyed by the first word index they cover
    # (e.g. "3-4" -> 3), so they can be emitted just before that word.
    mwt_ranges: Dict[int, Dict[str, Any]] = {}
    for mwt in example.get('mwt', []):
        mwt_id = mwt['id']
        if '-' in mwt_id:
            start, _ = mwt_id.split('-')
            mwt_ranges[int(start)] = mwt

    # Empty nodes (decimal ids like "2.1") grouped by the word index they
    # follow; 0 means they precede the first word.
    empty_node_positions: Dict[int, List[Dict[str, Any]]] = {}
    for empty_node in example.get('empty_nodes', []):
        en_id = empty_node['id']
        if '.' in en_id:
            parent, _ = en_id.split('.')
            empty_node_positions.setdefault(int(parent), []).append(empty_node)

    # Empty nodes anchored at 0 come before all regular tokens.
    for empty_node in empty_node_positions.get(0, []):
        lines.append(_empty_node_line(empty_node))

    token_idx = 1
    for i in range(len(example['tokens'])):
        # Emit the MWT line covering this position, if any, before the word.
        if token_idx in mwt_ranges:
            mwt = mwt_ranges[token_idx]
            feats = mwt.get('feats') or '_'
            misc = mwt.get('misc') or '_'
            lines.append(f"{mwt['id']}\t{mwt['form']}\t_\t_\t_\t{feats}\t_\t_\t_\t{misc}")

        # UPOS may be stored as a ClassLabel integer; map it back to a label.
        upos_value = example['upos'][i]
        if isinstance(upos_value, int) and upos_names:
            upos_str = upos_names[upos_value]
        else:
            upos_str = str(upos_value)

        # Nullable columns (xpos/feats/deps/misc) round-trip None as '_'.
        fields = [
            str(token_idx),
            str(example['tokens'][i]),
            str(example['lemmas'][i]),
            str(upos_str),
            str(example['xpos'][i] or '_'),
            str(example['feats'][i] or '_'),
            str(example['head'][i]),
            str(example['deprel'][i]),
            str(example['deps'][i] or '_'),
            str(example['misc'][i] or '_'),
        ]
        lines.append('\t'.join(fields))

        # Empty nodes anchored to this word index come right after it.
        for empty_node in empty_node_positions.get(token_idx, []):
            lines.append(_empty_node_line(empty_node))

        token_idx += 1

    # Trailing blank line is the CoNLL-U sentence separator.
    return '\n'.join(lines) + '\n\n'


def _empty_node_line(empty_node: Dict[str, Any]) -> str:
    """Format one empty node record as a tab-separated CoNLL-U line."""
    return '\t'.join([
        empty_node.get('id', '_'),
        empty_node.get('form', '_'),
        empty_node.get('lemma', '_'),
        empty_node.get('upos', '_'),
        empty_node.get('xpos') or '_',
        empty_node.get('feats') or '_',
        empty_node.get('head', '_'),
        empty_node.get('deprel', '_'),
        empty_node.get('deps') or '_',
        empty_node.get('misc') or '_',
    ])
|
|
|
|
def normalize_conllu(text: str) -> str:
    """
    Normalize CoNLL-U text for comparison.

    Strips surrounding whitespace (including trailing blank lines) and
    re-appends exactly one trailing newline so both sides of a diff end
    identically.

    Args:
        text: Raw CoNLL-U content.

    Returns:
        The normalized text, always ending with a single newline.
    """
    # str.strip() already removes every trailing blank line, so the old
    # per-line "pop trailing empties" loop was dead code and is gone.
    return text.strip() + '\n'
|
|
|
|
def validate_treebank_text(
    name: str,
    metadata: Dict[str, Any],
    parquet_dir: Path,
    verbose: bool = True,
    very_verbose: bool = False
) -> Dict[str, Any]:
    """
    Validate a single treebank using text-based comparison with unified diff.

    Each parquet split is reconstructed to CoNLL-U via example_to_conllu()
    and diffed against the concatenation of the original .conllu files.

    Args:
        name: Treebank name (e.g., "fr_gsd")
        metadata: Treebank metadata including splits and file paths
        parquet_dir: Path to the local parquet directory, or a remote
            location (e.g. an "hf://..." URL) given as a plain string
        verbose: Print progress messages
        very_verbose: Print all differences (not just first 20 lines)

    Returns:
        Validation results dictionary with per-split outcomes, sentence and
        error counts, and an overall 'success' flag.
    """
    results: Dict[str, Any] = {
        'name': name,
        'splits': {},
        'total_sentences': 0,
        'total_errors': 0,
        'success': True
    }

    if verbose:
        print("  Text-based comparison...")

    # Local validation fails fast if the treebank directory is missing;
    # remote locations are plain strings and are checked lazily on load.
    if isinstance(parquet_dir, Path):
        treebank_parquet_dir = parquet_dir / name
        if not treebank_parquet_dir.exists():
            results['success'] = False
            results['error'] = f"Parquet directory not found: {treebank_parquet_dir}"
            if verbose:
                print(f"    ERROR: {results['error']}")
            return results

    for split_name, split_data in metadata.get("splits", {}).items():
        # Resolve the parquet file location (local file or remote URL).
        if isinstance(parquet_dir, Path):
            parquet_file = treebank_parquet_dir / f"{split_name}.parquet"
            if not parquet_file.exists():
                continue  # split was never converted; skip silently
            parquet_path = str(parquet_file)
        else:
            # Remote: the datasets library resolves hf:// URLs directly.
            parquet_path = f"{parquet_dir}/{name}/{split_name}.parquet"

        try:
            ds = load_dataset('parquet', data_files={split_name: parquet_path})
            dataset = ds[split_name]
        except Exception as e:
            results['success'] = False
            results['splits'][split_name] = {
                'error': f"Failed to load parquet: {e}",
                'sentences': 0,
                'errors': 0
            }
            continue

        # UPOS is stored as a ClassLabel; recover the label names so integer
        # ids can be mapped back to strings during reconstruction.
        upos_names = dataset.features['upos'].feature.names

        # Reconstruct the full CoNLL-U text. Join once instead of repeated
        # string += (which is quadratic over large treebanks).
        reconstructed_conllu = ''.join(
            example_to_conllu(example, upos_names) for example in dataset
        )

        # Concatenate the original .conllu files listed in the metadata.
        files = split_data.get("files", [])
        if not files:
            results['splits'][split_name] = {
                'error': "No files found in metadata",
                'sentences': 0,
                'errors': 0
            }
            continue

        original_parts: List[str] = []
        for file_path in files:
            filename = Path(file_path).name
            full_path = UD_REPOS_DIR / metadata["dirname"] / filename

            if not full_path.exists():
                # NOTE(review): this records an error for the split but keeps
                # processing the remaining files, so a later comparison result
                # may overwrite this entry (original behavior preserved).
                results['success'] = False
                results['splits'][split_name] = {
                    'error': f"Original file not found: {full_path}",
                    'sentences': 0,
                    'errors': 0
                }
                continue

            with open(full_path, 'r', encoding='utf-8') as f:
                original_parts.append(f.read())
        original_conllu = ''.join(original_parts)

        # Normalize trailing blank lines on both sides before comparing.
        original_normalized = normalize_conllu(original_conllu)
        reconstructed_normalized = normalize_conllu(reconstructed_conllu)

        num_sentences = len(dataset)
        results['total_sentences'] += num_sentences

        if original_normalized == reconstructed_normalized:
            results['splits'][split_name] = {
                'sentences': num_sentences,
                'errors': 0,
                'passed': True
            }
            if verbose:
                print(f"  ✅ {split_name}: {num_sentences} sentences match perfectly")
        else:
            results['success'] = False

            original_lines = original_normalized.split('\n')
            reconstructed_lines = reconstructed_normalized.split('\n')

            # Minimal-context unified diff; every +/- line (including the
            # +++/--- headers, as before) counts toward the error total.
            diff = list(difflib.unified_diff(
                original_lines,
                reconstructed_lines,
                fromfile=f'original_{split_name}',
                tofile=f'reconstructed_{split_name}',
                lineterm='',
                n=1
            ))

            num_diff_lines = len([l for l in diff if l.startswith('+') or l.startswith('-')])
            results['total_errors'] += num_diff_lines

            results['splits'][split_name] = {
                'sentences': num_sentences,
                'errors': num_diff_lines,
                'diff': diff,
                'passed': False
            }

            if verbose:
                print(f"  ❌ {split_name}: Found {num_diff_lines} different lines")
                if very_verbose:
                    print("  All differences:")
                    for line in diff:
                        print(f"    {line}")
                else:
                    print("  First differences (use -vv to see all):")
                    for line in diff[:20]:
                        print(f"    {line}")
                    if len(diff) > 20:
                        print(f"    ... ({len(diff) - 20} more diff lines)")

    return results
|
|
|
|
def validate_treebank(
    name: str,
    metadata: Dict[str, Any],
    use_local: bool = False,
    revision: str = "2.17",
    mode: str = "text",
    verbose: bool = True,
    very_verbose: bool = False
) -> Dict[str, Any]:
    """
    Validate a single treebank.

    Args:
        name: Treebank name (e.g., "fr_gsd")
        metadata: Treebank metadata
        use_local: Load from local parquet files instead of HF Hub
        revision: HuggingFace Hub revision
        mode: Comparison mode ('text', 'field', or 'both')
        verbose: Print progress messages
        very_verbose: Print all differences (not just first 20 lines)

    Returns:
        Validation results dictionary
    """
    if verbose:
        source = "local parquet" if use_local else f"HF Hub (revision={revision})"
        print(f"\nValidating {name} from {source}...")

    # Local validation uses a pathlib.Path; remote validation uses an hf://
    # URL kept as a plain string. validate_treebank_text() distinguishes the
    # two cases via isinstance(parquet_dir, Path).
    if use_local:
        parquet_dir: Any = PARQUET_DIR
    else:
        parquet_dir = f"hf://datasets/commul/universal_dependencies@{revision}/parquet"

    if mode in ('text', 'both'):
        # BUG FIX: the hf:// URL was previously wrapped in Path(), which
        # collapses the double slash ("hf://..." -> "hf:/...") and broke
        # every remote validation. Pass the location through unchanged.
        results = validate_treebank_text(
            name,
            metadata,
            parquet_dir,
            verbose,
            very_verbose
        )
    else:
        results = {
            'name': name,
            'splits': {},
            'total_sentences': 0,
            'total_errors': 0,
            'success': True
        }

    # NOTE(review): 'field' mode is accepted by the CLI but not implemented
    # here; it currently yields an empty success result. TODO confirm intent.

    return results
|
|
|
|
def main():
    """Main entry point for validation.

    Parses CLI arguments, selects which treebanks to validate, runs the
    validation for each, and prints a summary.

    Returns:
        0 if every selected treebank validated successfully, 1 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Validate Parquet files against original CoNLL-U data",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Validate 3 test treebanks from local files
  %(prog)s --test --local

  # Validate specific treebanks from HuggingFace Hub
  %(prog)s --treebanks fr_gsd,en_ewt --revision 2.17

  # Validate all local treebanks
  %(prog)s --local
"""
    )
    parser.add_argument(
        "--test",
        action="store_true",
        help="Only validate 3 test treebanks (fr_gsd, en_ewt, it_isdt)"
    )
    parser.add_argument(
        "--treebanks",
        type=str,
        help="Comma-separated list of treebank names to validate"
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=UD_VER,
        help=f"HuggingFace Hub revision (default: {UD_VER})"
    )
    parser.add_argument(
        "--local",
        action="store_true",
        help="Validate local parquet files instead of HuggingFace Hub"
    )
    parser.add_argument(
        "--mode",
        type=str,
        choices=['text', 'field', 'both'],
        default='text',
        help="Comparison mode: text uses unified diff (default), field compares field-by-field, both runs both"
    )
    # NOTE(review): -v is effectively a no-op because the default is already
    # True; verbosity is really controlled by -q/--quiet below. Kept for
    # interface compatibility.
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        default=True,
        help="Print progress messages (default: True)"
    )
    parser.add_argument(
        "-vv", "--very-verbose",
        action="store_true",
        help="Print all differences (not just first 20 lines)"
    )
    parser.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="Suppress progress messages"
    )

    args = parser.parse_args()
    # --quiet overrides --verbose.
    verbose = args.verbose and not args.quiet
    very_verbose = args.very_verbose

    # The metadata file (produced by an earlier pipeline step) maps treebank
    # names to their splits and original .conllu file paths.
    if not METADATA_FILE.exists():
        print(f"ERROR: Metadata file not found: {METADATA_FILE}", file=sys.stderr)
        print(f"Run 02_traverse_ud_repos.py first to generate metadata.", file=sys.stderr)
        return 1

    with open(METADATA_FILE, 'r', encoding='utf-8') as f:
        metadata = json.load(f)

    if verbose:
        print("=" * 60)
        print("Universal Dependencies Parquet Validation")
        print("=" * 60)
        print(f"Loaded metadata for {len(metadata)} treebanks")
        if args.local:
            print(f"Source: Local parquet files ({PARQUET_DIR})")
        else:
            print(f"Source: HuggingFace Hub (revision={args.revision})")
        print(f"Comparison mode: {args.mode}")
        print(f"UD repos directory: {UD_REPOS_DIR}")
        print()

    # Select which treebanks to validate: fixed test subset, an explicit
    # comma-separated list, or everything in the metadata. Unknown names are
    # silently dropped by the membership filter.
    if args.test:
        treebanks_to_validate = ["fr_gsd", "en_ewt", "it_isdt"]
        treebanks_to_validate = [t for t in treebanks_to_validate if t in metadata]
        if verbose:
            print(f"TEST MODE: Validating {len(treebanks_to_validate)} treebanks")
    elif args.treebanks:
        treebanks_to_validate = [t.strip() for t in args.treebanks.split(",")]
        treebanks_to_validate = [t for t in treebanks_to_validate if t in metadata]
        if verbose:
            print(f"Validating {len(treebanks_to_validate)} specified treebanks")
    else:
        treebanks_to_validate = sorted(metadata.keys())
        if verbose:
            print(f"Validating all {len(treebanks_to_validate)} treebanks")

    success_count = 0
    fail_count = 0
    all_results = []

    for i, name in enumerate(treebanks_to_validate, 1):
        if verbose:
            print(f"\n[{i}/{len(treebanks_to_validate)}] {name}")

        try:
            results = validate_treebank(
                name,
                metadata[name],
                use_local=args.local,
                revision=args.revision,
                mode=args.mode,
                verbose=verbose,
                very_verbose=very_verbose
            )

            all_results.append(results)

            if results['success']:
                success_count += 1
            else:
                fail_count += 1

        except Exception as e:
            # Keep validating the remaining treebanks even if one blows up;
            # the failure is counted and the traceback printed for debugging.
            print(f"  ERROR: {e}")
            import traceback
            traceback.print_exc()
            fail_count += 1

    if verbose:
        print()
        print("=" * 60)
        print("VALIDATION SUMMARY")
        print("=" * 60)
        print(f"✅ Passed: {success_count}")
        print(f"❌ Failed: {fail_count}")
        print(f"Total: {success_count + fail_count}")

        # Aggregate only over treebanks that returned results (exceptions
        # above never appended to all_results).
        total_sentences = sum(r['total_sentences'] for r in all_results)
        total_errors = sum(r['total_errors'] for r in all_results)

        print(f"\nTotal sentences validated: {total_sentences:,}")
        print(f"Total errors found: {total_errors:,}")
        print()

        if fail_count == 0:
            print("🎉 SUCCESS: All parquet files validated successfully!")
            print("The reconstructed CoNLL-U matches original files 100%.")
            if not args.local:
                print("HuggingFace Hub data is correct and ready for production use.")
        else:
            print("⚠️ VALIDATION FAILED: Some treebanks have differences.")
            print("Please review errors before using in production.")

    # Exit status mirrors overall success for shell/CI integration.
    return 0 if fail_count == 0 else 1


if __name__ == "__main__":
    sys.exit(main())
|
|