| """ |
| Convert sentences to Universal Dependencies format compatible with HuggingFace. |
| Structure follows: https://huggingface.co/datasets/commul/universal_dependencies/viewer/vi_vtb |
| Uses underthesea dependency_parse for proper annotations. |
| |
| Optimized for GPU batch processing. |
| """ |
|
|
| import json |
| import os |
| from os.path import dirname, expanduser, join |
| from concurrent.futures import ThreadPoolExecutor, as_completed |
| import multiprocessing |
|
|
| |
| |
import torch

# underthesea's BiLSTM parser can hand CUDA-resident `lengths` tensors to
# pack_padded_sequence, which PyTorch requires to live on the CPU. Patch the
# function once at import time to move them over transparently.
_original_pack = torch.nn.utils.rnn.pack_padded_sequence


def _patched_pack(input, lengths, batch_first=False, enforce_sorted=True):
    """Wrapper for pack_padded_sequence that moves CUDA lengths to the CPU.

    Fix: `lengths` may legally be a plain Python list (per the upstream API),
    which has no `.is_cuda` attribute — guard with an isinstance check so the
    patch does not raise AttributeError for list inputs.
    """
    if isinstance(lengths, torch.Tensor) and lengths.is_cuda:
        lengths = lengths.cpu()
    return _original_pack(input, lengths, batch_first=batch_first, enforce_sorted=enforce_sorted)


torch.nn.utils.rnn.pack_padded_sequence = _patched_pack
|
|
| from underthesea import dependency_parse, pos_tag |
|
|
| |
# Lazy-initialization flag: flipped to True after the first warm-up call to
# the underthesea models (see convert_to_ud_format / convert_to_ud_format_parallel).
_models_loaded = False
|
|
| |
| |
# Mapping from Vietnamese treebank POS tags (as emitted by underthesea's
# pos_tag) to Universal POS tags. Unknown tags fall back to 'X' via
# UPOS_MAP.get(tag, 'X') in to_upos().
UPOS_MAP = {
    'N': 'NOUN',     # common noun
    'Np': 'PROPN',   # proper noun
    'Nc': 'NOUN',    # classifier noun
    'Nu': 'NOUN',    # unit noun
    'V': 'VERB',
    'A': 'ADJ',
    'P': 'PRON',
    'R': 'ADV',
    'L': 'DET',      # determiner
    'M': 'NUM',
    'E': 'ADP',      # preposition
    'C': 'CCONJ',
    'CC': 'CCONJ',
    'SC': 'SCONJ',
    'I': 'INTJ',
    'T': 'PART',     # particle
    'B': 'X',        # borrowed word
    'Y': 'X',        # abbreviation
    'S': 'SYM',
    'X': 'X',
    'CH': 'PUNCT',   # punctuation
    'Ny': 'NOUN',
}
|
|
| |
| |
# Lowercase Vietnamese words treated as auxiliaries (UPOS AUX) regardless of
# the treebank tag — e.g. the copula "là", passive marker "bị"/"được", and
# modality markers ("có thể", "phải", ...). Consulted by to_upos() and by the
# aux/cop validity checks in fix_syntax_errors().
AUX_WORDS = {
    'bị', 'chưa thể', 'chắc chắn', 'có thể', 'có vẻ', 'cần',
    'giả', 'không thể', 'là', 'muốn', 'nghĩa là', 'nhằm',
    'nên', 'phải', 'quyết', 'thôi', 'thể', 'xong', 'được', 'định'
}
|
|
| |
# Vietnamese words commonly functioning as determiners / classifiers
# (plural markers, demonstratives, classifier nouns).
# NOTE(review): 'phá tán' looks like a verb ("to scatter"), not a determiner
# — confirm it belongs in this set. This set is currently not referenced by
# the visible code; it may be used elsewhere in the project.
DET_WORDS = {
    'các', 'những', 'mọi', 'mỗi', 'từng', 'bất kỳ', 'một', 'hai', 'ba',
    'này', 'đó', 'kia', 'ấy', 'nọ', 'nào', 'đấy', 'cái', 'con', 'chiếc',
    'người', 'cả', 'phá tán'
}
|
|
| |
# Vietnamese words commonly functioning as adverbs (negation, tense/aspect
# markers, degree words).
# NOTE(review): 'cá biệt' and 'dân sự' look adjectival rather than adverbial
# — confirm they belong here. This set is currently not referenced by the
# visible code; it may be used elsewhere in the project.
ADV_WORDS = {
    'không', 'chưa', 'đã', 'đang', 'sẽ', 'còn', 'vẫn', 'cũng', 'rất',
    'quá', 'lắm', 'hơn', 'nhất', 'luôn', 'thường', 'hay', 'ít', 'nhiều',
    'tự', 'một cách', 'được', 'không thể', 'lại', 'cá biệt', 'dân sự'
}
|
|
| |
# Normalization of parser-emitted dependency relation labels to valid
# Universal Dependencies v2 relation names. Applied (twice) inside
# fix_syntax_errors() so labels introduced by later passes are also mapped.
DEPREL_MAP = {
    'acomp': 'xcomp',
    'nmod:comp': 'nmod',
    'nmod:agent': 'obl:agent',
    'nmod:with': 'nmod',
    'nmod:about': 'nmod',
    'compound:number': 'nummod',
    'compound:nmod': 'compound',
    'obl:pcomp': 'obl',
}
|
|
|
|
def to_upos(tag, token=None):
    """Convert a Vietnamese treebank POS tag to a Universal POS tag.

    Args:
        tag: treebank POS tag (e.g. 'N', 'Np', 'V') as produced by pos_tag.
        token: optional surface form; known auxiliary words are tagged 'AUX'
            regardless of the treebank tag.

    Returns:
        A UPOS string; unknown tags map to 'X'.
    """
    # Lexical override: whitelisted auxiliaries are always AUX. AUX_WORDS
    # entries are already lowercase, so a single membership test suffices —
    # the previous extra `aux.lower()` scan over the set was redundant.
    if token and token.lower() in AUX_WORDS:
        return 'AUX'
    return UPOS_MAP.get(tag, 'X')
|
|
|
|
def fix_syntax_errors(tokens, upos, head, deprel):
    """
    Post-process parser output to fix common UD SYNTAX validation errors.

    Args:
        tokens: list of surface forms.
        upos: list of UPOS tags, parallel to tokens.
        head: list of 1-based head indices ('0' = root), ints or strings.
        deprel: list of dependency relation labels.

    Returns:
        Tuple (upos, head, deprel) of corrected lists; head entries are
        returned as strings. Inputs are not mutated.

    Runs multiple passes because fixes can enable further fixes.
    """
    n = len(tokens)
    # Work on copies; convert heads to ints for arithmetic.
    upos = list(upos)
    deprel = list(deprel)
    head = [int(h) for h in head]

    # Pass 1 (up to 5 iterations): function words (aux/cop/mark/case/punct/
    # det/nummod/clf) must be leaves in UD, so re-attach any children of a
    # function word to that word's own head. Iterated because moving a child
    # can expose a new function-word parent.
    for _ in range(5):
        changed = False
        for i in range(n):
            rel = deprel[i]
            if rel.split(':')[0] in ('aux', 'cop', 'mark', 'case', 'punct', 'det', 'nummod', 'clf'):
                has_children = any(head[j] == i + 1 for j in range(n))
                if has_children:
                    my_head = head[i]
                    for j in range(n):
                        if head[j] == i + 1:
                            head[j] = my_head
                    changed = True
        if not changed:
            break

    # Pass 2: per-token UPOS / deprel consistency fixes.
    for i in range(n):
        token_lower = tokens[i].lower()
        rel = deprel[i]
        pos = upos[i]

        # Normalize non-UD relation names first.
        if rel in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[rel]
            rel = deprel[i]

        # det requires UPOS DET or PRON.
        if rel.startswith('det') and pos not in ('DET', 'PRON'):
            upos[i] = 'DET'

        # advmod requires UPOS ADV.
        if rel.startswith('advmod') and pos != 'ADV':
            upos[i] = 'ADV'

        # nummod requires UPOS NUM; otherwise reinterpret the relation.
        if rel.startswith('nummod') and upos[i] != 'NUM':
            if upos[i] == 'VERB':
                deprel[i] = 'acl'
                rel = 'acl'
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'
                rel = 'amod'
            else:
                upos[i] = 'NUM'

        # mark on an AUX-tagged token: treat as subordinating conjunction.
        if rel.startswith('mark') and pos == 'AUX':
            upos[i] = 'SCONJ'

        # punct relation requires UPOS PUNCT; content words get demoted to dep.
        if rel == 'punct' and pos != 'PUNCT':
            if pos in ('VERB', 'NOUN', 'ADJ'):
                deprel[i] = 'dep'
            else:
                upos[i] = 'PUNCT'

        # Conversely, PUNCT tokens may only bear the punct relation.
        if pos == 'PUNCT' and rel != 'punct':
            deprel[i] = 'punct'
            rel = 'punct'

        # case markers tagged as content words become ADP.
        if rel == 'case' and pos in ('ADJ', 'AUX', 'PROPN', 'NOUN', 'VERB'):
            upos[i] = 'ADP'

        # cc requires a coordinating conjunction.
        if rel == 'cc' and pos not in ('CCONJ', 'SCONJ'):
            upos[i] = 'CCONJ'

        # aux relation is only valid for whitelisted auxiliary words.
        # (The any(...) scan is redundant with the membership test since
        # AUX_WORDS entries are lowercase, but is kept byte-identical here.)
        is_valid_aux = token_lower in AUX_WORDS or any(token_lower == aux.lower() for aux in AUX_WORDS)
        if rel.startswith('aux'):
            if is_valid_aux:
                upos[i] = 'AUX'
                pos = 'AUX'
            else:
                # Not a real auxiliary: pick a relation fitting its POS.
                if pos == 'VERB' or upos[i] == 'VERB':
                    deprel[i] = 'advcl'
                    upos[i] = 'VERB'
                elif pos == 'ADP' or upos[i] == 'ADP':
                    deprel[i] = 'mark'
                    upos[i] = 'ADP'
                else:
                    deprel[i] = 'xcomp'
                rel = deprel[i]
                pos = upos[i]
        elif pos == 'AUX' and not is_valid_aux:
            # AUX tag on a non-whitelisted word: treat as a plain verb.
            upos[i] = 'VERB'
            pos = 'VERB'

        # cop is reserved for the copula "là".
        if rel == 'cop':
            if token_lower != 'là':
                deprel[i] = 'xcomp'
                rel = 'xcomp'
            elif pos not in ('AUX', 'PRON', 'DET'):
                upos[i] = 'AUX'

        # obl under a nominal head should be nmod (subtype preserved).
        if rel.startswith('obl') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx < n and upos[parent_idx] in ('NOUN', 'PROPN', 'PRON'):
                if ':' in rel:
                    deprel[i] = 'nmod:' + rel.split(':')[1]
                else:
                    deprel[i] = 'nmod'

    # Pass 3: flat/conj/appos must attach leftward in UD; right-pointing
    # instances are relabelled as compound (subtype preserved).
    for i in range(n):
        rel = deprel[i]
        base_rel = rel.split(':')[0]
        if base_rel in ('flat', 'conj', 'appos') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx > i:
                if ':' in rel:
                    deprel[i] = 'compound:' + rel.split(':')[1]
                else:
                    deprel[i] = 'compound'

    # Re-apply relation normalization for labels introduced above.
    for i in range(n):
        if deprel[i] in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[deprel[i]]

    # Re-check nummod/UPOS consistency after the relabelling passes.
    for i in range(n):
        if deprel[i].startswith('nummod') and upos[i] != 'NUM':
            if upos[i] == 'VERB':
                deprel[i] = 'acl'
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'
            elif upos[i] == 'NOUN':
                deprel[i] = 'nmod'
            else:
                upos[i] = 'NUM'

    # A predicate may have only one plain nsubj/csubj; later subjects
    # (by surface order) are demoted to nsubj:outer.
    predicates = {}
    for i in range(n):
        base_rel = deprel[i].split(':')[0]
        if base_rel in ('nsubj', 'csubj') and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates:
                predicates[pred_idx] = []
            predicates[pred_idx].append((i, base_rel))

    for pred_idx, subj_list in predicates.items():
        if len(subj_list) > 1:
            subj_list.sort(key=lambda x: x[0])
            # Keep the first subject; relabel the rest.
            for idx, base_rel in subj_list[1:]:
                if ':outer' not in deprel[idx]:
                    deprel[idx] = 'nsubj:outer'

    # A predicate may have only one obj: a duplicate immediately following
    # the first obj becomes compound, any other duplicate becomes iobj.
    predicates_obj = {}
    for i in range(n):
        if deprel[i] == 'obj' and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates_obj:
                predicates_obj[pred_idx] = []
            predicates_obj[pred_idx].append(i)

    for pred_idx, obj_indices in predicates_obj.items():
        if len(obj_indices) > 1:
            for idx in obj_indices[1:]:
                if idx > 0 and obj_indices[0] == idx - 1:
                    deprel[idx] = 'compound'
                else:
                    deprel[idx] = 'iobj'

    # Final pass: re-attach each punctuation token to the nearest head that
    # does not create a crossing (non-projective) arc. Candidates are 1-based
    # head ids ordered by distance: previous token, next token, then
    # progressively farther neighbours on both sides.
    for i in range(n):
        if upos[i] == 'PUNCT':
            candidates = []
            if i > 0:
                candidates.append(i)
            if i + 1 < n:
                candidates.append(i + 2)

            for dist in range(2, n):
                if i - dist >= 0:
                    candidates.append(i - dist + 1)
                if i + dist < n:
                    candidates.append(i + dist + 1)

            # Fall back to the first candidate (or root-adjacent token 1)
            # when every attachment would cross some arc.
            best_head = candidates[0] if candidates else 1
            for cand in candidates:
                test_head = list(head)
                test_head[i] = cand
                if not punct_causes_crossing(i, cand - 1, test_head, n):
                    best_head = cand
                    break

            head[i] = best_head

    return upos, [str(h) for h in head], deprel
|
|
|
|
def punct_causes_crossing(punct_idx, new_head_idx, head, n):
    """Return True if attaching the punct token to the proposed head would
    cross an existing dependency arc.

    Args:
        punct_idx: 0-based index of the punctuation token.
        new_head_idx: 0-based index of the proposed head.
        head: list of 1-based head indices (0 = root) for all n tokens.
        n: number of tokens.
    """
    if not (0 <= new_head_idx < n):
        return False

    lo = min(punct_idx, new_head_idx)
    hi = max(punct_idx, new_head_idx)

    for tok in range(n):
        # Ignore the punct token itself and any token attached to it.
        if tok == punct_idx or head[tok] <= 0 or head[tok] == punct_idx + 1:
            continue
        parent = head[tok] - 1
        if not (0 <= parent < n):
            continue
        a, b = (tok, parent) if tok < parent else (parent, tok)
        # Two arcs cross iff exactly one endpoint of one arc lies strictly
        # inside the span of the other.
        if lo < a < hi < b or a < lo < b < hi:
            return True

    return False
|
|
|
|
def compute_space_after(text, tokens):
    """Derive the MISC column ("SpaceAfter=No" or "_") for each token.

    Tokens are located left-to-right in ``text``; a token immediately
    followed by a non-whitespace character is marked SpaceAfter=No. Tokens
    that cannot be found verbatim default to "_".
    """
    result = []
    cursor = 0
    for token in tokens:
        start = text.find(token, cursor)
        if start < 0:
            # Token not present verbatim (e.g. segmentation artifact):
            # leave MISC unspecified and keep the cursor where it was.
            result.append("_")
            continue

        cursor = start + len(token)
        # End of text counts as "followed by a space".
        followed_by_space = cursor >= len(text) or text[cursor] in ' \t\n'
        result.append("_" if followed_by_space else "SpaceAfter=No")

    return result
|
|
|
|
def load_sentences(filepath):
    """Load sentences from an input file.

    Supported tab-separated formats:
    - sent_id\\tsentence (build_dataset.py output: sentences_train.txt, etc.)
    - idx\\tsentence (fetch_data.py output: sentences_vlc.txt, etc.)
    - idx\\tsource\\tsentence (fetch_uvb_data.py output: sentences_uvb.txt)

    Returns a list of (sent_id, sentence) tuples. Purely numeric ids get an
    "s" prefix; non-numeric ids are used verbatim. Blank and single-field
    lines are skipped.
    """
    result = []
    with open(filepath, "r", encoding="utf-8") as fh:
        for raw in fh:
            stripped = raw.strip()
            if not stripped:
                continue
            fields = stripped.split("\t")
            if len(fields) == 2:
                ident, sentence = fields
                sent_id = ident if not ident.isdigit() else f"s{ident}"
                result.append((sent_id, sentence))
            elif len(fields) >= 3:
                result.append((f"s{fields[0]}", fields[2]))
    return result
|
|
|
|
def process_single_sentence(args):
    """Parse one sentence into a HuggingFace-style UD row.

    Args:
        args: (idx, text, sent_id) triple, packed so the function can be
            mapped over by an executor.

    Returns:
        (idx, row) where row is a dict with the CoNLL-U columns plus
        sent_id/text/comments metadata.
    """
    idx, text, sent_id = args

    try:
        # Primary path: full dependency parse plus POS tagging.
        parsed = dependency_parse(text)
        tokens = [entry[0] for entry in parsed]
        head = [str(entry[1]) for entry in parsed]
        deprel = [entry[2] for entry in parsed]

        tagged = pos_tag(text)
        if len(tagged) == len(tokens):
            xpos = [pair[1] for pair in tagged]
            upos = [to_upos(pair[1], pair[0]) for pair in tagged]
        else:
            # Tokenization mismatch between the two models: fall back to X.
            xpos = ['X'] * len(tokens)
            upos = ['X'] * len(tokens)
    except Exception:
        # Parser failed: build a flat tree from POS tags alone.
        tagged = pos_tag(text)
        tokens = [pair[0] for pair in tagged]
        xpos = [pair[1] for pair in tagged]
        upos = [to_upos(pair[1], pair[0]) for pair in tagged]
        head = ["0"] * len(tokens)
        deprel = ["dep"] * len(tokens)
        if tokens:
            deprel[0] = "root"

    upos, head, deprel = fix_syntax_errors(tokens, upos, head, deprel)

    count = len(tokens)
    row = {
        "sent_id": sent_id,
        "text": text,
        "comments": [f"# sent_id = {sent_id}", f"# text = {text}"],
        "tokens": tokens,
        "lemmas": [tok.lower() for tok in tokens],
        "upos": upos,
        "xpos": xpos,
        "feats": ["_"] * count,
        "head": head,
        "deprel": deprel,
        "deps": ["_"] * count,
        "misc": compute_space_after(text, tokens),
        "mwt": [],
        "empty_nodes": []
    }
    return idx, row
|
|
|
|
def convert_to_ud_format(sentences, batch_size=32, num_workers=4):
    """Convert sentences to UD format sequentially, in chunks of batch_size.

    Args:
        sentences: list of (sent_id, text) tuples or list of text strings
        batch_size: chunk size used for progress reporting
        num_workers: unused in this mode (kept for interface parity with
            convert_to_ud_format_parallel)
    """
    global _models_loaded

    # Warm the underthesea models once per process with a dummy call.
    if not _models_loaded:
        print(" Loading models into GPU memory...")
        _ = dependency_parse("Xin chào")
        _ = pos_tag("Xin chào")
        _models_loaded = True
        print(" Models loaded.")

    total = len(sentences)
    data = [None] * total

    print(f" Processing {total} sentences with batch_size={batch_size}...")

    for start in range(0, total, batch_size):
        end = min(start + batch_size, total)
        for pos in range(start, end):
            entry = sentences[pos]
            if isinstance(entry, tuple):
                sid, text = entry
            else:
                text, sid = entry, f"s{pos + 1}"
            idx, row = process_single_sentence((pos + 1, text, sid))
            data[idx - 1] = row

        processed = end
        if processed % 100 == 0 or processed == total:
            print(f" Processed {processed}/{total} sentences ({100*processed/total:.1f}%)")

    return data
|
|
|
|
def convert_to_ud_format_parallel(sentences, num_workers=None):
    """Convert sentences using a thread pool (CPU parallelism).

    Note: This is useful when GPU is bottleneck or for CPU-only processing.
    For GPU processing, use convert_to_ud_format with batch processing.

    Args:
        sentences: list of (sent_id, text) tuples or list of text strings
        num_workers: number of parallel workers; defaults to
            min(4, cpu_count())
    """
    global _models_loaded

    if num_workers is None:
        num_workers = min(4, multiprocessing.cpu_count())

    # Warm the underthesea models once per process with a dummy call.
    if not _models_loaded:
        print(" Loading models...")
        _ = dependency_parse("Xin chào")
        _ = pos_tag("Xin chào")
        _models_loaded = True
        print(" Models loaded.")

    total = len(sentences)
    data = [None] * total
    processed = 0

    print(f" Processing {total} sentences with {num_workers} workers...")

    # Normalize every entry to the (idx, text, sent_id) shape that
    # process_single_sentence expects.
    tasks = []
    for pos, entry in enumerate(sentences):
        if isinstance(entry, tuple):
            sid, text = entry
        else:
            text, sid = entry, f"s{pos + 1}"
        tasks.append((pos + 1, text, sid))

    with ThreadPoolExecutor(max_workers=num_workers) as pool:
        pending = [pool.submit(process_single_sentence, task) for task in tasks]
        # Results arrive out of order; idx places each row correctly.
        for fut in as_completed(pending):
            idx, row = fut.result()
            data[idx - 1] = row
            processed += 1
            if processed % 100 == 0 or processed == total:
                print(f" Processed {processed}/{total} sentences ({100*processed/total:.1f}%)")

    return data
|
|
|
|
def save_jsonl(data, filepath):
    """Write rows to ``filepath`` as JSONL: one JSON object per line,
    UTF-8 with non-ASCII characters kept unescaped."""
    with open(filepath, "w", encoding="utf-8") as out:
        out.writelines(json.dumps(row, ensure_ascii=False) + "\n" for row in data)
|
|
|
|
def save_conllu(data, filepath):
    """Write rows to ``filepath`` in 10-column CoNLL-U format.

    Each sentence gets its sent_id/text comment lines, one tab-separated
    line per token (ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS,
    MISC), and a trailing blank line.
    """
    columns = ('tokens', 'lemmas', 'upos', 'xpos', 'feats',
               'head', 'deprel', 'deps', 'misc')
    with open(filepath, "w", encoding="utf-8") as out:
        for row in data:
            out.write(f"# sent_id = {row['sent_id']}\n")
            out.write(f"# text = {row['text']}\n")
            for tok_id in range(1, len(row['tokens']) + 1):
                fields = [str(tok_id)]
                fields.extend(row[col][tok_id - 1] for col in columns)
                out.write("\t".join(fields) + "\n")
            out.write("\n")
|
|
|
|
def main():
    """CLI entry point: load sentences, convert to UD, save JSONL and CoNLL-U.

    Fixes over the original: guards against an IndexError when the input
    file yields zero sentences (the sample-row printout indexed data[0]
    unconditionally) and against ZeroDivisionError when conversion finishes
    in under the timer's resolution.
    """
    import argparse
    import time

    parser = argparse.ArgumentParser(description="Convert sentences to UD format")
    parser.add_argument("--input", "-i", type=str, help="Input sentences file")
    parser.add_argument("--output-dir", "-o", type=str, help="Output directory")
    parser.add_argument("--prefix", "-p", type=str, default="train", help="Output file prefix")
    parser.add_argument("--batch-size", "-b", type=int, default=64,
                        help="Batch size for GPU processing (default: 64, increase for more GPU usage)")
    parser.add_argument("--parallel", action="store_true",
                        help="Use parallel processing with multiple workers")
    parser.add_argument("--workers", "-w", type=int, default=4,
                        help="Number of workers for parallel processing (default: 4)")
    args = parser.parse_args()

    # Resolve input location; the default mirrors the dataset layout used
    # during development.
    if args.input:
        sentences_file = args.input
    else:
        source_folder = expanduser("~/Downloads/UD_Vietnamese-UUD-v0.1")
        sentences_file = join(source_folder, "sentences.txt")

    # Outputs land next to the input unless an explicit directory is given.
    if args.output_dir:
        output_dir = args.output_dir
    else:
        output_dir = dirname(sentences_file)

    print("Loading sentences...")
    sentences = load_sentences(sentences_file)
    print(f"Loaded {len(sentences)} sentences")

    if torch.cuda.is_available():
        print(f"GPU: {torch.cuda.get_device_name(0)}")
        print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB")
    else:
        print("GPU: Not available (using CPU)")

    print(f"\nConverting to UD format (batch_size={args.batch_size})...")
    start_time = time.time()

    if args.parallel:
        data = convert_to_ud_format_parallel(sentences, num_workers=args.workers)
    else:
        data = convert_to_ud_format(sentences, batch_size=args.batch_size)

    elapsed = time.time() - start_time
    # Avoid ZeroDivisionError on near-instant runs (e.g. empty input).
    speed = len(sentences) / elapsed if elapsed > 0 else 0.0
    print(f"\nCompleted in {elapsed:.1f}s ({speed:.1f} sentences/sec)")

    jsonl_file = join(output_dir, f"{args.prefix}.jsonl")
    save_jsonl(data, jsonl_file)
    print(f"Saved JSONL to: {jsonl_file}")

    conllu_file = join(output_dir, f"{args.prefix}.conllu")
    save_conllu(data, conllu_file)
    print(f"Saved CoNLL-U to: {conllu_file}")

    # Only show a sample when there is one; the original crashed on empty input.
    if data:
        print("\nSample row:")
        sample = data[0]
        print(f" sent_id: {sample['sent_id']}")
        print(f" text: {sample['text'][:60]}...")
        print(f" tokens: {sample['tokens'][:5]}...")
        print(f" upos: {sample['upos'][:5]}...")


if __name__ == "__main__":
    main()
|
|