# UDD-1 / scripts/convert_to_ud.py
# Commit 73132c4 (rain1024): Add batch processing and run script for GPU optimization
"""
Convert sentences to Universal Dependencies format compatible with HuggingFace.
Structure follows: https://huggingface.co/datasets/commul/universal_dependencies/viewer/vi_vtb
Uses underthesea dependency_parse for proper annotations.
Optimized for GPU batch processing.
"""
import json
import os
from os.path import dirname, expanduser, join
from concurrent.futures import ThreadPoolExecutor, as_completed
import multiprocessing
# Fix GPU tensor compatibility issue with pack_padded_sequence:
# torch requires the `lengths` argument to be on CPU even when the input
# tensor lives on CUDA, so we transparently move it before delegating.
import torch

_original_pack = torch.nn.utils.rnn.pack_padded_sequence


def _patched_pack(input, lengths, batch_first=False, enforce_sorted=True):
    """Wrapper around ``pack_padded_sequence`` that moves `lengths` to CPU.

    ``lengths`` may be a plain Python list/tuple as well as a tensor (both
    are accepted by torch); the original code called ``lengths.is_cuda``
    unconditionally, which raised AttributeError for non-tensor inputs.
    """
    if isinstance(lengths, torch.Tensor) and lengths.is_cuda:
        lengths = lengths.cpu()
    return _original_pack(input, lengths, batch_first=batch_first, enforce_sorted=enforce_sorted)


torch.nn.utils.rnn.pack_padded_sequence = _patched_pack
from underthesea import dependency_parse, pos_tag
# Global model cache for batch processing: flipped to True after the first
# warm-up call so the underthesea models are only loaded once per process.
_models_loaded = False

# Map Vietnamese POS tags to Universal POS tags
# Based on: https://universaldependencies.org/u/pos/
# Tags not present in this map fall back to 'X' in to_upos().
UPOS_MAP = {
    'N': 'NOUN',     # Noun
    'Np': 'PROPN',   # Proper noun
    'Nc': 'NOUN',    # Classifier noun
    'Nu': 'NOUN',    # Unit noun
    'V': 'VERB',     # Verb
    'A': 'ADJ',      # Adjective
    'P': 'PRON',     # Pronoun
    'R': 'ADV',      # Adverb
    'L': 'DET',      # Determiner/Quantifier
    'M': 'NUM',      # Numeral
    'E': 'ADP',      # Preposition
    'C': 'CCONJ',    # Coordinating conjunction
    'CC': 'CCONJ',   # Coordinating conjunction
    'SC': 'SCONJ',   # Subordinating conjunction
    'I': 'INTJ',     # Interjection
    'T': 'PART',     # Particle
    'B': 'X',        # Foreign word
    'Y': 'X',        # Abbreviation
    'S': 'SYM',      # Symbol
    'X': 'X',        # Other
    'CH': 'PUNCT',   # Punctuation
    'Ny': 'NOUN',    # Noun (variant)
}

# Vietnamese auxiliary verbs that should be tagged as AUX.
# Based on UD Vietnamese validation data (data.json).
# All entries are stored lowercase; to_upos() compares case-insensitively.
AUX_WORDS = {
    'bị', 'chưa thể', 'chắc chắn', 'có thể', 'có vẻ', 'cần',
    'giả', 'không thể', 'là', 'muốn', 'nghĩa là', 'nhằm',
    'nên', 'phải', 'quyết', 'thôi', 'thể', 'xong', 'được', 'định'
}

# Vietnamese determiners - words that should be DET when used as 'det' relation
DET_WORDS = {
    'các', 'những', 'mọi', 'mỗi', 'từng', 'bất kỳ', 'một', 'hai', 'ba',
    'này', 'đó', 'kia', 'ấy', 'nọ', 'nào', 'đấy', 'cái', 'con', 'chiếc',
    'người', 'cả', 'phá tán'  # Words that appear as det in the data
}

# Words that can be ADV when used as 'advmod'
ADV_WORDS = {
    'không', 'chưa', 'đã', 'đang', 'sẽ', 'còn', 'vẫn', 'cũng', 'rất',
    'quá', 'lắm', 'hơn', 'nhất', 'luôn', 'thường', 'hay', 'ít', 'nhiều',
    'tự', 'một cách', 'được', 'không thể', 'lại', 'cá biệt', 'dân sự'
}

# Invalid deprels that need to be mapped to valid ones.
# Applied twice in fix_syntax_errors: once per token (Fix 0) and once at the
# end (Fix 5c) to catch relations rewritten into an invalid form.
DEPREL_MAP = {
    'acomp': 'xcomp',           # Adjectival complement -> open clausal complement
    'nmod:comp': 'nmod',        # Invalid subtype
    'nmod:agent': 'obl:agent',  # Agent should be obl not nmod
    'nmod:with': 'nmod',        # Invalid subtype
    'nmod:about': 'nmod',       # Invalid subtype -> nmod
    'compound:number': 'nummod',  # Number compounds should be nummod
    'compound:nmod': 'compound',  # Invalid subtype
    'obl:pcomp': 'obl',         # Invalid subtype -> obl
}
def to_upos(tag, token=None):
    """Convert a Vietnamese POS tag to a Universal POS tag.

    Args:
        tag: Vietnamese POS tag (e.g. 'N', 'Np', 'V'), looked up in UPOS_MAP.
        token: optional surface form; known auxiliaries are forced to 'AUX'
            regardless of their POS tag.

    Returns:
        Universal POS tag string; 'X' for unmapped tags.
    """
    # AUX_WORDS entries are all stored lowercase, so a single
    # case-insensitive membership test is sufficient; the original
    # additionally rescanned the set comparing aux.lower(), which was
    # redundant.
    if token and token.lower() in AUX_WORDS:
        return 'AUX'
    return UPOS_MAP.get(tag, 'X')
def fix_syntax_errors(tokens, upos, head, deprel):
    """
    Post-process to fix common UD SYNTAX validation errors.

    Args:
        tokens: surface forms (list of str).
        upos: Universal POS tags, parallel to tokens.
        head: 1-based head indices ("0" = root), ints or strings.
        deprel: dependency relation labels, parallel to tokens.

    Returns:
        Tuple (upos, head, deprel) of fixed lists; head entries are
        returned as strings. (All three lists are returned, not just
        upos/deprel.)

    Run multiple passes to handle dependencies between fixes.
    """
    n = len(tokens)
    # Work on copies / normalized types so the caller's lists are untouched.
    upos = list(upos)
    deprel = list(deprel)
    head = [int(h) for h in head]
    # First pass: fix leaf nodes (aux/mark/case/punct should not have children)
    # Need multiple passes to handle chains of leaf nodes
    for _ in range(5):  # Multiple passes to handle chains
        changed = False
        for i in range(n):
            rel = deprel[i]
            # Leaf nodes should not have children - redirect children to parent
            # Include subtypes like aux:pass, mark:pcomp, etc.
            # Also include det, nummod, clf which should be leaves
            if rel.split(':')[0] in ('aux', 'cop', 'mark', 'case', 'punct', 'det', 'nummod', 'clf'):
                has_children = any(head[j] == i + 1 for j in range(n))
                if has_children:
                    my_head = head[i]
                    # NOTE(review): if head[i] == i + 1 (self-loop in the
                    # input), children would be re-attached to the leaf
                    # itself - assumed not to occur in parser output.
                    for j in range(n):
                        if head[j] == i + 1:
                            head[j] = my_head
                    changed = True
        if not changed:
            break
    # Main per-token pass: POS/relation compatibility fixes (Fixes 0-4).
    for i in range(n):
        token_lower = tokens[i].lower()
        rel = deprel[i]
        pos = upos[i]
        # Fix 0: Map invalid deprels to valid ones
        if rel in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[rel]
            rel = deprel[i]
        # Fix 1: rel-upos-det - 'det' (including subtypes) should be DET or PRON
        if rel.startswith('det') and pos not in ('DET', 'PRON'):
            # Force all 'det' relations to have DET or PRON UPOS
            upos[i] = 'DET'
        # Fix 2: rel-upos-advmod - 'advmod' (including subtypes) should be ADV
        if rel.startswith('advmod') and pos != 'ADV':
            # For advmod, always prefer changing UPOS to ADV
            upos[i] = 'ADV'
        # Fix 2b: rel-upos-nummod - 'nummod' should be NUM
        if rel.startswith('nummod') and upos[i] != 'NUM':
            # If token is clearly not a number (e.g., VERB), change relation instead
            if upos[i] == 'VERB':
                deprel[i] = 'acl'  # Adjectival clause for verbs
                rel = 'acl'  # Update local variable too
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'  # Adjectival modifier
                rel = 'amod'
            else:
                upos[i] = 'NUM'
        # Fix 3: rel-upos-mark - 'mark' (including subtypes) should not be AUX
        if rel.startswith('mark') and pos == 'AUX':
            upos[i] = 'SCONJ'
        # Fix 3b: rel-upos-punct - 'punct' must be PUNCT, and PUNCT must have 'punct' deprel
        if rel == 'punct' and pos != 'PUNCT':
            # Change relation to something appropriate based on POS
            if pos in ('VERB', 'NOUN', 'ADJ'):
                deprel[i] = 'dep'  # Use generic dependency
            else:
                upos[i] = 'PUNCT'
        # Fix 3b2: upos-rel-punct - PUNCT must have 'punct' deprel
        if pos == 'PUNCT' and rel != 'punct':
            deprel[i] = 'punct'
            rel = 'punct'
        # Fix 3c: rel-upos-case - 'case' should be ADP, not ADJ, AUX or PROPN
        if rel == 'case' and pos in ('ADJ', 'AUX', 'PROPN', 'NOUN', 'VERB'):
            upos[i] = 'ADP'
        # Fix 3d: rel-upos-cc - 'cc' should be CCONJ or SCONJ
        if rel == 'cc' and pos not in ('CCONJ', 'SCONJ'):
            upos[i] = 'CCONJ'
        # Fix 3e: rel-upos-aux - 'aux' should be AUX, but only for valid auxiliaries
        is_valid_aux = token_lower in AUX_WORDS or any(token_lower == aux.lower() for aux in AUX_WORDS)
        if rel.startswith('aux'):
            if is_valid_aux:
                upos[i] = 'AUX'
                pos = 'AUX'
            else:
                # Not a valid auxiliary - change relation to advcl or xcomp
                if pos == 'VERB' or upos[i] == 'VERB':
                    deprel[i] = 'advcl'
                    upos[i] = 'VERB'
                elif pos == 'ADP' or upos[i] == 'ADP':
                    deprel[i] = 'mark'
                    upos[i] = 'ADP'
                else:
                    deprel[i] = 'xcomp'
            # Refresh locals after the rewrites above so later fixes see them.
            rel = deprel[i]
            pos = upos[i]
        # Also fix AUX UPOS that's not a valid auxiliary (MORPHO aux-lemma)
        elif pos == 'AUX' and not is_valid_aux:
            upos[i] = 'VERB'  # Default to VERB for non-aux
            pos = 'VERB'
        # Fix 3f: rel-upos-cop - 'cop' should be AUX or PRON/DET, only 'là' is valid copula
        if rel == 'cop':
            if token_lower != 'là':
                # Not a valid copula, change to xcomp
                deprel[i] = 'xcomp'
                rel = 'xcomp'
            elif pos not in ('AUX', 'PRON', 'DET'):
                upos[i] = 'AUX'
        # Fix 4: obl-should-be-nmod - when parent is nominal, use nmod
        if rel.startswith('obl') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx < n and upos[parent_idx] in ('NOUN', 'PROPN', 'PRON'):
                # Preserve subtype if exists
                if ':' in rel:
                    deprel[i] = 'nmod:' + rel.split(':')[1]
                else:
                    deprel[i] = 'nmod'
    # Fix 5: (handled in first pass above)
    # Fix 5b: right-to-left relations - flat/conj/appos must be left-to-right
    for i in range(n):
        rel = deprel[i]
        base_rel = rel.split(':')[0]
        if base_rel in ('flat', 'conj', 'appos') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx > i:  # Parent comes after child (wrong direction)
                # Change to compound which allows both directions
                if ':' in rel:
                    deprel[i] = 'compound:' + rel.split(':')[1]
                else:
                    deprel[i] = 'compound'
    # Fix 5c: Apply DEPREL_MAP again to catch any newly created invalid deprels
    for i in range(n):
        if deprel[i] in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[deprel[i]]
    # Fix 5d: Final check for nummod with wrong UPOS
    for i in range(n):
        if deprel[i].startswith('nummod') and upos[i] != 'NUM':
            if upos[i] == 'VERB':
                deprel[i] = 'acl'
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'
            elif upos[i] == 'NOUN':
                deprel[i] = 'nmod'
            else:
                upos[i] = 'NUM'
    # Fix 6: too-many-subjects - add :outer subtype for multiple subjects
    # Group all subject types (nsubj, csubj) by predicate
    predicates = {}
    for i in range(n):
        base_rel = deprel[i].split(':')[0]
        if base_rel in ('nsubj', 'csubj') and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates:
                predicates[pred_idx] = []
            predicates[pred_idx].append((i, base_rel))
    for pred_idx, subj_list in predicates.items():
        if len(subj_list) > 1:
            # Sort by position to keep first subject as main
            subj_list.sort(key=lambda x: x[0])
            # Mark all but the first as :outer (only nsubj:outer is valid, not csubj:outer)
            for idx, base_rel in subj_list[1:]:
                if ':outer' not in deprel[idx]:
                    # csubj:outer is not a valid UD relation, use nsubj:outer instead
                    deprel[idx] = 'nsubj:outer'
    # Fix 7: too-many-objects - add :pass or compound for multiple objects
    predicates_obj = {}
    for i in range(n):
        if deprel[i] == 'obj' and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates_obj:
                predicates_obj[pred_idx] = []
            predicates_obj[pred_idx].append(i)
    for pred_idx, obj_indices in predicates_obj.items():
        if len(obj_indices) > 1:
            # Mark subsequent objects as compound
            for idx in obj_indices[1:]:
                # Check if it's adjacent to previous - likely compound
                # NOTE(review): this compares against the FIRST object, not
                # the immediately preceding one; with 3+ objects only the
                # second can ever match - confirm intent.
                if idx > 0 and obj_indices[0] == idx - 1:
                    deprel[idx] = 'compound'
                else:
                    deprel[idx] = 'iobj'
    # Fix 8: punct-is-nonproj - attach punctuation to avoid non-projectivity
    # Try to find the best attachment point that doesn't cross other edges
    for i in range(n):
        if upos[i] == 'PUNCT':
            # Try candidates in order: previous token, next token, then expand outward
            candidates = []
            if i > 0:
                candidates.append(i)  # Previous token (1-based)
            if i + 1 < n:
                candidates.append(i + 2)  # Next token (1-based)
            # Expand to find more candidates
            for dist in range(2, n):
                if i - dist >= 0:
                    candidates.append(i - dist + 1)  # 1-based
                if i + dist < n:
                    candidates.append(i + dist + 1)  # 1-based
            # Find best attachment that doesn't cause crossing
            # NOTE(review): for a single-token sentence candidates is empty
            # and the punct gets head 1, i.e. itself - verify upstream never
            # produces a lone PUNCT sentence.
            best_head = candidates[0] if candidates else 1
            for cand in candidates:
                test_head = list(head)
                test_head[i] = cand
                if not punct_causes_crossing(i, cand - 1, test_head, n):
                    best_head = cand
                    break
            head[i] = best_head
    return upos, [str(h) for h in head], deprel
def punct_causes_crossing(punct_idx, new_head_idx, head, n):
    """Return True if attaching the punct token to a new head crosses an edge.

    Args:
        punct_idx: 0-based index of the punctuation token.
        new_head_idx: 0-based index of the proposed head.
        head: 1-based head indices for all tokens (0 = root).
        n: number of tokens.
    """
    if not (0 <= new_head_idx < n):
        return False
    lo = min(punct_idx, new_head_idx)
    hi = max(punct_idx, new_head_idx)
    for other in range(n):
        # Skip the punct token itself and any token headed by it.
        if other == punct_idx:
            continue
        h = head[other]
        if h <= 0 or h == punct_idx + 1:
            continue
        other_head = h - 1
        if not (0 <= other_head < n):
            continue
        a, b = (other, other_head) if other < other_head else (other_head, other)
        # Two edges cross iff exactly one endpoint of one span lies
        # strictly inside the other span.
        if lo < a < hi < b or a < lo < b < hi:
            return True
    return False
def compute_space_after(text, tokens):
    """Compute the MISC column (SpaceAfter=No) for each token.

    Scans *text* left-to-right locating each token in order, then checks
    the character immediately following it.

    Args:
        text: the original raw sentence.
        tokens: token surface forms, in sentence order.

    Returns:
        List parallel to *tokens*: "SpaceAfter=No" when a token is directly
        followed by a non-whitespace character, "_" otherwise (including
        sentence-final tokens and tokens not found in *text*).
    """
    misc = []
    pos = 0
    for token in tokens:
        token_start = text.find(token, pos)
        if token_start == -1:
            # Token not locatable in the raw text (e.g. retokenized form);
            # default to assuming a following space.
            misc.append("_")
            continue
        token_end = token_start + len(token)
        pos = token_end
        # Fix: use str.isspace() so '\r', NBSP and other Unicode whitespace
        # also count as a space (the original only checked ' ', '\t', '\n').
        if token_end < len(text) and not text[token_end].isspace():
            misc.append("SpaceAfter=No")
        else:
            misc.append("_")
    return misc
def load_sentences(filepath):
    """Load sentences from sentences.txt or sentences_uvb.txt"""
    result = []
    with open(filepath, "r", encoding="utf-8") as fh:
        for raw in fh:
            stripped = raw.strip()
            if not stripped:
                continue
            fields = stripped.split("\t")
            # Two layouts are supported:
            #   sentences.txt:      idx\tsentence
            #   sentences_uvb.txt:  idx\tsource\tsentence
            if len(fields) >= 3:
                result.append(fields[2])
            elif len(fields) == 2:
                result.append(fields[1])
    return result
def process_single_sentence(args):
    """Process a single sentence (used for parallel processing).

    Args:
        args: tuple of (1-based sentence index, raw sentence text).

    Returns:
        (idx, row) where row is a dict following the HuggingFace UD schema.
    """
    idx, text = args
    sent_id = f"s{idx}"
    try:
        # dependency_parse supplies tokens, heads and dependency relations.
        parsed = dependency_parse(text)
        tokens = [entry[0] for entry in parsed]
        head = [str(entry[1]) for entry in parsed]
        deprel = [entry[2] for entry in parsed]
        tagged = pos_tag(text)
        if len(tagged) == len(tokens):
            xpos = [tg[1] for tg in tagged]
            upos = [to_upos(tg[1], tg[0]) for tg in tagged]
        else:
            # Tokenization mismatch between the two models: no reliable
            # alignment, so fall back to 'X' tags.
            xpos = ['X'] * len(tokens)
            upos = ['X'] * len(tokens)
    except Exception:
        # Parsing failed: degrade to POS tagging only, with every head set
        # to "0" and a generic 'dep' relation (first token marked 'root').
        tagged = pos_tag(text)
        tokens = [tg[0] for tg in tagged]
        xpos = [tg[1] for tg in tagged]
        upos = [to_upos(tg[1], tg[0]) for tg in tagged]
        head = ["0"] * len(tokens)
        deprel = ["dep"] * len(tokens)
        if tokens:
            deprel[0] = "root"
    # Repair common UD validation errors before emitting the row.
    upos, head, deprel = fix_syntax_errors(tokens, upos, head, deprel)
    n = len(tokens)
    row = {
        "sent_id": sent_id,
        "text": text,
        "comments": [f"# sent_id = {sent_id}", f"# text = {text}"],
        "tokens": tokens,
        "lemmas": [tok.lower() for tok in tokens],
        "upos": upos,
        "xpos": xpos,
        "feats": ["_"] * n,
        "head": head,
        "deprel": deprel,
        "deps": ["_"] * n,
        "misc": compute_space_after(text, tokens),
        "mwt": [],
        "empty_nodes": [],
    }
    return idx, row
def convert_to_ud_format(sentences, batch_size=32, num_workers=4):
    """Convert sentences to UD format using dependency_parse with batch processing.

    Args:
        sentences: list of raw sentence strings.
        batch_size: number of sentences per progress batch.
        num_workers: unused here; kept for interface compatibility with
            convert_to_ud_format_parallel.

    Returns:
        List of UD row dicts, in the same order as *sentences*.
    """
    global _models_loaded
    if not _models_loaded:
        # Warm-up call so model loading happens once, up front.
        print(" Loading models into GPU memory...")
        _ = dependency_parse("Xin chào")
        _ = pos_tag("Xin chào")
        _models_loaded = True
        print(" Models loaded.")
    total = len(sentences)
    data = [None] * total
    print(f" Processing {total} sentences with batch_size={batch_size}...")
    for batch_start in range(0, total, batch_size):
        batch_end = min(batch_start + batch_size, total)
        # Sentences are handled sequentially; the batch boundary only
        # controls how often progress is reported.
        for offset in range(batch_start, batch_end):
            idx, row = process_single_sentence((offset + 1, sentences[offset]))
            data[idx - 1] = row
        if batch_end % 100 == 0 or batch_end == total:
            print(f" Processed {batch_end}/{total} sentences ({100*batch_end/total:.1f}%)")
    return data
def convert_to_ud_format_parallel(sentences, num_workers=None):
    """Convert sentences using multiple workers (CPU parallelism).

    Note: This is useful when GPU is bottleneck or for CPU-only processing.
    For GPU processing, use convert_to_ud_format with batch processing.

    Args:
        sentences: list of raw sentence strings.
        num_workers: thread count; defaults to min(4, cpu_count()).

    Returns:
        List of UD row dicts, in the same order as *sentences*.
    """
    global _models_loaded
    if num_workers is None:
        num_workers = min(4, multiprocessing.cpu_count())
    if not _models_loaded:
        # Single warm-up call so all worker threads share loaded models.
        print(" Loading models...")
        _ = dependency_parse("Xin chào")
        _ = pos_tag("Xin chào")
        _models_loaded = True
        print(" Models loaded.")
    total = len(sentences)
    data = [None] * total
    done = 0
    print(f" Processing {total} sentences with {num_workers} workers...")
    # Threads (not processes) so the shared model is reused across workers.
    with ThreadPoolExecutor(max_workers=num_workers) as executor:
        futures = [
            executor.submit(process_single_sentence, (i + 1, sentences[i]))
            for i in range(total)
        ]
        for future in as_completed(futures):
            idx, row = future.result()
            data[idx - 1] = row
            done += 1
            if done % 100 == 0 or done == total:
                print(f" Processed {done}/{total} sentences ({100*done/total:.1f}%)")
    return data
def save_jsonl(data, filepath):
    """Save data as JSONL format (one UTF-8 JSON object per line)."""
    with open(filepath, "w", encoding="utf-8") as out:
        out.writelines(json.dumps(row, ensure_ascii=False) + "\n" for row in data)
def save_conllu(data, filepath):
    """Save data as CoNLL-U format.

    Each sentence gets its sent_id/text comment lines, one tab-separated
    line per token (ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC),
    and a terminating blank line.
    """
    with open(filepath, "w", encoding="utf-8") as out:
        for row in data:
            out.write(f"# sent_id = {row['sent_id']}\n")
            out.write(f"# text = {row['text']}\n")
            for tok_id, form in enumerate(row['tokens'], start=1):
                i = tok_id - 1
                fields = (
                    str(tok_id),
                    form,
                    row['lemmas'][i],
                    row['upos'][i],
                    row['xpos'][i],
                    row['feats'][i],
                    row['head'][i],
                    row['deprel'][i],
                    row['deps'][i],
                    row['misc'][i],
                )
                out.write("\t".join(fields) + "\n")
            out.write("\n")
def main():
    """CLI entry point: load sentences, convert to UD, write JSONL + CoNLL-U."""
    import argparse
    import time

    parser = argparse.ArgumentParser(description="Convert sentences to UD format")
    parser.add_argument("--input", "-i", type=str, help="Input sentences file")
    parser.add_argument("--output-dir", "-o", type=str, help="Output directory")
    parser.add_argument("--prefix", "-p", type=str, default="train", help="Output file prefix")
    parser.add_argument("--batch-size", "-b", type=int, default=64,
                        help="Batch size for GPU processing (default: 64, increase for more GPU usage)")
    parser.add_argument("--parallel", action="store_true",
                        help="Use parallel processing with multiple workers")
    parser.add_argument("--workers", "-w", type=int, default=4,
                        help="Number of workers for parallel processing (default: 4)")
    args = parser.parse_args()

    # Resolve the input file (defaults to the local UD_Vietnamese download).
    if args.input:
        sentences_file = args.input
    else:
        sentences_file = join(expanduser("~/Downloads/UD_Vietnamese-UUD-v0.1"), "sentences.txt")
    output_dir = args.output_dir if args.output_dir else dirname(sentences_file)

    print("Loading sentences...")
    sentences = load_sentences(sentences_file)
    print(f"Loaded {len(sentences)} sentences")

    # Report hardware so throughput numbers can be interpreted.
    if torch.cuda.is_available():
        print(f"GPU: {torch.cuda.get_device_name(0)}")
        print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB")
    else:
        print("GPU: Not available (using CPU)")

    print(f"\nConverting to UD format (batch_size={args.batch_size})...")
    start_time = time.time()
    if args.parallel:
        data = convert_to_ud_format_parallel(sentences, num_workers=args.workers)
    else:
        data = convert_to_ud_format(sentences, batch_size=args.batch_size)
    elapsed = time.time() - start_time
    print(f"\nCompleted in {elapsed:.1f}s ({len(sentences) / elapsed:.1f} sentences/sec)")

    # Save as JSONL (for HuggingFace).
    jsonl_file = join(output_dir, f"{args.prefix}.jsonl")
    save_jsonl(data, jsonl_file)
    print(f"Saved JSONL to: {jsonl_file}")

    # Save as CoNLL-U (standard UD format).
    conllu_file = join(output_dir, f"{args.prefix}.conllu")
    save_conllu(data, conllu_file)
    print(f"Saved CoNLL-U to: {conllu_file}")

    # Quick sanity sample from the first converted sentence.
    print("\nSample row:")
    sample = data[0]
    print(f" sent_id: {sample['sent_id']}")
    print(f" text: {sample['text'][:60]}...")
    print(f" tokens: {sample['tokens'][:5]}...")
    print(f" upos: {sample['upos'][:5]}...")


if __name__ == "__main__":
    main()