|
|
""" |
|
|
Statistics for UD Vietnamese Dataset (UDD-v0.1) |
|
|
""" |
|
|
|
|
|
from collections import Counter |
|
|
from os.path import dirname, join |
|
|
|
|
|
|
|
|
def parse_conllu(filepath):
    """Parse a CoNLL-U file and return a list of sentence dicts.

    Each sentence dict has:
        tokens:   list of word forms (FORM column)
        upos:     list of universal POS tags (UPOS column)
        deprel:   list of dependency relations (DEPREL column)
        head:     list of head indices as strings (HEAD column)
        metadata: dict built from '# key = value' comment lines

    Multiword-token ranges (e.g. '1-2') and empty nodes (e.g. '5.1') are
    skipped so token counts match the basic-token tree.
    """
    def _new_sentence():
        # Fresh accumulator for one sentence block.
        return {'tokens': [], 'upos': [], 'deprel': [], 'head': [], 'metadata': {}}

    sentences = []
    current = _new_sentence()

    with open(filepath, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Blank line terminates a sentence block.
                if current['tokens']:
                    sentences.append(current)
                current = _new_sentence()
            elif line.startswith('#'):
                # Metadata comment, e.g. '# sent_id = 1'.  Strip the leading
                # '#' characters instead of assuming a '# ' prefix, so an
                # unspaced '#text=...' parses as key 'text', not 'ext'.
                body = line.lstrip('#').strip()
                if '=' in body:
                    key, value = body.split('=', 1)
                    current['metadata'][key.strip()] = value.strip()
            else:
                parts = line.split('\t')
                if len(parts) >= 10:
                    # Skip multiword-token ranges ('1-2') and empty nodes ('5.1').
                    if '-' in parts[0] or '.' in parts[0]:
                        continue
                    current['tokens'].append(parts[1])
                    current['upos'].append(parts[3])
                    current['head'].append(parts[6])
                    current['deprel'].append(parts[7])

    # Flush the final sentence if the file does not end with a blank line.
    if current['tokens']:
        sentences.append(current)

    return sentences
|
|
|
|
|
|
|
|
def compute_statistics(sentences):
    """Compute corpus-level statistics from parsed sentences."""
    lengths = [len(s['tokens']) for s in sentences]

    stats = {
        'num_sentences': len(sentences),
        'num_tokens': sum(lengths),
        # Guard against an empty corpus to avoid ZeroDivisionError.
        'avg_sent_length': sum(lengths) / len(lengths) if lengths else 0,
        'min_sent_length': min(lengths) if lengths else 0,
        'max_sent_length': max(lengths) if lengths else 0,
    }

    # Tag / relation frequency tables over the whole corpus.
    stats['upos_counts'] = Counter(tag for s in sentences for tag in s['upos'])
    stats['deprel_counts'] = Counter(rel for s in sentences for rel in s['deprel'])

    # Dependency-tree depth, one value per sentence.
    depths = [compute_tree_depth(s['head']) for s in sentences]
    stats['avg_tree_depth'] = sum(depths) / len(depths) if depths else 0
    stats['max_tree_depth'] = max(depths) if depths else 0

    # UPOS of every token attached with the 'root' relation.
    stats['root_upos_counts'] = Counter(
        tag
        for s in sentences
        for tag, rel in zip(s['upos'], s['deprel'])
        if rel == 'root'
    )

    return stats
|
|
|
|
|
|
|
|
def compute_tree_depth(heads):
    """Compute the maximum node depth of a dependency tree.

    Args:
        heads: per-token head indices as strings; "0" marks the root.

    Returns:
        Depth of the deepest token (a root token has depth 1), or 0 for
        an empty sentence.

    Malformed input is handled defensively instead of raising: a token
    whose head is non-numeric (e.g. '_') or out of range is attached to
    the root, and every token on a head cycle falls back to depth 1 (the
    same fallback the previous recursive version produced).  The walk is
    iterative, so very long sentences can no longer trigger
    RecursionError and silently get a wrong depth of 1.
    """
    n = len(heads)
    if n == 0:
        return 0

    depths = [0] * n  # 0 means "not computed yet"

    for start in range(n):
        if depths[start]:
            continue
        # Walk from `start` toward the root, recording the path so the
        # depths can be filled in on the way back down.
        path = []
        on_path = set()
        node = start
        base = None  # depth of the node the path attaches to; None = cycle
        while True:
            if depths[node]:
                base = depths[node]  # reached an already-solved subtree
                break
            if node in on_path:
                break  # cycle detected; base stays None
            on_path.add(node)
            path.append(node)
            try:
                parent = int(heads[node]) - 1
            except ValueError:
                parent = -1  # non-numeric head (e.g. '_'): attach to root
            if parent < 0 or parent >= n:
                base = 0  # reached the root (or an out-of-range head)
                break
            node = parent
        if base is None:
            # Every node on the cycle gets the legacy fallback depth of 1.
            for member in path:
                depths[member] = 1
        else:
            for member in reversed(path):
                base += 1
                depths[member] = base

    return max(depths)
|
|
|
|
|
|
|
|
def print_statistics(stats):
    """Print statistics in a nice format."""

    def _table(title, label, width, counts, total):
        # Render one frequency table, sorted by descending count.
        print(title)
        print(f"  {label:<{width}} {'Count':>8} {'Percent':>8}")
        print("  " + "-" * (width + 18))
        for item, count in counts.most_common():
            share = count / total * 100
            print(f"  {item:<{width}} {count:>8,} {share:>7.2f}%")

    banner = "=" * 60
    print(banner)
    print("UD Vietnamese Dataset (UDD-v0.1) Statistics")
    print(banner)

    print("\n## Basic Statistics")
    print(f"  Sentences: {stats['num_sentences']:,}")
    print(f"  Tokens: {stats['num_tokens']:,}")
    print(f"  Avg sent length: {stats['avg_sent_length']:.2f}")
    print(f"  Min sent length: {stats['min_sent_length']}")
    print(f"  Max sent length: {stats['max_sent_length']}")
    print(f"  Avg tree depth: {stats['avg_tree_depth']:.2f}")
    print(f"  Max tree depth: {stats['max_tree_depth']}")

    token_total = stats['num_tokens']
    _table("\n## UPOS Distribution", 'Tag', 10,
           stats['upos_counts'], token_total)
    # Each token carries exactly one relation, so tokens are the total here too.
    _table("\n## DEPREL Distribution", 'Relation', 20,
           stats['deprel_counts'], token_total)
    _table("\n## Root UPOS Distribution", 'UPOS', 10,
           stats['root_upos_counts'], sum(stats['root_upos_counts'].values()))

    print("\n" + banner)
|
|
|
|
|
|
|
|
def main():
    """Entry point: load the training split and report its statistics."""
    # The script lives inside the dataset tree; train.conllu is expected
    # two levels up from this file — presumably the dataset root (TODO confirm).
    dataset_root = dirname(dirname(__file__))
    train_path = join(dataset_root, 'train.conllu')

    print(f"Reading: {train_path}")
    parsed = parse_conllu(train_path)

    print_statistics(compute_statistics(parsed))
|
|
|
|
|
|
|
|
# Run the report when executed as a script (no side effects on import).
if __name__ == "__main__":
    main()
|
|
|