rain1024 commited on
Commit
45fa3de
·
verified ·
1 Parent(s): d6c4c28

Add scripts/statistics.py

Browse files
Files changed (1) hide show
  1. scripts/statistics.py +182 -0
scripts/statistics.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Statistics for UD Vietnamese Dataset (UDD-v0.1)
3
+ """
4
+
5
+ from collections import Counter
6
+ from os.path import dirname, join
7
+
8
+
9
def parse_conllu(filepath):
    """Parse a CoNLL-U file and return a list of sentence dicts.

    Each sentence dict has keys:
        'tokens'  : list of FORM column values
        'upos'    : list of UPOS column values
        'deprel'  : list of DEPREL column values
        'head'    : list of HEAD column values (kept as strings)
        'metadata': dict built from '# key = value' comment lines

    Multi-word token lines (ID like '1-2') and empty nodes (ID like '1.1')
    are skipped.
    """
    def _new_sentence():
        # Fresh container per sentence; never share mutable state.
        return {'tokens': [], 'upos': [], 'deprel': [], 'head': [], 'metadata': {}}

    sentences = []
    current_sentence = _new_sentence()

    with open(filepath, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Blank line terminates the current sentence.
                if current_sentence['tokens']:
                    sentences.append(current_sentence)
                current_sentence = _new_sentence()
            elif line.startswith('#'):
                # Metadata comment, e.g. '# sent_id = 1'.
                if '=' in line:
                    # BUG FIX: the original used line[2:], which chops the
                    # first character of the key when there is no space after
                    # '#' (e.g. '#sent_id = 1'). Strip the marker instead.
                    key, value = line[1:].lstrip().split('=', 1)
                    current_sentence['metadata'][key.strip()] = value.strip()
            else:
                parts = line.split('\t')
                if len(parts) >= 10:
                    # Skip multi-word tokens (e.g. 1-2) and empty nodes (1.1)
                    if '-' in parts[0] or '.' in parts[0]:
                        continue
                    current_sentence['tokens'].append(parts[1])
                    current_sentence['upos'].append(parts[3])
                    current_sentence['head'].append(parts[6])
                    current_sentence['deprel'].append(parts[7])

    # File may not end with a blank line: flush the trailing sentence.
    if current_sentence['tokens']:
        sentences.append(current_sentence)

    return sentences
54
+
55
+
56
def compute_statistics(sentences):
    """Compute corpus-level statistics from parsed sentences.

    Returns a dict with sentence/token counts, sentence-length min/avg/max,
    UPOS and DEPREL frequency Counters, tree-depth avg/max, and a Counter of
    the UPOS tags of root-attached tokens.
    """
    stats = {}

    # Basic counts
    stats['num_sentences'] = len(sentences)
    stats['num_tokens'] = sum(len(s['tokens']) for s in sentences)

    # Sentence length statistics (guard every aggregate against empty input)
    sent_lengths = [len(s['tokens']) for s in sentences]
    stats['avg_sent_length'] = sum(sent_lengths) / len(sent_lengths) if sent_lengths else 0
    stats['min_sent_length'] = min(sent_lengths) if sent_lengths else 0
    stats['max_sent_length'] = max(sent_lengths) if sent_lengths else 0

    # UPOS / DEPREL distributions: Counter over a generator replaces the
    # original intermediate lists built with extend() loops.
    stats['upos_counts'] = Counter(tag for s in sentences for tag in s['upos'])
    stats['deprel_counts'] = Counter(rel for s in sentences for rel in s['deprel'])

    # Tree depth statistics
    depths = [compute_tree_depth(s['head']) for s in sentences]
    stats['avg_tree_depth'] = sum(depths) / len(depths) if depths else 0
    stats['max_tree_depth'] = max(depths) if depths else 0

    # UPOS of tokens attached with the 'root' relation. The original used
    # enumerate() here but never used the index -- removed.
    stats['root_upos_counts'] = Counter(
        upos
        for s in sentences
        for upos, deprel in zip(s['upos'], s['deprel'])
        if deprel == 'root'
    )

    return stats
99
+
100
+
101
def compute_tree_depth(heads):
    """Compute the maximum depth of a dependency tree.

    heads -- list of HEAD values as strings; '0' marks the root. A token
    attached to the root has depth 1, its dependents depth 2, and so on.

    Malformed input degrades gracefully instead of raising: a cycle (which
    overflows the recursion), an out-of-range head, or a non-numeric head
    gives the offending token depth 1.
    """
    n = len(heads)
    if n == 0:
        return 0

    # depths[i] == 0 means "not computed yet" (every real depth is >= 1),
    # so the array doubles as a memoization table.
    depths = [0] * n

    def get_depth(idx):
        if depths[idx] > 0:
            return depths[idx]
        head = int(heads[idx])
        if head == 0:
            depths[idx] = 1
        else:
            # Head column is 1-based; recurse towards the root.
            depths[idx] = get_depth(head - 1) + 1
        return depths[idx]

    for i in range(n):
        try:
            get_depth(i)
        except (RecursionError, IndexError, ValueError):
            # BUG FIX: also catch ValueError so a non-numeric HEAD field
            # (e.g. '_') falls back to depth 1 instead of crashing; the
            # original only caught RecursionError (cycles) and IndexError
            # (out-of-range heads).
            depths[i] = 1

    return max(depths) if depths else 0
126
+
127
+
128
def print_statistics(stats):
    """Print statistics in a nice format."""
    banner = "=" * 60

    def show_table(counts, name_header, name_width, rule_width, denom):
        # One aligned name/count/percent table, sorted by frequency.
        print(f" {name_header:<{name_width}} {'Count':>8} {'Percent':>8}")
        print(" " + "-" * rule_width)
        for name, cnt in counts.most_common():
            share = cnt / denom * 100
            print(f" {name:<{name_width}} {cnt:>8,} {share:>7.2f}%")

    print(banner)
    print("UD Vietnamese Dataset (UDD-v0.1) Statistics")
    print(banner)

    print("\n## Basic Statistics")
    print(f" Sentences: {stats['num_sentences']:,}")
    print(f" Tokens: {stats['num_tokens']:,}")
    print(f" Avg sent length: {stats['avg_sent_length']:.2f}")
    print(f" Min sent length: {stats['min_sent_length']}")
    print(f" Max sent length: {stats['max_sent_length']}")
    print(f" Avg tree depth: {stats['avg_tree_depth']:.2f}")
    print(f" Max tree depth: {stats['max_tree_depth']}")

    total_tokens = stats['num_tokens']

    print("\n## UPOS Distribution")
    show_table(stats['upos_counts'], 'Tag', 10, 28, total_tokens)

    # DEPREL percentages are over all tokens (one relation per token).
    print("\n## DEPREL Distribution")
    show_table(stats['deprel_counts'], 'Relation', 20, 38, total_tokens)

    print("\n## Root UPOS Distribution")
    show_table(stats['root_upos_counts'], 'UPOS', 10, 28,
               sum(stats['root_upos_counts'].values()))

    print("\n" + banner)
167
+
168
+
169
def main():
    """Run the statistics report over the repository's train.conllu."""
    # The data file lives one level above the scripts/ directory.
    repo_root = dirname(dirname(__file__))
    conllu_file = join(repo_root, 'train.conllu')

    print(f"Reading: {conllu_file}")
    print_statistics(compute_statistics(parse_conllu(conllu_file)))
179
+
180
+
181
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()