File size: 19,390 Bytes
d6c4c28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
"""
Convert sentences to Universal Dependencies format compatible with HuggingFace.
Structure follows: https://huggingface.co/datasets/commul/universal_dependencies/viewer/vi_vtb
Uses underthesea dependency_parse for proper annotations.
"""

import json
from os.path import dirname, expanduser, join

from underthesea import dependency_parse, pos_tag

# Map Vietnamese POS tags to Universal POS tags
# Based on: https://universaldependencies.org/u/pos/
UPOS_MAP = {
    'N': 'NOUN',      # Noun
    'Np': 'PROPN',    # Proper noun
    'Nc': 'NOUN',     # Classifier noun
    'Nu': 'NOUN',     # Unit noun
    'V': 'VERB',      # Verb
    'A': 'ADJ',       # Adjective
    'P': 'PRON',      # Pronoun
    'R': 'ADV',       # Adverb
    'L': 'DET',       # Determiner/Quantifier
    'M': 'NUM',       # Numeral
    'E': 'ADP',       # Preposition
    'C': 'CCONJ',     # Coordinating conjunction
    'CC': 'CCONJ',    # Coordinating conjunction
    'SC': 'SCONJ',    # Subordinating conjunction
    'I': 'INTJ',      # Interjection
    'T': 'PART',      # Particle
    'B': 'X',         # Foreign word
    'Y': 'X',         # Abbreviation
    'S': 'SYM',       # Symbol
    'X': 'X',         # Other
    'CH': 'PUNCT',    # Punctuation
    'Ny': 'NOUN',     # Noun (variant)
}

# Vietnamese auxiliary verbs that should be tagged as AUX
# Based on UD Vietnamese validation data (data.json)
# NOTE: every entry is lowercase; lookups compare against a lowercased token.
AUX_WORDS = {
    'bị', 'chưa thể', 'chắc chắn', 'có thể', 'có vẻ', 'cần',
    'giả', 'không thể', 'là', 'muốn', 'nghĩa là', 'nhằm',
    'nên', 'phải', 'quyết', 'thôi', 'thể', 'xong', 'được', 'định'
}

# Vietnamese determiners - words that should be DET when used as 'det' relation
DET_WORDS = {
    'các', 'những', 'mọi', 'mỗi', 'từng', 'bất kỳ', 'một', 'hai', 'ba',
    'này', 'đó', 'kia', 'ấy', 'nọ', 'nào', 'đấy', 'cái', 'con', 'chiếc',
    'người', 'cả', 'phá tán'  # Words that appear as det in the data
}

# Words that can be ADV when used as 'advmod'
ADV_WORDS = {
    'không', 'chưa', 'đã', 'đang', 'sẽ', 'còn', 'vẫn', 'cũng', 'rất',
    'quá', 'lắm', 'hơn', 'nhất', 'luôn', 'thường', 'hay', 'ít', 'nhiều',
    'tự', 'một cách', 'được', 'không thể', 'lại', 'cá biệt', 'dân sự'
}

# Invalid deprels that need to be mapped to valid ones
DEPREL_MAP = {
    'acomp': 'xcomp',  # Adjectival complement -> open clausal complement
    'nmod:comp': 'nmod',  # Invalid subtype
    'nmod:agent': 'obl:agent',  # Agent should be obl not nmod
    'nmod:with': 'nmod',  # Invalid subtype
    'nmod:about': 'nmod',  # Invalid subtype -> nmod
    'compound:number': 'nummod',  # Number compounds should be nummod
    'compound:nmod': 'compound',  # Invalid subtype
    'obl:pcomp': 'obl',  # Invalid subtype -> obl
}


def to_upos(tag, token=None):
    """Convert a Vietnamese POS tag to a Universal POS tag.

    Args:
        tag: Vietnamese (VLSP-style) POS tag, e.g. 'N', 'V', 'Np'.
        token: optional surface form; a token that is a known auxiliary
            (case-insensitive) is tagged 'AUX' regardless of `tag`.

    Returns:
        The UPOS label; unknown tags fall back to 'X'.
    """
    # All AUX_WORDS entries are lowercase, so a single membership test is
    # sufficient; the previous extra loop re-comparing aux.lower() against
    # the already-checked lowercased token was dead code and has been removed.
    if token and token.lower() in AUX_WORDS:
        return 'AUX'
    return UPOS_MAP.get(tag, 'X')


def fix_syntax_errors(tokens, upos, head, deprel):
    """
    Post-process a parsed sentence to fix common UD SYNTAX validation errors.

    Args:
        tokens: surface forms of the sentence.
        upos: Universal POS tags, parallel to tokens.
        deprel: dependency relation labels, parallel to tokens.
        head: 1-based head indices ("0"/0 = root), str or int, parallel to tokens.

    Returns:
        Tuple (upos, head, deprel) of repaired lists; head entries are
        returned as strings. Input lists are not mutated (copies are made).
        Fixes are applied in several passes because later fixes depend on
        earlier ones (e.g. deprel remapping before UPOS agreement checks).
    """
    n = len(tokens)
    # Work on copies so the caller's lists are left untouched.
    upos = list(upos)
    deprel = list(deprel)
    head = [int(h) for h in head]

    # First pass: fix leaf nodes (aux/mark/case/punct should not have children)
    # Need multiple passes to handle chains of leaf nodes
    for _ in range(5):  # Multiple passes to handle chains
        changed = False
        for i in range(n):
            rel = deprel[i]

            # Leaf nodes should not have children - redirect children to parent
            # Include subtypes like aux:pass, mark:pcomp, etc.
            # Also include det, nummod, clf which should be leaves
            if rel.split(':')[0] in ('aux', 'cop', 'mark', 'case', 'punct', 'det', 'nummod', 'clf'):
                has_children = any(head[j] == i + 1 for j in range(n))
                if has_children:
                    my_head = head[i]
                    for j in range(n):
                        if head[j] == i + 1:
                            # Re-attach the child one level up, to this leaf's head.
                            head[j] = my_head
                            changed = True
        if not changed:
            break

    for i in range(n):
        token_lower = tokens[i].lower()
        rel = deprel[i]
        pos = upos[i]

        # Fix 0: Map invalid deprels to valid ones
        if rel in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[rel]
            rel = deprel[i]

        # Fix 1: rel-upos-det - 'det' (including subtypes) should be DET or PRON
        if rel.startswith('det') and pos not in ('DET', 'PRON'):
            # Force all 'det' relations to have DET or PRON UPOS
            upos[i] = 'DET'

        # Fix 2: rel-upos-advmod - 'advmod' (including subtypes) should be ADV
        if rel.startswith('advmod') and pos != 'ADV':
            # For advmod, always prefer changing UPOS to ADV
            upos[i] = 'ADV'

        # Fix 2b: rel-upos-nummod - 'nummod' should be NUM
        if rel.startswith('nummod') and upos[i] != 'NUM':
            # If token is clearly not a number (e.g., VERB), change relation instead
            if upos[i] == 'VERB':
                deprel[i] = 'acl'  # Adjectival clause for verbs
                rel = 'acl'  # Update local variable too
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'  # Adjectival modifier
                rel = 'amod'
            else:
                upos[i] = 'NUM'

        # Fix 3: rel-upos-mark - 'mark' (including subtypes) should not be AUX
        if rel.startswith('mark') and pos == 'AUX':
            upos[i] = 'SCONJ'

        # Fix 3b: rel-upos-punct - 'punct' must be PUNCT, and PUNCT must have 'punct' deprel
        if rel == 'punct' and pos != 'PUNCT':
            # Change relation to something appropriate based on POS
            if pos in ('VERB', 'NOUN', 'ADJ'):
                deprel[i] = 'dep'  # Use generic dependency
            else:
                upos[i] = 'PUNCT'

        # Fix 3b2: upos-rel-punct - PUNCT must have 'punct' deprel
        if pos == 'PUNCT' and rel != 'punct':
            deprel[i] = 'punct'
            rel = 'punct'

        # Fix 3c: rel-upos-case - 'case' should be ADP, not ADJ, AUX or PROPN
        if rel == 'case' and pos in ('ADJ', 'AUX', 'PROPN', 'NOUN', 'VERB'):
            upos[i] = 'ADP'

        # Fix 3d: rel-upos-cc - 'cc' should be CCONJ or SCONJ
        if rel == 'cc' and pos not in ('CCONJ', 'SCONJ'):
            upos[i] = 'CCONJ'

        # Fix 3e: rel-upos-aux - 'aux' should be AUX, but only for valid auxiliaries
        is_valid_aux = token_lower in AUX_WORDS or any(token_lower == aux.lower() for aux in AUX_WORDS)
        if rel.startswith('aux'):
            if is_valid_aux:
                upos[i] = 'AUX'
                pos = 'AUX'
            else:
                # Not a valid auxiliary - change relation to advcl or xcomp
                if pos == 'VERB' or upos[i] == 'VERB':
                    deprel[i] = 'advcl'
                    upos[i] = 'VERB'
                elif pos == 'ADP' or upos[i] == 'ADP':
                    deprel[i] = 'mark'
                    upos[i] = 'ADP'
                else:
                    deprel[i] = 'xcomp'
                rel = deprel[i]
                pos = upos[i]
        # Also fix AUX UPOS that's not a valid auxiliary (MORPHO aux-lemma)
        elif pos == 'AUX' and not is_valid_aux:
            upos[i] = 'VERB'  # Default to VERB for non-aux
            pos = 'VERB'

        # Fix 3f: rel-upos-cop - 'cop' should be AUX or PRON/DET, only 'là' is valid copula
        if rel == 'cop':
            if token_lower != 'là':
                # Not a valid copula, change to xcomp
                deprel[i] = 'xcomp'
                rel = 'xcomp'
            elif pos not in ('AUX', 'PRON', 'DET'):
                upos[i] = 'AUX'

        # Fix 4: obl-should-be-nmod - when parent is nominal, use nmod
        if rel.startswith('obl') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx < n and upos[parent_idx] in ('NOUN', 'PROPN', 'PRON'):
                # Preserve subtype if exists
                if ':' in rel:
                    deprel[i] = 'nmod:' + rel.split(':')[1]
                else:
                    deprel[i] = 'nmod'

        # Fix 5: (handled in first pass above)

    # Fix 5b: right-to-left relations - flat/conj/appos must be left-to-right
    for i in range(n):
        rel = deprel[i]
        base_rel = rel.split(':')[0]
        if base_rel in ('flat', 'conj', 'appos') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx > i:  # Parent comes after child (wrong direction)
                # Change to compound which allows both directions
                if ':' in rel:
                    deprel[i] = 'compound:' + rel.split(':')[1]
                else:
                    deprel[i] = 'compound'

    # Fix 5c: Apply DEPREL_MAP again to catch any newly created invalid deprels
    for i in range(n):
        if deprel[i] in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[deprel[i]]

    # Fix 5d: Final check for nummod with wrong UPOS
    for i in range(n):
        if deprel[i].startswith('nummod') and upos[i] != 'NUM':
            if upos[i] == 'VERB':
                deprel[i] = 'acl'
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'
            elif upos[i] == 'NOUN':
                deprel[i] = 'nmod'
            else:
                upos[i] = 'NUM'

    # Fix 6: too-many-subjects - add :outer subtype for multiple subjects
    # Group all subject types (nsubj, csubj) by predicate
    predicates = {}
    for i in range(n):
        base_rel = deprel[i].split(':')[0]
        if base_rel in ('nsubj', 'csubj') and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates:
                predicates[pred_idx] = []
            predicates[pred_idx].append((i, base_rel))

    for pred_idx, subj_list in predicates.items():
        if len(subj_list) > 1:
            # Sort by position to keep first subject as main
            subj_list.sort(key=lambda x: x[0])
            # Mark all but the first as :outer (only nsubj:outer is valid, not csubj:outer)
            for idx, base_rel in subj_list[1:]:
                if ':outer' not in deprel[idx]:
                    # csubj:outer is not a valid UD relation, use nsubj:outer instead
                    deprel[idx] = 'nsubj:outer'

    # Fix 7: too-many-objects - add :pass or compound for multiple objects
    predicates_obj = {}
    for i in range(n):
        if deprel[i] == 'obj' and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates_obj:
                predicates_obj[pred_idx] = []
            predicates_obj[pred_idx].append(i)

    for pred_idx, obj_indices in predicates_obj.items():
        if len(obj_indices) > 1:
            # Mark subsequent objects as compound
            for idx in obj_indices[1:]:
                # Check if it's adjacent to previous - likely compound
                # NOTE(review): this compares against the FIRST object
                # (obj_indices[0]), not the immediately preceding one, so only
                # the second object can ever be demoted to 'compound' - confirm
                # whether that is intentional.
                if idx > 0 and obj_indices[0] == idx - 1:
                    deprel[idx] = 'compound'
                else:
                    deprel[idx] = 'iobj'

    # Fix 8: punct-is-nonproj - attach punctuation to avoid non-projectivity
    # Try to find the best attachment point that doesn't cross other edges
    for i in range(n):
        if upos[i] == 'PUNCT':
            # Try candidates in order: previous token, next token, then expand outward
            candidates = []
            if i > 0:
                candidates.append(i)  # Previous token (1-based)
            if i + 1 < n:
                candidates.append(i + 2)  # Next token (1-based)

            # Expand to find more candidates
            for dist in range(2, n):
                if i - dist >= 0:
                    candidates.append(i - dist + 1)  # 1-based
                if i + dist < n:
                    candidates.append(i + dist + 1)  # 1-based

            # Find best attachment that doesn't cause crossing
            # NOTE(review): for a single-token sentence `candidates` is empty
            # and the fallback head of 1 points the token at itself - verify
            # this case cannot reach here, or guard it.
            best_head = candidates[0] if candidates else 1
            for cand in candidates:
                test_head = list(head)
                test_head[i] = cand
                if not punct_causes_crossing(i, cand - 1, test_head, n):
                    best_head = cand
                    break

            head[i] = best_head

    # Heads are serialized back to strings for the CoNLL-U columns.
    return upos, [str(h) for h in head], deprel


def punct_causes_crossing(punct_idx, new_head_idx, head, n):
    """Return True if attaching the punctuation token to the proposed head
    would make its edge cross any other dependency edge.

    Args:
        punct_idx: 0-based index of the punctuation token.
        new_head_idx: 0-based index of the candidate head.
        head: 1-based head indices for all tokens (0 = root).
        n: sentence length.
    """
    if not (0 <= new_head_idx < n):
        return False

    lo = min(punct_idx, new_head_idx)
    hi = max(punct_idx, new_head_idx)

    for other in range(n):
        # Skip the punct token itself, rootless tokens, and edges whose
        # head IS the punctuation token.
        if other == punct_idx or head[other] <= 0 or head[other] == punct_idx + 1:
            continue
        parent = head[other] - 1
        if not (0 <= parent < n):
            continue
        o_lo, o_hi = (other, parent) if other < parent else (parent, other)
        # Two edges cross iff each span contains exactly one endpoint
        # of the other.
        if lo < o_lo < hi < o_hi or o_lo < lo < o_hi < hi:
            return True

    return False


def compute_space_after(text, tokens):
    """Build the MISC column for a sentence.

    A token gets 'SpaceAfter=No' when it is immediately followed by a
    non-whitespace character in the original text, otherwise '_'. Tokens
    that cannot be located in the text default to '_'.
    """
    result = []
    cursor = 0
    for tok in tokens:
        start = text.find(tok, cursor)
        if start < 0:
            # Token missing from the raw text - assume a trailing space.
            result.append("_")
            continue

        cursor = start + len(tok)
        # 'SpaceAfter=No' only when the very next character exists and is
        # not a space/tab/newline; end-of-text counts as spaced.
        glued = cursor < len(text) and text[cursor] not in ' \t\n'
        result.append("SpaceAfter=No" if glued else "_")

    return result


def load_sentences(filepath):
    """Read sentences from sentences.txt.

    Each non-empty line is expected as '<id>\\t<sentence>'; lines without a
    tab separator are silently skipped. Returns the sentence texts only.
    """
    result = []
    with open(filepath, "r", encoding="utf-8") as fh:
        for raw in fh:
            stripped = raw.strip()
            if not stripped:
                continue
            fields = stripped.split("\t", 1)
            if len(fields) == 2:
                result.append(fields[1])
    return result


def convert_to_ud_format(sentences):
    """Parse each sentence with underthesea and emit HuggingFace-style UD rows.

    Each row holds the CoNLL-U columns as parallel lists plus sent_id/text
    metadata. When dependency parsing fails, falls back to a flat POS-only
    analysis (first token rooted, everything else 'dep').
    """
    rows = []

    for idx, text in enumerate(sentences, 1):
        if idx % 100 == 0:
            print(f"  Processing sentence {idx}/{len(sentences)}...")

        sent_id = f"s{idx}"

        try:
            # dependency_parse yields (token, head, deprel) triples.
            parsed = dependency_parse(text)
            tokens = [item[0] for item in parsed]
            head = [str(item[1]) for item in parsed]
            deprel = [item[2] for item in parsed]

            tagged = pos_tag(text)
            if len(tagged) == len(tokens):
                xpos = [item[1] for item in tagged]  # Vietnamese tags as-is
                upos = [to_upos(item[1], item[0]) for item in tagged]
            else:
                # Tokenization mismatch between the two taggers:
                # no reliable alignment, mark everything unknown.
                xpos = ['X'] * len(tokens)
                upos = ['X'] * len(tokens)

        except Exception as e:
            print(f"  Error parsing sentence {idx}: {e}")
            # Degrade gracefully to POS tagging only.
            tagged = pos_tag(text)
            tokens = [item[0] for item in tagged]
            xpos = [item[1] for item in tagged]
            upos = [to_upos(item[1], item[0]) for item in tagged]
            head = ["0"] * len(tokens)
            deprel = ["dep"] * len(tokens)
            if tokens:
                deprel[0] = "root"

        # Repair common UD validation errors before serialization.
        upos, head, deprel = fix_syntax_errors(tokens, upos, head, deprel)

        count = len(tokens)
        rows.append({
            "sent_id": sent_id,
            "text": text,
            "comments": [f"# sent_id = {sent_id}", f"# text = {text}"],
            "tokens": tokens,
            # Vietnamese has no inflection: lemma = lowercased surface form.
            "lemmas": [tok.lower() for tok in tokens],
            "upos": upos,
            "xpos": xpos,
            "feats": ["_"] * count,
            "head": head,
            "deprel": deprel,
            "deps": ["_"] * count,
            "misc": compute_space_after(text, tokens),
            "mwt": [],
            "empty_nodes": [],
        })

    return rows


def save_jsonl(data, filepath):
    """Write rows as JSON Lines (one object per line, UTF-8, non-ASCII kept literal)."""
    serialized = [json.dumps(row, ensure_ascii=False) + "\n" for row in data]
    with open(filepath, "w", encoding="utf-8") as out:
        out.writelines(serialized)


def save_conllu(data, filepath):
    """Serialize rows to CoNLL-U.

    Columns per token: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC,
    with sent_id/text comment lines before each sentence and a blank line after.
    """
    with open(filepath, "w", encoding="utf-8") as out:
        for row in data:
            out.write(f"# sent_id = {row['sent_id']}\n")
            out.write(f"# text = {row['text']}\n")
            columns = zip(row['tokens'], row['lemmas'], row['upos'],
                          row['xpos'], row['feats'], row['head'],
                          row['deprel'], row['deps'], row['misc'])
            for tok_id, cols in enumerate(columns, 1):
                out.write("\t".join((str(tok_id),) + cols) + "\n")
            out.write("\n")


def main():
    """Load raw sentences, convert them to UD, and write JSONL + CoNLL-U files."""
    source_folder = expanduser("~/Downloads/UD_Vietnamese-UUD-v0.1")
    sentences_file = join(source_folder, "sentences.txt")

    print("Loading sentences...")
    sentences = load_sentences(sentences_file)
    print(f"Loaded {len(sentences)} sentences")

    print("Converting to UD format...")
    data = convert_to_ud_format(sentences)

    # JSONL output for HuggingFace datasets.
    jsonl_file = join(source_folder, "train.jsonl")
    save_jsonl(data, jsonl_file)
    print(f"Saved JSONL to: {jsonl_file}")

    # Standard UD CoNLL-U output.
    conllu_file = join(source_folder, "train.conllu")
    save_conllu(data, conllu_file)
    print(f"Saved CoNLL-U to: {conllu_file}")

    # Quick preview of the first converted sentence.
    print("\nSample row:")
    first = data[0]
    print(f"  sent_id: {first['sent_id']}")
    print(f"  text: {first['text'][:60]}...")
    print(f"  tokens: {first['tokens'][:5]}...")
    print(f"  upos: {first['upos'][:5]}...")


if __name__ == "__main__":
    main()