Datasets:
Add v1.1 word segmentation dataset, annotation guidelines, and research references
Browse files
- Add 100K-sentence word segmentation dataset in BIO format (udd-ws-v1.1-{train,dev,test})
- Add raw sentence files for 5 domains (ws_sentences_*.txt)
- Add word segmentation annotation guideline and segmentation evaluation report
- Add technical report v1.0 and review documents
- Add active learning research references and Vietnamese UD annotation references
- Add sentence selection guidelines
- Move scripts/ to src/ (udtools, gpu_stats, run_conversion, etc.)
- Track large training files with Git LFS
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +2 -0
- ANNOTATION_GUIDELINE_WORD_SEGMENTATION.md +505 -0
- SEGMENTATION_EVAL.md +1017 -0
- TECHNICAL_REPORT.md +351 -0
- TECHNICAL_REPORT_REVIEW.md +196 -0
- scripts/udtools/tests/test-cases/valid/empty-file.conllu → active_learning/README.md +0 -0
- active_learning/references/research_active_learning_ud/README.md +184 -0
- active_learning/references/research_active_learning_ud/bibliography.bib +224 -0
- active_learning/references/research_active_learning_ud/comparison.md +68 -0
- active_learning/references/research_active_learning_ud/guideline_development.md +358 -0
- active_learning/references/research_active_learning_ud/papers.md +300 -0
- guidelines/00. Sentence Selection.md +142 -0
- references/2016.lrec.nguyen/paper.md +674 -0
- references/2016.lrec.nguyen/paper.pdf +0 -0
- references/2018.lre.nguyen/paper.md +72 -0
- references/research_vietnamese_dep_parsing/README.md +120 -0
- references/research_vietnamese_dep_parsing/bibliography.bib +436 -0
- references/research_vietnamese_dep_parsing/comparison.md +75 -0
- references/research_vietnamese_dep_parsing/papers.md +295 -0
- references/research_vietnamese_dep_parsing/sota.md +70 -0
- references/research_vietnamese_ud_annotation/README.md +139 -0
- references/research_vietnamese_ud_annotation/bibliography.bib +185 -0
- references/research_vietnamese_ud_annotation/comparison.md +137 -0
- references/research_vietnamese_ud_annotation/guidelines_summary.md +248 -0
- references/research_vietnamese_ud_annotation/papers.md +177 -0
- scripts/convert_to_ud.py +0 -655
- scripts/fetch_data.py +0 -115
- scripts/fetch_uvb_data.py +0 -250
- scripts/upload_to_hf.py +0 -80
- src/eval_segmentation.py +1021 -0
- {scripts → src}/gpu_stats.py +0 -0
- {scripts → src}/run_conversion.sh +0 -0
- {scripts → src}/run_on_runpod.py +0 -0
- {scripts → src}/statistics.py +0 -0
- {scripts → src}/udtools/LICENSE.txt +0 -0
- {scripts → src}/udtools/MANIFEST.in +0 -0
- {scripts → src}/udtools/README.md +0 -0
- {scripts → src}/udtools/pyproject.toml +0 -0
- {scripts → src}/udtools/requirements.txt +0 -0
- {scripts → src}/udtools/src/udtools/__init__.py +0 -0
- {scripts → src}/udtools/src/udtools/argparser.py +0 -0
- {scripts → src}/udtools/src/udtools/cli.py +0 -0
- {scripts → src}/udtools/src/udtools/data.py +0 -0
- {scripts → src}/udtools/src/udtools/incident.py +0 -0
- {scripts → src}/udtools/src/udtools/level1.py +0 -0
- {scripts → src}/udtools/src/udtools/level2.py +0 -0
- {scripts → src}/udtools/src/udtools/level3.py +0 -0
- {scripts → src}/udtools/src/udtools/level4.py +0 -0
- {scripts → src}/udtools/src/udtools/level5.py +0 -0
- {scripts → src}/udtools/src/udtools/level6.py +0 -0
.gitattributes
CHANGED
|
@@ -58,3 +58,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
udd1_train.conllu filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
udd1_train.conllu filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
udd-ws-v1.1-train.conllu filter=lfs diff=lfs merge=lfs -text
|
| 62 |
+
udd-ws-v1.1-train.txt filter=lfs diff=lfs merge=lfs -text
|
ANNOTATION_GUIDELINE_WORD_SEGMENTATION.md
ADDED
|
@@ -0,0 +1,505 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Annotation Guideline: Word Segmentation for UDD-1
|
| 2 |
+
|
| 3 |
+
## 1. Background
|
| 4 |
+
|
| 5 |
+
Vietnamese is an isolating language where spaces separate **syllables** (tiếng), not **words** (từ). A single word can consist of multiple syllables separated by spaces (e.g., *học sinh* "student" = 2 syllables, 1 word). This makes word segmentation a non-trivial task that directly affects all downstream annotations in UDD-1.
|
| 6 |
+
|
| 7 |
+
UDD-1 relies on Underthesea's `dependency_parse()` for implicit word segmentation. The tokenizer uses the same model as `word_tokenize()`, trained primarily on news-domain data (VLSP). This document establishes guidelines for evaluating and improving segmentation quality.
|
| 8 |
+
|
| 9 |
+
## 2. Reference Standards
|
| 10 |
+
|
| 11 |
+
### 2.1 VLSP Guidelines (Primary Standard)
|
| 12 |
+
|
| 13 |
+
The VLSP (Vietnamese Language and Speech Processing) organization, led by Nguyễn Thị Minh Huyền at VNU Hanoi, established the de facto standard through their shared tasks:
|
| 14 |
+
|
| 15 |
+
- **VLSP 2013 Word Segmentation Shared Task**: 75,000 manually segmented sentences from online news
|
| 16 |
+
- **Reference document**: "Hướng dẫn nhận diện đơn vị từ trong văn bản tiếng Việt" (Guidelines for Identifying Word Units in Vietnamese Text)
|
| 17 |
+
|
| 18 |
+
### 2.2 NIIVTB 9 Rules (Quy Nguyen et al., 2016/2018)
|
| 19 |
+
|
| 20 |
+
The most comprehensive published guidelines come from the NIIVTB treebank project, achieving >90% inter-annotator agreement on ~20,588 sentences. Nine rules govern word boundary decisions (see Section 3).
|
| 21 |
+
|
| 22 |
+
**Key references:**
|
| 23 |
+
- Nguyen et al. (2016). Challenges and Solutions for Consistent Annotation of Vietnamese Treebank. *LREC 2016*. https://aclanthology.org/L16-1243/
|
| 24 |
+
- Nguyen et al. (2018). Ensuring Annotation Consistency and Accuracy for Vietnamese Treebank. *Language Resources and Evaluation*, Springer. https://link.springer.com/article/10.1007/s10579-017-9398-3
|
| 25 |
+
|
| 26 |
+
### 2.3 Universal Dependencies Policy
|
| 27 |
+
|
| 28 |
+
UD v2 allows words with spaces in Vietnamese treebanks (https://universaldependencies.org/v2/segmentation.html). The rationale: treating all polysyllabic words as multiword expressions would artificially distort Vietnamese syntax. UD_Vietnamese-VTB contains 4,152 word types with spaces.
|
| 29 |
+
|
| 30 |
+
### 2.4 Other References
|
| 31 |
+
|
| 32 |
+
- Nguyen Thi Minh Huyen et al. (2008). Word Segmentation of Vietnamese Texts: a Comparison of Approaches. *LREC 2008*. https://aclanthology.org/L08-1355/
|
| 33 |
+
- Nguyen et al. (2012). Comparing Different Criteria for Vietnamese Word Segmentation. https://aclanthology.org/W12-5005/
|
| 34 |
+
- Dinh Dien (2008). Vietnamese Word Segmentation. *ResearchGate*.
|
| 35 |
+
|
| 36 |
+
## 3. Decision Criteria
|
| 37 |
+
|
| 38 |
+
### 3.1 Core Principles (from NIIVTB/VLSP)
|
| 39 |
+
|
| 40 |
+
The following criteria determine whether a multi-syllable expression AB is ONE word or MULTIPLE words:
|
| 41 |
+
|
| 42 |
+
#### Rule 1: Insertability Test (Primary)
|
| 43 |
+
|
| 44 |
+
Can another word be inserted between A and B without changing the grammatical relationship?
|
| 45 |
+
|
| 46 |
+
- **YES → separate words** (phrase): *ăn cơm* → *ăn một bát cơm* (eat rice → eat a bowl of rice)
|
| 47 |
+
- **NO → one word** (compound): *xe đạp* → \**xe X đạp* (bicycle — cannot insert)
|
| 48 |
+
|
| 49 |
+
#### Rule 2: Semantic Opacity
|
| 50 |
+
|
| 51 |
+
If the combined meaning of AB differs from the sum of A and B individually → **one word**.
|
| 52 |
+
|
| 53 |
+
- *bồ đào* (grape) — neither *bồ* nor *đào* means grape → one word
|
| 54 |
+
- *bộ đội* (soldier) — meaning is not derivable from parts → one word
|
| 55 |
+
|
| 56 |
+
#### Rule 3: Semantic Generalization (Coordinate Compounds)
|
| 57 |
+
|
| 58 |
+
If A and B have different meanings but AB has a more general meaning encompassing both → **one word**.
|
| 59 |
+
|
| 60 |
+
- *quần áo* (clothing) from *quần* (trousers) + *áo* (shirt) → one word
|
| 61 |
+
- *cha mẹ* (parents) from *cha* (father) + *mẹ* (mother) → one word
|
| 62 |
+
|
| 63 |
+
#### Rule 4: Semantic Narrowing (Subordinate Compounds)
|
| 64 |
+
|
| 65 |
+
If AB has the same meaning as A or B but more specific → **one word**.
|
| 66 |
+
|
| 67 |
+
- *xe đạp* (bicycle) from *xe* (vehicle) + *đạp* (pedal) → one word
|
| 68 |
+
- *máy bay* (airplane) from *máy* (machine) + *bay* (fly) → one word
|
| 69 |
+
|
| 70 |
+
#### Rule 5: Synonymous Components
|
| 71 |
+
|
| 72 |
+
If A and B have the same or similar meanings → **one word**.
|
| 73 |
+
|
| 74 |
+
- *to lớn* (big) from *to* (big) + *lớn* (large) → one word
|
| 75 |
+
|
| 76 |
+
#### Rule 6: Bound Morpheme
|
| 77 |
+
|
| 78 |
+
If A or B cannot stand alone as an independent word → **one word**.
|
| 79 |
+
|
| 80 |
+
- *học sinh* (student) — *sinh* in this sense is bound → one word
|
| 81 |
+
- *quốc gia* (nation) — *quốc* is a Sino-Vietnamese bound morpheme → one word
|
| 82 |
+
|
| 83 |
+
#### Rule 7: Dictionary Evidence
|
| 84 |
+
|
| 85 |
+
If the expression appears as a single entry in a standard Vietnamese dictionary → **one word**.
|
| 86 |
+
|
| 87 |
+
#### Rule 8: Stress Pattern
|
| 88 |
+
|
| 89 |
+
Vietnamese compound words have a characteristic stress pattern (typically final-syllable stress) that differs from phrase-level stress.
|
| 90 |
+
|
| 91 |
+
#### Rule 9: Syntactic Behavior
|
| 92 |
+
|
| 93 |
+
Words cannot be internally modified or expanded; phrases can.
|
| 94 |
+
|
| 95 |
+
- *nhà máy* (factory) — cannot modify *nhà* independently → one word
|
| 96 |
+
- *nhà đẹp* (beautiful house) — *nhà* can be modified → two words
|
| 97 |
+
|
| 98 |
+
### 3.2 Decision Flowchart
|
| 99 |
+
|
| 100 |
+
```
|
| 101 |
+
1. Is the expression in the dictionary as a single entry?
|
| 102 |
+
YES → one word
|
| 103 |
+
NO → continue
|
| 104 |
+
|
| 105 |
+
2. Are any syllables bound morphemes (cannot stand alone)?
|
| 106 |
+
YES → one word
|
| 107 |
+
NO → continue
|
| 108 |
+
|
| 109 |
+
3. Can you insert other words between the syllables?
|
| 110 |
+
YES → separate words (phrase)
|
| 111 |
+
NO → continue
|
| 112 |
+
|
| 113 |
+
4. Is the combined meaning different from (or more general/specific than) the parts?
|
| 114 |
+
YES → one word (compound)
|
| 115 |
+
NO → separate words
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
## 4. Vietnamese Compound Word Types
|
| 119 |
+
|
| 120 |
+
| Type | Vietnamese | Description | Example |
|
| 121 |
+
|:---|:---|:---|:---|
|
| 122 |
+
| Từ đơn | Single-syllable | One syllable = one word | *nhà* (house), *ăn* (eat) |
|
| 123 |
+
| Từ ghép đẳng lập | Coordinate compound | Equal-status components, generalized meaning | *quần áo* (clothing), *bàn ghế* (furniture) |
|
| 124 |
+
| Từ ghép chính phụ | Subordinate compound | Head + modifier, narrowed meaning | *xe đạp* (bicycle), *máy bay* (airplane) |
|
| 125 |
+
| Từ láy | Reduplicated word | Full or partial reduplication | *xanh xanh* (somewhat blue) |
|
| 126 |
+
|
| 127 |
+
## 5. Legal Domain Specific Guidelines
|
| 128 |
+
|
| 129 |
+
Legal text in UDD-1 contains fixed multi-syllable terms that should be treated as single words. These terms are institutionalized and do not allow insertion.
|
| 130 |
+
|
| 131 |
+
### 5.1 Institutional Names (Always Single Token)
|
| 132 |
+
|
| 133 |
+
| Term | Translation | Syllables | Guideline |
|
| 134 |
+
|:---|:---|---:|:---|
|
| 135 |
+
| Tòa án nhân dân | People's Court | 4 | Single token — fixed institutional name |
|
| 136 |
+
| Viện kiểm sát nhân dân | People's Procuracy | 5 | Single token |
|
| 137 |
+
| Ủy ban nhân dân | People's Committee | 4 | Single token |
|
| 138 |
+
| Hội đồng nhân dân | People's Council | 4 | Single token |
|
| 139 |
+
| Mặt trận Tổ quốc Việt Nam | Vietnam Fatherland Front | 6 | Single token |
|
| 140 |
+
|
| 141 |
+
### 5.2 Legal Compound Terms (Always Single Token)
|
| 142 |
+
|
| 143 |
+
| Term | Translation | Syllables | Reasoning |
|
| 144 |
+
|:---|:---|---:|:---|
|
| 145 |
+
| tố tụng dân sự | civil procedure | 4 | Fixed legal term, no insertion possible |
|
| 146 |
+
| tố tụng hình sự | criminal procedure | 4 | Fixed legal term |
|
| 147 |
+
| quản lý nhà nước | state management | 4 | Fixed administrative term |
|
| 148 |
+
| bảo hiểm xã hội | social insurance | 4 | Fixed term |
|
| 149 |
+
| bảo hiểm y tế | health insurance | 4 | Fixed term |
|
| 150 |
+
| phương tiện thông tin đại chúng | mass media | 6 | Fixed term (see Section 5.4) |
|
| 151 |
+
| giám đốc thẩm | cassation review | 3 | Fixed legal term |
|
| 152 |
+
| sở hữu toàn dân | people's ownership | 4 | Fixed legal concept |
|
| 153 |
+
| kết cấu hạ tầng | infrastructure | 4 | Fixed term |
|
| 154 |
+
| quy phạm pháp luật | legal norm | 4 | Fixed legal term |
|
| 155 |
+
|
| 156 |
+
### 5.3 Two-Syllable Legal Terms (Segmentation Consistency)
|
| 157 |
+
|
| 158 |
+
From the UDD-1 segmentation evaluation (`SEGMENTATION_EVAL.md`), most two-syllable legal terms are consistently segmented as single tokens:
|
| 159 |
+
|
| 160 |
+
| Term | As Single | As Split | Consistency | Note |
|
| 161 |
+
|:---|---:|---:|:---|:---|
|
| 162 |
+
| quy định | 2,230 | 0 | 100% single | Correct |
|
| 163 |
+
| tài sản | 1,516 | 0 | 100% single | Correct |
|
| 164 |
+
| pháp luật | 1,154 | 0 | 100% single | Correct |
|
| 165 |
+
| hợp đồng | 608 | 0 | 100% single | Correct |
|
| 166 |
+
| nghĩa vụ | 1,025 | 0 | 100% single | Correct |
|
| 167 |
+
| vụ án | 0 | 892 | 100% split | **Incorrect** — should be single token |
|
| 168 |
+
|
| 169 |
+
**Notable issue**: *vụ án* (lawsuit/case) appears 892 times always as two separate tokens (*vụ* + *án*). According to guidelines, this should be a single token — it is a subordinate compound (*vụ* = case + *án* = legal case/judgment), listed in dictionaries, and does not allow insertion.
|
| 170 |
+
|
| 171 |
+
### 5.4 Case Study: "phương tiện thông tin đại chúng"
|
| 172 |
+
|
| 173 |
+
**Structure analysis:**
|
| 174 |
+
```
|
| 175 |
+
phương tiện thông tin đại chúng (mass media)
|
| 176 |
+
├── phương tiện (means/vehicle) — subordinate compound
|
| 177 |
+
└── thông tin đại chúng (mass information)
|
| 178 |
+
├── thông tin (information) — subordinate compound
|
| 179 |
+
└── đại chúng (mass/public) — coordinate compound
|
| 180 |
+
```
|
| 181 |
+
|
| 182 |
+
**Applying criteria:**
|
| 183 |
+
|
| 184 |
+
| Criterion | Result |
|
| 185 |
+
|:---|:---|
|
| 186 |
+
| Insertability | Cannot insert: \**phương tiện X thông tin đại chúng* |
|
| 187 |
+
| Semantic opacity | Fixed term meaning "mass media", not compositional |
|
| 188 |
+
| Dictionary | Listed as single entry in Vietnamese dictionaries |
|
| 189 |
+
| Syntactic behavior | Cannot internally modify components |
|
| 190 |
+
|
| 191 |
+
**Decision: ONE word (single token)**
|
| 192 |
+
|
| 193 |
+
This is consistent with UDD-1's current segmentation where it appears 14 times as a single 6-syllable token. When *thông tin đại chúng* appears alone (e.g., in enumerative lists like "cơ quan y tế, văn hóa, giáo dục, thông tin đại chúng"), it should also be treated as a single 4-syllable token.
|
| 194 |
+
|
| 195 |
+
## 6. Known Segmentation Issues in UDD-1
|
| 196 |
+
|
| 197 |
+
From the quantitative evaluation (`SEGMENTATION_EVAL.md`):
|
| 198 |
+
|
| 199 |
+
### 6.1 Cross-Boundary Merges (204 occurrences)
|
| 200 |
+
|
| 201 |
+
Tokens where the tokenizer incorrectly merges words across phrase/sentence boundaries, identifiable by uppercase letters mid-token:
|
| 202 |
+
|
| 203 |
+
| Token | Count | Correct Segmentation |
|
| 204 |
+
|:---|---:|:---|
|
| 205 |
+
| xã hội chủ nghĩa Việt Nam | 89 | Legitimate (fixed term) |
|
| 206 |
+
| Bộ Tư pháp | 11 | Legitimate (institutional name) |
|
| 207 |
+
| tố tụng Người | 5 | **Error**: *tố tụng* + *Người* |
|
| 208 |
+
| kề Quyền | 4 | **Error**: *kề* + *Quyền* |
|
| 209 |
+
| thường vụ Quốc hội | 5 | **Error**: *thường vụ* + *Quốc hội* |
|
| 210 |
+
| giám đốc thẩm Trong | 2 | **Error**: *giám đốc thẩm* + *Trong* |
|
| 211 |
+
|
| 212 |
+
### 6.2 Inconsistent Segmentation (158 forms)
|
| 213 |
+
|
| 214 |
+
Words that appear both as single tokens and as split bigrams in different contexts:
|
| 215 |
+
|
| 216 |
+
| Token | As Single | As Split | Expected |
|
| 217 |
+
|:---|---:|---:|:---|
|
| 218 |
+
| phiên tòa | 18 | 576 | Single token (compound) |
|
| 219 |
+
| tàu biển | 524 | 32 | Single token (compound) |
|
| 220 |
+
| hàng hóa | 6 | 175 | Single token (compound) |
|
| 221 |
+
| sức khỏe | 120 | 44 | Single token (compound) |
|
| 222 |
+
| ủy quyền | 79 | 67 | Single token (compound) |
|
| 223 |
+
| hòa giải | 30 | 53 | Single token (compound) |
|
| 224 |
+
| bị hại | 6 | 96 | Single token (compound) |
|
| 225 |
+
|
| 226 |
+
### 6.3 Always-Split Terms (Likely Errors)
|
| 227 |
+
|
| 228 |
+
| Term | Occurrences (split) | Expected |
|
| 229 |
+
|:---|---:|:---|
|
| 230 |
+
| vụ án | 892 | Single token |
|
| 231 |
+
| phiên tòa (majority) | 576 | Single token |
|
| 232 |
+
| hàng hóa (majority) | 175 | Single token |
|
| 233 |
+
| chủ tọa | 125 | Single token |
|
| 234 |
+
| trở lên | 235 | Context-dependent |
|
| 235 |
+
|
| 236 |
+
## 7. Comparison of Segmentation Tools
|
| 237 |
+
|
| 238 |
+
| Tool | Accuracy (benchmark) | Approach | Notes |
|
| 239 |
+
|:---|---:|:---|:---|
|
| 240 |
+
| Underthesea | ~80% | CRF/Neural | Used in UDD-1; highest accuracy |
|
| 241 |
+
| VnCoreNLP | ~78% | RDRsegmenter | Java-based; could serve as comparison |
|
| 242 |
+
| PyVi (SpaCy) | ~58% | Rule-based | Fastest but lowest accuracy |
|
| 243 |
+
|
| 244 |
+
All tools achieve substantially lower accuracy on out-of-domain text (e.g., legal domain) compared to news benchmarks.
|
| 245 |
+
|
| 246 |
+
**Key finding**: Underthesea's `dependency_parse()` and `word_tokenize()` share the same tokenizer — comparison between them yields 100% match (verified on 300 sampled sentences). Meaningful evaluation requires an independent tool or gold-standard segmented data.
|
| 247 |
+
|
| 248 |
+
## 8. Dictionary-Based Validation Methodology
|
| 249 |
+
|
| 250 |
+
### 8.1 Approach
|
| 251 |
+
|
| 252 |
+
Since Underthesea's `dependency_parse()` and `word_tokenize()` share the same tokenizer (100% match on 300 sentences), comparing them cannot detect segmentation errors. Instead, we use the **Viet74K dictionary** (72,535 entries from Ho Ngoc Duc's Free Vietnamese Dictionary Project, bundled in Underthesea) as an independent reference to detect potential errors.
|
| 253 |
+
|
| 254 |
+
Three validation checks:
|
| 255 |
+
|
| 256 |
+
| Check | What it detects | Interpretation |
|
| 257 |
+
|:---|:---|:---|
|
| 258 |
+
| **Token coverage** | Tokens not in dictionary (OOV) | OOV may indicate segmentation error or domain-specific term |
|
| 259 |
+
| **Under-segmentation** | Multi-syllable OOV whose sub-parts are all in dictionary | Possible over-merging by tokenizer |
|
| 260 |
+
| **Over-segmentation** | Adjacent tokens that form a dictionary word | Possible under-merging by tokenizer |
|
| 261 |
+
|
| 262 |
+
### 8.2 Available Dictionaries
|
| 263 |
+
|
| 264 |
+
| Resource | Entries | Source | Access |
|
| 265 |
+
|:---|---:|:---|:---|
|
| 266 |
+
| **Viet74K** | 72,535 | Ho Ngoc Duc (bundled in Underthesea) | `from underthesea.corpus import viet_dict_74K` |
|
| 267 |
+
| **UTS_Dictionary** | 72,547 | HuggingFace (Apache 2.0) | `from underthesea.datasets.uts_dictionary import UTSDictionary` |
|
| 268 |
+
| **Dictionary with POS** | 31,327 | Underthesea (pickle format) | `from underthesea.dictionary import Dictionary` |
|
| 269 |
+
| **RDRsegmenter VnVocab** | 32,450 | VnCoreNLP | Java serialized HashSet |
|
| 270 |
+
| **PyVi word list** | 31,158 | MIT license | `pyvi/models/words.txt` |
|
| 271 |
+
| **Kaikki Wiktionary** | 42,351 | CC BY-SA 3.0 | JSONL download from kaikki.org |
|
| 272 |
+
| **undertheseanlp/dictionary** | 79,226 | Merged from 3 sources (GPL-3.0) | GitHub |
|
| 273 |
+
|
| 274 |
+
Viet74K is recommended as primary because: (1) zero setup, (2) largest single-source coverage, (3) 87.8% multi-syllable entries, and (4) same project ecosystem as UDD-1.
|
| 275 |
+
|
| 276 |
+
### 8.3 Results on UDD-1
|
| 277 |
+
|
| 278 |
+
Running `python3 src/eval_segmentation.py --all-files --dict-validate` (full details in `SEGMENTATION_EVAL.md` Section 6):
|
| 279 |
+
|
| 280 |
+
**Token coverage**: 98.2% (226,483 / 230,709 tokens in dictionary). ADJ has the lowest coverage (90.3%), while function word categories (AUX, CCONJ, SCONJ, PUNCT) have 100%.
|
| 281 |
+
|
| 282 |
+
**Possible over-merges** (under-segmentation errors): 382 unique multi-syllable OOV forms (2,231 occurrences). Top candidates:
|
| 283 |
+
|
| 284 |
+
| Token | Count | Likely Status |
|
| 285 |
+
|:---|---:|:---|
|
| 286 |
+
| việt nam | 368 | **True OOV** — proper noun not in dict |
|
| 287 |
+
| tố tụng dân sự | 85 | **True OOV** — legal compound not in dict |
|
| 288 |
+
| điều tra viên | 73 | **True OOV** — legal term |
|
| 289 |
+
| thụ lý | 67 | **True OOV** — legal term |
|
| 290 |
+
| tòa án nhân dân | 64 | **True OOV** — institutional name |
|
| 291 |
+
| phiên tòa | 18 | **Possible error** — common word not in dict |
|
| 292 |
+
| a khoản | 27 | **Likely error** — cross-boundary merge |
|
| 293 |
+
|
| 294 |
+
**Possible under-merges** (over-segmentation errors): 462 unique dictionary words found split (7,373 occurrences). Top candidates:
|
| 295 |
+
|
| 296 |
+
| Dictionary Word | Times Split | Likely Status |
|
| 297 |
+
|:---|---:|:---|
|
| 298 |
+
| vụ án | 892 | **True error** — compound always split |
|
| 299 |
+
| phạt tù | 422 | **True error** — compound always split |
|
| 300 |
+
| hủy bỏ | 147 | **True error** — compound always split |
|
| 301 |
+
| chữa bệnh | 112 | **True error** — compound always split |
|
| 302 |
+
| sức khỏe | 44 | **True error** — sometimes split, sometimes merged |
|
| 303 |
+
| bên có | 167 | **False positive** — accidental bigram match |
|
| 304 |
+
| có người | 84 | **False positive** — accidental bigram match |
|
| 305 |
+
|
| 306 |
+
### 8.4 Limitations of Dictionary-Based Validation
|
| 307 |
+
|
| 308 |
+
1. **False positives in over-segmentation**: The dictionary contains some multi-word entries that are also valid as separate words in context. For example, *bên có* (party that has) can be two independent words but is also a dictionary entry meaning "creditor." Context is needed to distinguish.
|
| 309 |
+
|
| 310 |
+
2. **Missing legal terms**: Viet74K lacks many legal compound terms (*tố tụng dân sự*, *phiên tòa*, *ủy quyền*), causing them to appear as OOV even though they are correctly segmented.
|
| 311 |
+
|
| 312 |
+
3. **Dictionary granularity mismatch**: The dictionary may include entries at a different granularity than the treebank's intended segmentation level.
|
| 313 |
+
|
| 314 |
+
4. **No context sensitivity**: Dictionary lookup cannot determine whether a word form is correctly segmented in its specific syntactic context.
|
| 315 |
+
|
| 316 |
+
### 8.5 Using the Tool
|
| 317 |
+
|
| 318 |
+
```bash
|
| 319 |
+
# Full evaluation with dictionary validation
|
| 320 |
+
python3 src/eval_segmentation.py --all-files --dict-validate -o SEGMENTATION_EVAL.md
|
| 321 |
+
|
| 322 |
+
# Combined with word_tokenize comparison
|
| 323 |
+
python3 src/eval_segmentation.py --all-files --dict-validate --compare-tokenize -o SEGMENTATION_EVAL.md
|
| 324 |
+
```
|
| 325 |
+
|
| 326 |
+
## 9. Cross-Linguistic Comparison of Word Segmentation
|
| 327 |
+
|
| 328 |
+
### 9.1 The Universal Problem
|
| 329 |
+
|
| 330 |
+
Word segmentation is not unique to Vietnamese. Multiple languages face the challenge of determining word boundaries, though the nature of the problem varies by writing system and typology.
|
| 331 |
+
|
| 332 |
+
| Language | Writing System | Spaces? | Problem Type |
|
| 333 |
+
|:---|:---|:---|:---|
|
| 334 |
+
| Vietnamese | Latin + diacritics | Between syllables | Spaces inside words |
|
| 335 |
+
| Chinese | Hanzi | None | No boundary markers |
|
| 336 |
+
| Japanese | Kanji + Kana | None | No boundary markers, multiple scripts |
|
| 337 |
+
| Thai | Thai script | Between clauses only | No word-level markers |
|
| 338 |
+
| Korean | Hangul | Between eojeols | Morpheme boundaries inside eojeols |
|
| 339 |
+
| Khmer | Khmer script | None (decorative) | No boundary markers |
|
| 340 |
+
| Myanmar | Myanmar script | None | Two-stage (syllable → word) |
|
| 341 |
+
|
| 342 |
+
Vietnamese is unique in UD: it is the only language where spaces occur **inside** words as a fundamental property (not exceptions). UD v2 explicitly accommodates this.
|
| 343 |
+
|
| 344 |
+
### 9.2 Chinese (Mandarin)
|
| 345 |
+
|
| 346 |
+
**Standards**: Multiple competing standards with different granularity:
|
| 347 |
+
- **Penn Chinese Treebank (CTB)**: Syntactic atom definition — includes compounds
|
| 348 |
+
- **Peking University (PKU)**: Meaning-based, finer-grained
|
| 349 |
+
- **Microsoft Research (MSR)**: Coarser-grained, keeps longer units
|
| 350 |
+
- **Academia Sinica (AS)**: Traditional Chinese standard (Taiwan)
|
| 351 |
+
|
| 352 |
+
**SIGHAN Bakeoffs** (2003--2008): Landmark shared tasks establishing evaluation methodology. Modern BERT-based systems achieve 97--99% F1, up from ~95% in early bakeoffs.
|
| 353 |
+
|
| 354 |
+
**Key tools**: jieba (dictionary + HMM), HanLP (BERT-based, ~98% F1), Stanza (BiLSTM-CRF), pkuseg (domain-adaptive).
|
| 355 |
+
|
| 356 |
+
**UD handling**: UD_Chinese-GSD uses morpheme-based segmentation. Trankit achieves 97.75 F1 on zh_gsd.
|
| 357 |
+
|
| 358 |
+
**Multi-criteria learning** (Huang et al., 2019): A single model trained to segment according to multiple standards simultaneously. This approach could be adapted for Vietnamese to handle different segmentation granularities.
|
| 359 |
+
|
| 360 |
+
**Parallel to Vietnamese**: Both are isolating languages with no morphological inflection. Both face ambiguity at compound word boundaries. Chinese has more mature tools (97--99% F1 vs ~80--98% for Vietnamese depending on benchmark).
|
| 361 |
+
|
| 362 |
+
### 9.3 Japanese
|
| 363 |
+
|
| 364 |
+
**Three-layer segmentation hierarchy** (BCCWJ corpus):
|
| 365 |
+
|
| 366 |
+
| Level | Name | Granularity | Example: 東京都立大学 |
|
| 367 |
+
|:---|:---|:---|:---|
|
| 368 |
+
| Fine | Short Unit Word (SUW) | Morpheme-level | 東京 / 都立 / 大学 (3 tokens) |
|
| 369 |
+
| Medium | Long Unit Word (LUW) | Compound-level | 東京都立大学 (1 token) |
|
| 370 |
+
| Coarse | Bunsetsu | Phrase-level | 東京都立大学 (1 phrase) |
|
| 371 |
+
|
| 372 |
+
**Key innovation**: Japanese UD maintains **parallel treebanks** at different granularities — UD_Japanese-GSD (SUW) and UD_Japanese-GSDLUW (LUW). This acknowledges that no single segmentation is "correct."
|
| 373 |
+
|
| 374 |
+
**Key tools**: MeCab (lattice + Viterbi, most used), Juman++ (RNN-based), Sudachi (three modes: A/B/C for fine/medium/coarse).
|
| 375 |
+
|
| 376 |
+
**Accuracy**: MeCab + UniDic achieves ~97--99% F1 for SUW segmentation.
|
| 377 |
+
|
| 378 |
+
**Lesson for Vietnamese**: The multi-granularity approach is directly applicable. Vietnamese could maintain parallel views: syllable-level (raw text), word-level (standard segmentation), and compound-level (merged legal terms).
|
| 379 |
+
|
| 380 |
+
### 9.4 Thai
|
| 381 |
+
|
| 382 |
+
**Standards**: BEST corpus (5M words, 4 genres) is the primary benchmark. Thai Character Clusters (TCC) provide sub-syllable constraints for dictionary-based methods.
|
| 383 |
+
|
| 384 |
+
**Key tools**: DeepCut (CNN, ~96.3% F1 — SOTA), AttaCut (dilated CNN, ~91% but 6x faster), PyThaiNLP (unified interface).
|
| 385 |
+
|
| 386 |
+
**UD status**: UD_Thai-PUD (1,000 sentences) exists but no Thai-specific annotation guidelines — a recognized gap.
|
| 387 |
+
|
| 388 |
+
**Parallel to Vietnamese**: Both are Southeast Asian isolating languages. Thai has even less boundary information (no spaces at all). Both have limited UD resources. Thai's CNN-based tools (DeepCut) could inspire Vietnamese approaches.
|
| 389 |
+
|
| 390 |
+
### 9.5 Korean
|
| 391 |
+
|
| 392 |
+
**Unique challenge**: Korean is agglutinative. Spaces mark eojeol boundaries, but eojeols contain stems + particles + endings that need further decomposition.
|
| 393 |
+
|
| 394 |
+
**Key finding** (Park, 2023): **Separating only functional morphemes** (case markers, verbal endings) while keeping derivational suffixes intact achieves optimal parsing performance — better than full morpheme decomposition or no decomposition.
|
| 395 |
+
|
| 396 |
+
**UD treebanks**: UD_Korean-GSD (eojeol-based) vs UD_Korean-Kaist (morpheme-separated). Recent K-UD project (2024) is standardizing guidelines.
|
| 397 |
+
|
| 398 |
+
**Accuracy**: Eojeol-level segmentation is trivial (~99--100% F1 since spaces mark boundaries). The challenge is in morpheme-level analysis.
|
| 399 |
+
|
| 400 |
+
**Lesson for Vietnamese**: The principle of selective segmentation (only separate where it matters for parsing) could help Vietnamese decide which compound boundaries to annotate.
|
| 401 |
+
|
| 402 |
+
### 8.6 Khmer and Myanmar
|
| 403 |
+
|
| 404 |
+
Both are low-resource Southeast Asian languages with no word-level space markers.
|
| 405 |
+
|
| 406 |
+
**Khmer**: khmer-nltk (CRF-based, ~99.7% F1 on test data). No official UD treebank yet. A 20,000-sentence corpus is in development.
|
| 407 |
+
|
| 408 |
+
**Myanmar**: Two-stage pipeline (syllable segmentation → word segmentation), paralleling Vietnamese's syllable-to-word process. myWord tool trained on 12M manually segmented words. UD treebank in development.
|
| 409 |
+
|
| 410 |
+
**UnifiedCut** (2024): A multi-head attention model handles Thai, Burmese, and Khmer simultaneously, demonstrating that a single architecture can serve multiple Southeast Asian languages. Could be extended to Vietnamese.
|
| 411 |
+
|
| 412 |
+
### 8.7 Cross-Lingual Benchmarks
|
| 413 |
+
|
| 414 |
+
From CoNLL 2018 Shared Task and Trankit evaluations:
|
| 415 |
+
|
| 416 |
+
| Language | Trankit F1 | Stanza F1 | Gap |
|
| 417 |
+
|:---|---:|---:|---:|
|
| 418 |
+
| Korean-Kaist | 98.70 | 100.00 | -1.30 |
|
| 419 |
+
| Chinese-GSD | 97.75 | 92.83 | +4.92 |
|
| 420 |
+
| Japanese-GSD | 95.25 | 92.67 | +2.58 |
|
| 421 |
+
| **Vietnamese-VTB** | **94.88** | **87.25** | **+7.63** |
|
| 422 |
+
|
| 423 |
+
Vietnamese has the **largest gap** between Trankit and Stanza performance (7.63 points), indicating the task is particularly sensitive to model architecture. Vietnamese segmentation F1 (87--95%) is lower than Chinese (93--98%), reflecting fewer training resources and the unique syllable-space challenge.
|
| 424 |
+
|
| 425 |
+
**Universal Word Segmentation** (Shao et al., TACL 2018): A single BiRNN-CRF architecture achieves SOTA on all UD languages. Key finding: segmentation accuracy correlates positively with presence of word boundary markers and negatively with vocabulary size of non-segmental terms.
|
| 426 |
+
|
| 427 |
+
### 8.8 Methods Applicable to Vietnamese
|
| 428 |
+
|
| 429 |
+
Based on this cross-linguistic survey, the following methods could improve Vietnamese word segmentation:
|
| 430 |
+
|
| 431 |
+
| Method | Source Language | Description | Applicability |
|
| 432 |
+
|:---|:---|:---|:---|
|
| 433 |
+
| Multi-criteria learning | Chinese | Single model segments according to multiple standards | Handle different Vietnamese segmentation conventions |
|
| 434 |
+
| Multi-granularity treebanks | Japanese | Parallel treebanks at SUW/LUW levels | Maintain syllable/word/compound views of UDD-1 |
|
| 435 |
+
| Universal segmentation | Cross-lingual | BiRNN-CRF character-level labeling | Already strong on Vietnamese in UD evaluations |
|
| 436 |
+
| Joint segmentation + parsing | Chinese/Korean | Let syntax guide segmentation | Could reduce cascading errors in UDD-1 |
|
| 437 |
+
| Selective morpheme separation | Korean | Only separate boundaries that matter for parsing | Focus on functional vs. content word boundaries |
|
| 438 |
+
| Cross-lingual transfer | Thai/Khmer/Myanmar | UnifiedCut multi-language model | Leverage shared Southeast Asian properties |
|
| 439 |
+
| Domain-adaptive segmentation | Chinese (pkuseg) | Fine-tune for legal domain | Address news→legal domain shift in UDD-1 |
|
| 440 |
+
|
| 441 |
+
### 8.9 Key Takeaways
|
| 442 |
+
|
| 443 |
+
1. **The compound word problem is universal**: Every language surveyed faces ambiguity at compound boundaries. Vietnamese's syllable-level spaces make this visible; Chinese/Japanese/Thai hide it within character sequences.
|
| 444 |
+
|
| 445 |
+
2. **No single correct segmentation exists**: Chinese has CTB/PKU/MSR, Japanese has SUW/LUW, Korean has eojeol/morpheme. The choice depends on the task. Vietnamese should acknowledge this rather than forcing a single standard.
|
| 446 |
+
|
| 447 |
+
3. **Multi-granularity is principled**: Japanese's parallel treebanks (GSD vs GSDLUW) suggest that maintaining multiple segmentation views is more honest than choosing one.
|
| 448 |
+
|
| 449 |
+
4. **Segmentation errors cascade**: CoNLL shared tasks show word segmentation errors directly impact LAS. Vietnamese's lower segmentation F1 compared to Chinese is a bottleneck for parsing quality.
|
| 450 |
+
|
| 451 |
+
5. **Domain adaptation matters**: Chinese pkuseg shows domain-specific models significantly outperform general models. UDD-1's news-trained tokenizer applied to legal text faces the same challenge.
|
| 452 |
+
|
| 453 |
+
## 9. Recommendations for Future Versions
|
| 454 |
+
|
| 455 |
+
1. **Gold-standard sample**: Manually segment 200+ sentences following these guidelines to establish segmentation accuracy on legal text.
|
| 456 |
+
2. **Consistency enforcement**: Post-process to ensure the same word form is always segmented the same way within the treebank (cf. Chinese multi-criteria learning approach).
|
| 457 |
+
3. **Legal term dictionary**: Build a domain-specific dictionary of legal compound terms to guide segmentation (cf. Chinese pkuseg domain adaptation).
|
| 458 |
+
4. **Cross-tool validation**: Compare Underthesea segmentation with VnCoreNLP (RDRsegmenter) on the same sentences to identify systematic differences.
|
| 459 |
+
5. **Fix known errors**: Correct cross-boundary merges (204 cases) and enforce consistent segmentation for the 158 identified inconsistent forms.
|
| 460 |
+
6. **Consider multi-granularity annotation**: Following the Japanese GSD/GSDLUW model, maintain metadata about segmentation granularity to support different use cases.
|
| 461 |
+
7. **Explore joint segmentation + parsing**: Chinese and Korean research shows that joint models reduce cascading segmentation errors in downstream parsing.
|
| 462 |
+
8. **Domain-adaptive fine-tuning**: Fine-tune the word segmenter on a small set of manually segmented legal sentences, following the pkuseg approach for domain adaptation.
|
| 463 |
+
|
| 464 |
+
## References
|
| 465 |
+
|
| 466 |
+
### Vietnamese Word Segmentation
|
| 467 |
+
|
| 468 |
+
- de Marneffe, M.-C., Manning, C.D., Nivre, J., and Zeman, D. (2021). Universal Dependencies. *Computational Linguistics*, 47(2):255--308.
|
| 469 |
+
- Nguyen, Q.T., Nguyen, N.L.T., and Miyao, Y. (2012). Comparing Different Criteria for Vietnamese Word Segmentation. In *Proceedings of the Third Named Entities Workshop (NEWS 2012)*. https://aclanthology.org/W12-5005/
|
| 470 |
+
- Nguyen, Q.T., Nguyen, N.L.T., Nguyen, T.P., and Phan, T.H. (2016). Challenges and Solutions for Consistent Annotation of Vietnamese Treebank. In *Proceedings of LREC 2016*. https://aclanthology.org/L16-1243/
|
| 471 |
+
- Nguyen, Q.T., Nguyen, N.L.T., Nguyen, T.P., and Phan, T.H. (2018). Ensuring Annotation Consistency and Accuracy for Vietnamese Treebank. *Language Resources and Evaluation*, 52(3). https://link.springer.com/article/10.1007/s10579-017-9398-3
|
| 472 |
+
- Nguyen, T.M.H., Romary, L., Rossignol, M., and Vu, X.L. (2008). Word Segmentation of Vietnamese Texts: a Comparison of Approaches. In *Proceedings of LREC 2008*. https://aclanthology.org/L08-1355/
|
| 473 |
+
- Nguyen, P.-T., Vu, X.-L., Nguyen, T.M.H., Nguyen, V.-H., and Le, H.-P. (2009). Building a Large Syntactically-Annotated Corpus of Vietnamese. In *Proceedings of LAW III*. https://aclanthology.org/W09-3035/
|
| 474 |
+
|
| 475 |
+
### Chinese Word Segmentation
|
| 476 |
+
|
| 477 |
+
- Emerson, T. (2005). The Second International Chinese Word Segmentation Bakeoff. In *Proceedings of the Fourth SIGHAN Workshop*. https://aclanthology.org/I05-3017/
|
| 478 |
+
- Huang, W., Cheng, X., Chen, K., Wang, T., and Chu, W. (2019). Toward Fast and Accurate Neural Chinese Word Segmentation with Multi-Criteria Learning. https://arxiv.org/abs/1903.04190
|
| 479 |
+
- Tian, Y., Song, Y., Xia, F., Zhang, T., and Wang, Y. (2020). Improving Chinese Word Segmentation with Wordhood Memory Networks. In *Proceedings of ACL 2020*.
|
| 480 |
+
|
| 481 |
+
### Japanese Word Segmentation
|
| 482 |
+
|
| 483 |
+
- Asahara, M., Kanayama, H., Tanaka, T., Miyao, Y., Uematsu, S., Mori, S., Matsumoto, Y., Omura, M., and Murawaki, Y. (2018). Universal Dependencies Version 2 for Japanese. In *Proceedings of LREC 2018*. https://aclanthology.org/L18-1287/
|
| 484 |
+
- Kudo, T., Yamamoto, K., and Matsumoto, Y. (2004). Applying Conditional Random Fields to Japanese Morphological Analysis. In *Proceedings of EMNLP 2004*.
|
| 485 |
+
|
| 486 |
+
### Thai Word Segmentation
|
| 487 |
+
|
| 488 |
+
- Chormai, P., Prasertsom, P., and Rutherford, A. (2019). AttaCut: A Fast and Accurate Neural Thai Word Segmenter. https://arxiv.org/abs/1911.07056
|
| 489 |
+
|
| 490 |
+
### Korean Word Segmentation
|
| 491 |
+
|
| 492 |
+
- Park, J. (2023). Word Segmentation Granularity in Korean. https://arxiv.org/abs/2309.03713
|
| 493 |
+
- K-UD Project (2024). Revising Korean Universal Dependencies Guidelines. https://arxiv.org/html/2412.00856
|
| 494 |
+
|
| 495 |
+
### Cross-Lingual Word Segmentation
|
| 496 |
+
|
| 497 |
+
- Shao, Y., Hardmeier, C., Tiedemann, J., and Nivre, J. (2018). Universal Word Segmentation: Implementation and Interpretation. *Transactions of the ACL*, 6:421--435. https://aclanthology.org/Q18-1030/
|
| 498 |
+
- Nguyen, M.V., Lai, V.D., Veyseh, A.P.B., and Nguyen, T.H. (2021). Trankit: A Light-Weight Transformer-based Toolkit for Multilingual NLP. In *Proceedings of EACL 2021 Demonstrations*. https://aclanthology.org/2021.eacl-demos.10/
|
| 499 |
+
- Zeman, D. et al. (2018). CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies. In *Proceedings of CoNLL 2018*. https://universaldependencies.org/conll18/
|
| 500 |
+
|
| 501 |
+
### UD Word Segmentation Policy
|
| 502 |
+
|
| 503 |
+
- Universal Dependencies v2 Tokenization: https://universaldependencies.org/u/overview/tokenization.html
|
| 504 |
+
- Universal Dependencies v2 Segmentation Changes: https://universaldependencies.org/v2/segmentation.html
|
| 505 |
+
- ISO 24614-1:2010 Word Segmentation of Written Texts: https://www.iso.org/standard/41665.html
|
SEGMENTATION_EVAL.md
ADDED
|
@@ -0,0 +1,1017 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UDD-1 Word Segmentation Evaluation
|
| 2 |
+
|
| 3 |
+
**Files analyzed:** vi_udd-ud-train.conllu, vi_udd-ud-dev.conllu, vi_udd-ud-test.conllu
|
| 4 |
+
**Total sentences:** 10,000
|
| 5 |
+
**Total tokens:** 230,709
|
| 6 |
+
|
| 7 |
+
## 1. Syllable Distribution per Token
|
| 8 |
+
|
| 9 |
+
### 1.1 Overall Distribution
|
| 10 |
+
|
| 11 |
+
| Syllables | Count | Percentage |
|
| 12 |
+
|---:|---:|---:|
|
| 13 |
+
| 1 | 143,336 | 62.13% |
|
| 14 |
+
| 2 | 84,417 | 36.59% |
|
| 15 |
+
| 3 | 2,088 | 0.91% |
|
| 16 |
+
| 4+ | 868 | 0.38% |
|
| 17 |
+
|
| 18 |
+
### 1.2 Distribution by UPOS
|
| 19 |
+
|
| 20 |
+
| UPOS | 1-syl | 2-syl | 3-syl | 4+-syl | Total | Avg syl |
|
| 21 |
+
|:---|---:|---:|---:|---:|---:|---:|
|
| 22 |
+
| NOUN | 29,746 | 42,261 | 1,353 | 593 | 73,953 | 1.64 |
|
| 23 |
+
| VERB | 21,646 | 34,358 | 541 | 188 | 56,733 | 1.64 |
|
| 24 |
+
| PUNCT | 26,101 | 4 | 5 | 0 | 26,110 | 1.00 |
|
| 25 |
+
| ADP | 21,452 | 1,063 | 10 | 9 | 22,534 | 1.05 |
|
| 26 |
+
| AUX | 8,310 | 134 | 0 | 0 | 8,444 | 1.02 |
|
| 27 |
+
| ADV | 6,500 | 1,353 | 5 | 9 | 7,867 | 1.18 |
|
| 28 |
+
| NUM | 6,836 | 593 | 44 | 36 | 7,509 | 1.11 |
|
| 29 |
+
| ADJ | 3,862 | 3,168 | 58 | 5 | 7,093 | 1.47 |
|
| 30 |
+
| CCONJ | 5,946 | 11 | 0 | 0 | 5,957 | 1.00 |
|
| 31 |
+
| DET | 4,327 | 913 | 65 | 21 | 5,326 | 1.21 |
|
| 32 |
+
|
| 33 |
+
## 2. Anomalous Token Detection
|
| 34 |
+
|
| 35 |
+
### 2a. Long Tokens (4+ syllables)
|
| 36 |
+
|
| 37 |
+
Total occurrences: 868
|
| 38 |
+
Unique tokens: 111
|
| 39 |
+
|
| 40 |
+
**Top 30 by frequency:**
|
| 41 |
+
|
| 42 |
+
| Token | Count | UPOS | Syllables |
|
| 43 |
+
|:---|---:|:---|---:|
|
| 44 |
+
| xã hội chủ nghĩa Việt Nam | 89 | NOUN | 6 |
|
| 45 |
+
| tố tụng dân sự | 87 | NOUN | 4 |
|
| 46 |
+
| quản lý nhà nước | 72 | NOUN | 4 |
|
| 47 |
+
| Tòa án nhân dân | 64 | NOUN | 4 |
|
| 48 |
+
| Bộ luật hình sự | 52 | VERB | 4 |
|
| 49 |
+
| Ủy ban nhân dân | 49 | NUM | 4 |
|
| 50 |
+
| Hội thẩm nhân dân | 30 | NOUN | 4 |
|
| 51 |
+
| Viện kiểm sát nhân dân | 26 | NOUN | 5 |
|
| 52 |
+
| bảo hiểm y tế | 26 | VERB | 4 |
|
| 53 |
+
| Bộ luật dân sự | 25 | NOUN | 4 |
|
| 54 |
+
| Tòa án nhân dân tối cao | 23 | NOUN | 6 |
|
| 55 |
+
| tố tụng hình sự | 22 | NOUN | 4 |
|
| 56 |
+
| Viện kiểm sát nhân dân tối cao | 18 | VERB | 7 |
|
| 57 |
+
| kết cấu hạ tầng | 16 | VERB | 4 |
|
| 58 |
+
| phương tiện thông tin đại chúng | 14 | NOUN | 6 |
|
| 59 |
+
| sở hữu toàn dân | 11 | NOUN | 4 |
|
| 60 |
+
| Hội đồng bộ trưởng | 10 | NUM | 4 |
|
| 61 |
+
| bảo hiểm xã hội | 10 | VERB | 4 |
|
| 62 |
+
| dân tộc thiểu số | 10 | NOUN | 4 |
|
| 63 |
+
| tiết lộ giới tính | 9 | VERB | 4 |
|
| 64 |
+
| tư cách pháp nhân | 8 | NOUN | 4 |
|
| 65 |
+
| nghĩa vụ quân sự | 7 | NOUN | 4 |
|
| 66 |
+
| trưng cầu ý dân | 7 | VERB | 4 |
|
| 67 |
+
| luật tố tụng dân sự | 6 | NOUN | 5 |
|
| 68 |
+
| an toàn lao động | 6 | NOUN | 4 |
|
| 69 |
+
| Mặt trận Tổ quốc Việt Nam | 6 | NOUN | 6 |
|
| 70 |
+
| sở hữu trí tuệ | 6 | NOUN | 4 |
|
| 71 |
+
| quy phạm pháp luật | 6 | NOUN | 4 |
|
| 72 |
+
| bất khả xâm phạm | 5 | X | 4 |
|
| 73 |
+
| cơ quan chuyên môn | 5 | NOUN | 4 |
|
| 74 |
+
|
| 75 |
+
### 2b. Possible Cross-Boundary Merges
|
| 76 |
+
|
| 77 |
+
Tokens (non-PROPN) with uppercase letters after spaces, suggesting
|
| 78 |
+
incorrect merging of adjacent words.
|
| 79 |
+
|
| 80 |
+
Total occurrences: 204
|
| 81 |
+
Unique tokens: 61
|
| 82 |
+
|
| 83 |
+
| Token | Count | UPOS | Example sent_id |
|
| 84 |
+
|:---|---:|:---|:---|
|
| 85 |
+
| xã hội chủ nghĩa Việt Nam | 89 | NOUN | s8897 |
|
| 86 |
+
| Bộ Tư pháp | 11 | VERB | s6446 |
|
| 87 |
+
| Mặt trận Tổ quốc Việt Nam | 6 | NOUN | s9170 |
|
| 88 |
+
| tố tụng Người | 5 | VERB | s4923 |
|
| 89 |
+
| CHỦ TỊCH | 5 | NOUN | s6676 |
|
| 90 |
+
| Nguyễn Sinh Hùng | 5 | PUNCT | s6676 |
|
| 91 |
+
| khóa XIII | 5 | NOUN | s6675 |
|
| 92 |
+
| thường vụ Quốc hội | 5 | NOUN | s8509 |
|
| 93 |
+
| kề Quyền | 4 | VERB | s759 |
|
| 94 |
+
| Luật Bảo hiểm xã hội | 3 | NUM | s9589 |
|
| 95 |
+
| Luật Bảo vệ | 3 | NOUN | s9738 |
|
| 96 |
+
| khóa XV | 3 | NOUN | s9600 |
|
| 97 |
+
| Bộ luật Lao động | 3 | NUM | s9280 |
|
| 98 |
+
| Pháp lệnh Dân số | 3 | NUM | s9742 |
|
| 99 |
+
| và Phát triển nông thôn | 2 | NOUN | s9964 |
|
| 100 |
+
| giám đốc thẩm Trong | 2 | NOUN | s8451 |
|
| 101 |
+
| lập Quỹ | 2 | VERB | s3346 |
|
| 102 |
+
| thừa kế Người | 2 | DET | s737 |
|
| 103 |
+
| Bộ Tài nguyên và Môi trường | 2 | NOUN | s2392 |
|
| 104 |
+
| Đảng Cộng sản Việt Nam | 2 | NOUN | s9298 |
|
| 105 |
+
| chủ sở hữu Người | 2 | NOUN | s612 |
|
| 106 |
+
| tảo hôn Người | 1 | NOUN | s4002 |
|
| 107 |
+
| tái thẩm Các | 1 | DET | s8507 |
|
| 108 |
+
| quyền sở hữu Bảo lưu | 1 | NOUN | s1054 |
|
| 109 |
+
| hàm Thiếu | 1 | NOUN | s9416 |
|
| 110 |
+
| Tổng liên đoàn Lao động | 1 | NOUN | s9033 |
|
| 111 |
+
| tái thẩm Thời hạn | 1 | NOUN | s6140 |
|
| 112 |
+
| tố tụng Trong | 1 | DET | s6735 |
|
| 113 |
+
| ĐẠI LÝ | 1 | NOUN | s3130 |
|
| 114 |
+
| Hội đồng Trọng tài | 1 | VERB | s2770 |
|
| 115 |
+
|
| 116 |
+
### 2c. Legal Term Segmentation Consistency
|
| 117 |
+
|
| 118 |
+
| Term | As Single Token | As Split Tokens | Total | Consistency |
|
| 119 |
+
|:---|---:|---:|---:|---:|
|
| 120 |
+
| quy định | 2,230 | 0 | 2,230 | 100.0% (single) |
|
| 121 |
+
| tài sản | 1,516 | 0 | 1,516 | 100.0% (single) |
|
| 122 |
+
| pháp luật | 1,154 | 0 | 1,154 | 100.0% (single) |
|
| 123 |
+
| cơ quan | 1,106 | 0 | 1,106 | 100.0% (single) |
|
| 124 |
+
| nghĩa vụ | 1,025 | 0 | 1,025 | 100.0% (single) |
|
| 125 |
+
| vụ án | 0 | 892 | 892 | 100.0% (split) |
|
| 126 |
+
| tổ chức | 728 | 0 | 728 | 100.0% (single) |
|
| 127 |
+
| giải quyết | 707 | 0 | 707 | 100.0% (single) |
|
| 128 |
+
| trách nhiệm | 669 | 0 | 669 | 100.0% (single) |
|
| 129 |
+
| thẩm quyền | 651 | 0 | 651 | 100.0% (single) |
|
| 130 |
+
| hợp đồng | 608 | 0 | 608 | 100.0% (single) |
|
| 131 |
+
| cá nhân | 440 | 0 | 440 | 100.0% (single) |
|
| 132 |
+
| bồi thường | 346 | 0 | 346 | 100.0% (single) |
|
| 133 |
+
| khiếu nại | 280 | 0 | 280 | 100.0% (single) |
|
| 134 |
+
| vi phạm | 262 | 0 | 262 | 100.0% (single) |
|
| 135 |
+
| hình sự | 222 | 0 | 222 | 100.0% (single) |
|
| 136 |
+
| quyền lợi | 117 | 0 | 117 | 100.0% (single) |
|
| 137 |
+
| tố cáo | 79 | 0 | 79 | 100.0% (single) |
|
| 138 |
+
| xử phạt | 43 | 0 | 43 | 100.0% (single) |
|
| 139 |
+
|
| 140 |
+
## 3. Inconsistent Segmentation
|
| 141 |
+
|
| 142 |
+
Cases where two adjacent tokens appear elsewhere as a single token,
|
| 143 |
+
or vice versa. Sorted by total occurrences.
|
| 144 |
+
|
| 145 |
+
Total inconsistent forms: 158
|
| 146 |
+
|
| 147 |
+
| Token | As Single | As Split (bigram) | Total |
|
| 148 |
+
|:---|---:|---:|---:|
|
| 149 |
+
| phiên tòa | 18 | 576 | 594 |
|
| 150 |
+
| tàu biển | 524 | 32 | 556 |
|
| 151 |
+
| pháp nhân | 311 | 12 | 323 |
|
| 152 |
+
| trở lên | 1 | 235 | 236 |
|
| 153 |
+
| tòa án nhân dân | 64 | 128 | 192 |
|
| 154 |
+
| di chúc | 190 | 1 | 191 |
|
| 155 |
+
| thì phải | 16 | 169 | 185 |
|
| 156 |
+
| hàng hóa | 6 | 175 | 181 |
|
| 157 |
+
| sức khỏe | 120 | 44 | 164 |
|
| 158 |
+
| viện trưởng | 137 | 11 | 148 |
|
| 159 |
+
| ủy quyền | 79 | 67 | 146 |
|
| 160 |
+
| chủ tọa | 7 | 125 | 132 |
|
| 161 |
+
| bảo vệ quyền | 3 | 112 | 115 |
|
| 162 |
+
| bị hại | 6 | 96 | 102 |
|
| 163 |
+
| giao kết | 93 | 2 | 95 |
|
| 164 |
+
| tòa án nhân dân tối cao | 23 | 64 | 87 |
|
| 165 |
+
| hòa giải | 30 | 53 | 83 |
|
| 166 |
+
| quản lý nhà nước | 75 | 1 | 76 |
|
| 167 |
+
| án phí | 72 | 2 | 74 |
|
| 168 |
+
| tiền công | 2 | 62 | 64 |
|
| 169 |
+
| thuê khoán | 2 | 62 | 64 |
|
| 170 |
+
| tàu thuyền | 60 | 3 | 63 |
|
| 171 |
+
| khám bệnh | 3 | 57 | 60 |
|
| 172 |
+
| viện kiểm sát nhân dân | 26 | 34 | 60 |
|
| 173 |
+
| xét đơn | 37 | 23 | 60 |
|
| 174 |
+
| ủy ban nhân dân | 49 | 10 | 59 |
|
| 175 |
+
| chìm đắm | 40 | 8 | 48 |
|
| 176 |
+
| viện kiểm sát nhân dân tối cao | 18 | 26 | 44 |
|
| 177 |
+
| tại ngũ | 31 | 12 | 43 |
|
| 178 |
+
| đe dọa | 30 | 11 | 41 |
|
| 179 |
+
| cầm giữ | 39 | 2 | 41 |
|
| 180 |
+
| vi phạm pháp luật | 1 | 39 | 40 |
|
| 181 |
+
| tổn thất chung | 4 | 36 | 40 |
|
| 182 |
+
| người thân thích | 37 | 2 | 39 |
|
| 183 |
+
| lai dắt | 34 | 4 | 38 |
|
| 184 |
+
| giao nộp | 37 | 1 | 38 |
|
| 185 |
+
| phong tỏa | 30 | 7 | 37 |
|
| 186 |
+
| làm chủ | 6 | 30 | 36 |
|
| 187 |
+
| bốc hàng | 1 | 35 | 36 |
|
| 188 |
+
| thì có | 10 | 25 | 35 |
|
| 189 |
+
| gọi là | 33 | 1 | 34 |
|
| 190 |
+
| có điều kiện | 2 | 31 | 33 |
|
| 191 |
+
| từ bỏ | 32 | 1 | 33 |
|
| 192 |
+
| xét tính | 31 | 2 | 33 |
|
| 193 |
+
| người lập | 4 | 28 | 32 |
|
| 194 |
+
| xóa án tích | 4 | 28 | 32 |
|
| 195 |
+
| công cứu hộ | 4 | 28 | 32 |
|
| 196 |
+
| quyền dân sự | 6 | 26 | 32 |
|
| 197 |
+
| phòng ngừa | 31 | 1 | 32 |
|
| 198 |
+
| sở hữu chung | 7 | 24 | 31 |
|
| 199 |
+
|
| 200 |
+
## 4. Comparison with `word_tokenize()`
|
| 201 |
+
|
| 202 |
+
**Skipped**: `underthesea` not available or not requested. Use `--compare-tokenize` to enable.
|
| 203 |
+
|
| 204 |
+
## 5. Manual Review Samples
|
| 205 |
+
|
| 206 |
+
Total samples: 100 (30 suspicious, 70 random)
|
| 207 |
+
|
| 208 |
+
### Sample 1 [SUSPICIOUS] — s2336
|
| 209 |
+
|
| 210 |
+
**Text:** Tài sản của tàu biển là các đồ vật , trang thiết bị trên tàu biển mà không phải là các bộ phận cấu thành của tàu biển .
|
| 211 |
+
**Tokens:** `Tài sản` `của` `tàu biển` `là` `các` `đồ vật` `,` `trang thiết bị` `trên` `tàu biển` `mà` `không` `phải` `là` `các` `bộ phận` `cấu thành` `của` `tàu biển` `.`
|
| 212 |
+
**UPOS:** NOUN ADP NOUN AUX DET NOUN PUNCT NOUN ADP NOUN SCONJ ADV AUX AUX DET NOUN NOUN ADP NOUN PUNCT
|
| 213 |
+
|
| 214 |
+
### Sample 2 [SUSPICIOUS] — s1420
|
| 215 |
+
|
| 216 |
+
**Text:** Tài sản thuộc sở hữu chung đem bán đấu giá phải có sự đồng ý của tất cả các chủ sở hữu chung , trừ trường hợp có thỏa thuận khác hoặc pháp luật có quy định khác .
|
| 217 |
+
**Tokens:** `Tài sản` `thuộc` `sở hữu` `chung` `đem` `bán đấu giá` `phải` `có` `sự` `đồng ý` `của` `tất cả` `các` `chủ sở hữu` `chung` `,` `trừ` `trường hợp` `có` `thỏa thuận` `khác` `hoặc` `pháp luật` `có` `quy định` `khác` `.`
|
| 218 |
+
**UPOS:** NOUN VERB NOUN ADJ VERB NOUN AUX VERB NOUN VERB ADP PRON DET NOUN ADJ PUNCT VERB NOUN VERB NOUN ADJ CCONJ NOUN VERB NOUN ADJ PUNCT
|
| 219 |
+
**Flags:** inconsistent: "sở hữu" + "chung" (also as "sở hữu chung")
|
| 220 |
+
|
| 221 |
+
### Sample 3 [SUSPICIOUS] — s3864
|
| 222 |
+
|
| 223 |
+
**Text:** Tội vô ý gây thương tích hoặc gây tổn hại cho sức khỏe của người khác 1 .
|
| 224 |
+
**Tokens:** `Tội` `vô ý` `gây` `thương tích` `hoặc` `gây` `tổn hại` `cho` `sức khỏe` `của` `người` `khác` `1` `.`
|
| 225 |
+
**UPOS:** NOUN NOUN VERB VERB CCONJ VERB VERB ADP NOUN ADP NOUN ADJ NUM PUNCT
|
| 226 |
+
|
| 227 |
+
### Sample 4 [SUSPICIOUS] — s3411
|
| 228 |
+
|
| 229 |
+
**Text:** Khi thực hiện nghĩa vụ này , người được bảo hiểm phải thực hiện theo chỉ dẫn hợp lý của người bảo hiểm .
|
| 230 |
+
**Tokens:** `Khi` `thực hiện` `nghĩa vụ` `này` `,` `người` `được` `bảo hiểm` `phải` `thực hiện` `theo` `chỉ dẫn` `hợp lý` `của` `người` `bảo hiểm` `.`
|
| 231 |
+
**UPOS:** NOUN VERB NOUN PRON PUNCT NOUN AUX VERB AUX VERB ADP NOUN ADJ ADP DET VERB PUNCT
|
| 232 |
+
|
| 233 |
+
### Sample 5 [SUSPICIOUS] — s322
|
| 234 |
+
|
| 235 |
+
**Text:** Các pháp nhân có thể hợp nhất thành một pháp nhân mới .
|
| 236 |
+
**Tokens:** `Các` `pháp nhân` `có thể` `hợp` `nhất` `thành` `một` `pháp nhân` `mới` `.`
|
| 237 |
+
**UPOS:** DET NOUN ADV ADJ ADV ADP NUM NOUN ADJ PUNCT
|
| 238 |
+
|
| 239 |
+
### Sample 6 [SUSPICIOUS] — s2514
|
| 240 |
+
|
| 241 |
+
**Text:** Thuyền viên có trách nhiệm thực hiện đầy đủ biện pháp bảo đảm an toàn lao động , vệ sinh lao động do chủ tàu lập ra .
|
| 242 |
+
**Tokens:** `Thuyền viên` `có` `trách nhiệm` `thực hiện` `đầy đủ` `biện pháp` `bảo đảm` `an toàn lao động` `,` `vệ sinh` `lao động` `do` `chủ` `tàu` `lập` `ra` `.`
|
| 243 |
+
**UPOS:** NOUN VERB NOUN VERB ADV NOUN VERB NOUN PUNCT VERB NOUN ADP NOUN NOUN VERB VERB PUNCT
|
| 244 |
+
**Flags:** long_token: "an toàn lao động"
|
| 245 |
+
|
| 246 |
+
### Sample 7 [SUSPICIOUS] — s2293
|
| 247 |
+
|
| 248 |
+
**Text:** Giấy chứng nhận này không có giá trị thay thế Giấy chứng nhận đăng ký tàu biển Việt Nam .
|
| 249 |
+
**Tokens:** `Giấy` `chứng nhận` `này` `không` `có` `giá trị` `thay thế` `Giấy` `chứng nhận` `đăng ký` `tàu biển` `Việt Nam` `.`
|
| 250 |
+
**UPOS:** NOUN VERB PRON ADV VERB NOUN VERB NOUN VERB VERB NOUN PROPN PUNCT
|
| 251 |
+
|
| 252 |
+
### Sample 8 [SUSPICIOUS] — s8935
|
| 253 |
+
|
| 254 |
+
**Text:** Cơ quan có thẩm quyền của Việt Nam hợp tác với cơ quan có thẩm quyền của nước ngoài trong việc truy tìm , tạm giữ , kê biên , phong tỏa , tịch thu , xử lý tài sản do phạm tội mà có để phục vụ yêu cầu điều tra , truy tố , xét xử và thi hành án hình sự .
|
| 255 |
+
**Tokens:** `Cơ quan` `có` `thẩm quyền` `của` `Việt Nam` `hợp tác` `với` `cơ quan` `có` `thẩm quyền` `của` `nước ngoài` `trong` `việc` `truy tìm` `,` `tạm` `giữ` `,` `kê biên` `,` `phong tỏa` `,` `tịch thu` `,` `xử lý` `tài sản` `do` `phạm tội` `mà` `có` `để` `phục vụ` `yêu cầu` `điều tra` `,` `truy tố` `,` `xét xử` `và` `thi hành` `án` `hình sự` `.`
|
| 256 |
+
**UPOS:** NOUN VERB NOUN ADP PROPN VERB ADP NOUN VERB NOUN ADP NOUN ADP NOUN VERB PUNCT ADJ VERB PUNCT NOUN PUNCT NOUN PUNCT VERB PUNCT VERB NOUN ADP VERB SCONJ VERB ADP VERB VERB VERB PUNCT VERB PUNCT VERB CCONJ VERB NOUN NOUN PUNCT
|
| 257 |
+
|
| 258 |
+
### Sample 9 [SUSPICIOUS] — s2184
|
| 259 |
+
|
| 260 |
+
**Text:** Vùng quay trở là vùng nước được thiết lập và công bố để tàu thuyền quay trở .
|
| 261 |
+
**Tokens:** `Vùng` `quay` `trở` `là` `vùng` `nước` `được` `thiết lập` `và` `công bố` `để` `tàu thuyền` `quay` `trở` `.`
|
| 262 |
+
**UPOS:** NOUN VERB VERB AUX NOUN NOUN AUX VERB CCONJ VERB ADP VERB VERB VERB PUNCT
|
| 263 |
+
|
| 264 |
+
### Sample 10 [SUSPICIOUS] — s9637
|
| 265 |
+
|
| 266 |
+
**Text:** Có chính sách nâng cao chất lượng dân số về thể chất , trí tuệ và tinh thần ; bảo vệ , phát triển dân số đối với các dân tộc thiểu số rất ít người .
|
| 267 |
+
**Tokens:** `Có` `chính sách` `nâng` `cao` `chất lượng` `dân số` `về` `thể chất` `,` `trí tuệ` `và` `tinh thần` `;` `bảo vệ` `,` `phát triển` `dân số` `đối với` `các` `dân tộc thiểu số` `rất` `ít` `người` `.`
|
| 268 |
+
**UPOS:** VERB NOUN VERB ADJ NOUN NOUN ADP NOUN PUNCT NOUN CCONJ NOUN PUNCT VERB PUNCT VERB NOUN ADP DET NOUN ADV ADJ NOUN PUNCT
|
| 269 |
+
**Flags:** long_token: "dân tộc thiểu số"
|
| 270 |
+
|
| 271 |
+
### Sample 11 [SUSPICIOUS] — s6615
|
| 272 |
+
|
| 273 |
+
**Text:** Cơ quan công an có nhiệm vụ bảo vệ phiên tòa hoặc người có nhiệm vụ bảo vệ trật tự phiên tòa thi hành quyết định của chủ tọa phiên tòa về việc buộc rời khỏi phòng xử án hoặc tạm giữ hành chính người gây rối trật tự tại phiên tòa .
|
| 274 |
+
**Tokens:** `Cơ quan` `công an` `có` `nhiệm vụ` `bảo vệ` `phiên` `tòa` `hoặc` `người` `có` `nhiệm vụ` `bảo vệ` `trật tự` `phiên` `tòa` `thi hành` `quyết định` `của` `chủ` `tọa` `phiên` `tòa` `về` `việc` `buộc` `rời` `khỏi` `phòng` `xử án` `hoặc` `tạm` `giữ` `hành chính` `người` `gây rối` `trật tự` `tại` `phiên` `tòa` `.`
|
| 275 |
+
**UPOS:** NOUN NOUN VERB NOUN VERB NOUN NOUN CCONJ NOUN VERB NOUN VERB NOUN NOUN VERB VERB NOUN ADP NOUN NOUN NOUN VERB ADP NOUN VERB VERB VERB NOUN VERB CCONJ ADJ VERB NOUN NOUN VERB NOUN ADP NOUN NOUN PUNCT
|
| 276 |
+
**Flags:** inconsistent: "phiên" + "tòa" (also as "phiên tòa"); inconsistent: "phiên" + "tòa" (also as "phiên tòa"); inconsistent: "chủ" + "tọa" (also as "chủ tọa"); inconsistent: "phiên" + "tòa" (also as "phiên tòa"); inconsistent: "phiên" + "tòa" (also as "phiên tòa")
|
| 277 |
+
|
| 278 |
+
### Sample 12 [SUSPICIOUS] — s1669
|
| 279 |
+
|
| 280 |
+
**Text:** Vận đơn hoặc chứng từ vận chuyển tương đương khác là bằng chứng của việc giao kết hợp đồng giữa các bên .
|
| 281 |
+
**Tokens:** `Vận đơn` `hoặc` `chứng từ` `vận chuyển` `tương đương` `khác` `là` `bằng chứng` `của` `việc` `giao kết` `hợp đồng` `giữa` `các` `bên` `.`
|
| 282 |
+
**UPOS:** NOUN CCONJ VERB VERB ADJ ADJ AUX NOUN ADP NOUN VERB NOUN ADP DET NOUN PUNCT
|
| 283 |
+
|
| 284 |
+
### Sample 13 [SUSPICIOUS] — s1588
|
| 285 |
+
|
| 286 |
+
**Text:** Việc định đoạt tài sản là quyền sử dụng đất , nhà , xưởng sản xuất , tư liệu sản xuất khác phải có thỏa thuận bằng văn bản của tất cả các thành viên ; việc định đoạt tài sản khác do đại diện của các thành viên quyết định , trừ trường hợp có thỏa thuận khác .
|
| 287 |
+
**Tokens:** `Việc` `định đoạt` `tài sản` `là` `quyền` `sử dụng` `đất` `,` `nhà` `,` `xưởng` `sản xuất` `,` `tư liệu sản xuất` `khác` `phải` `có` `thỏa thuận` `bằng` `văn bản` `của` `tất cả` `các` `thành viên` `;` `việc` `định đoạt` `tài sản` `khác` `do` `đại diện` `của` `các` `thành viên` `quyết định` `,` `trừ` `trường hợp` `có` `thỏa thuận` `khác` `.`
|
| 288 |
+
**UPOS:** NOUN VERB NOUN AUX NOUN VERB NOUN PUNCT NOUN PUNCT NOUN VERB PUNCT NOUN ADJ AUX VERB NOUN ADP NOUN ADP PRON DET NOUN PUNCT NOUN VERB NOUN ADJ ADP VERB ADP DET NOUN VERB PUNCT VERB NOUN VERB NOUN ADJ PUNCT
|
| 289 |
+
**Flags:** long_token: "tư liệu sản xuất"
|
| 290 |
+
|
| 291 |
+
### Sample 14 [SUSPICIOUS] — s2238
|
| 292 |
+
|
| 293 |
+
**Text:** Gây phương hại hoặc đe dọa gây phương hại đến chủ quyền và an ninh quốc gia .
|
| 294 |
+
**Tokens:** `Gây` `phương hại` `hoặc` `đe dọa` `gây` `phương hại` `đến` `chủ quyền` `và` `an ninh` `quốc gia` `.`
|
| 295 |
+
**UPOS:** VERB NOUN CCONJ NOUN VERB VERB ADP NOUN CCONJ NOUN NOUN PUNCT
|
| 296 |
+
|
| 297 |
+
### Sample 15 [SUSPICIOUS] — s32
|
| 298 |
+
|
| 299 |
+
**Text:** Cá nhân , pháp nhân thực hiện quyền dân sự theo ý chí của mình , không được trái với quy định tại Điều 3 và Điều 10 của Bộ luật này .
|
| 300 |
+
**Tokens:** `Cá nhân` `,` `pháp nhân` `thực hiện` `quyền` `dân sự` `theo` `ý chí` `của` `mình` `,` `không` `được` `trái` `với` `quy định` `tại` `Điều` `3` `và` `Điều` `10` `của` `Bộ luật` `này` `.`
|
| 301 |
+
**UPOS:** NOUN PUNCT NOUN VERB NOUN VERB ADP NOUN ADP PRON PUNCT ADV AUX ADJ ADP NOUN ADP NOUN NUM CCONJ NOUN NUM ADP NOUN PRON PUNCT
|
| 302 |
+
**Flags:** inconsistent: "quyền" + "dân sự" (also as "quyền dân sự")
|
| 303 |
+
|
| 304 |
+
### Sample 16 [SUSPICIOUS] — s3282
|
| 305 |
+
|
| 306 |
+
**Text:** Bộ Giao thông vận tải chủ trì tổ chức xử lý tài sản chìm đắm gây nguy hiểm .
|
| 307 |
+
**Tokens:** `Bộ` `Giao thông` `vận tải` `chủ trì` `tổ chức` `xử lý` `tài sản` `chìm đắm` `gây` `nguy hiểm` `.`
|
| 308 |
+
**UPOS:** NOUN NOUN VERB NUM VERB VERB NOUN NOUN VERB ADJ PUNCT
|
| 309 |
+
|
| 310 |
+
### Sample 17 [SUSPICIOUS] — s8492
|
| 311 |
+
|
| 312 |
+
**Text:** Viện kiểm sát phải xác minh những tình tiết mới ; khi xét thấy cần thiết , Viện trưởng Viện kiểm sát có thẩm quyền kháng nghị tái thẩm yêu cầu Cơ quan điều tra có thẩm quyền xác minh tình tiết mới của vụ án và chuyển kết quả xác minh cho Viện kiểm sát .
|
| 313 |
+
**Tokens:** `Viện kiểm sát` `phải` `xác minh` `những` `tình tiết` `mới` `;` `khi` `xét` `thấy` `cần thiết` `,` `Viện trưởng` `Viện kiểm sát` `có` `thẩm quyền` `kháng nghị` `tái thẩm` `yêu cầu` `Cơ quan` `điều tra` `có` `thẩm quyền` `xác minh` `tình tiết` `mới` `của` `vụ` `án` `và` `chuyển` `kết quả` `xác minh` `cho` `Viện kiểm sát` `.`
|
| 314 |
+
**UPOS:** NOUN AUX VERB DET NOUN ADJ PUNCT NOUN VERB VERB ADJ PUNCT NOUN NOUN VERB NOUN VERB NOUN VERB NOUN VERB VERB NOUN VERB NOUN ADJ ADP DET NOUN CCONJ VERB NOUN VERB ADP NOUN PUNCT
|
| 315 |
+
|
| 316 |
+
### Sample 18 [SUSPICIOUS] — s9904
|
| 317 |
+
|
| 318 |
+
**Text:** Đề xuất việc ban hành quy định , nội quy an toàn về phòng cháy và chữa cháy .
|
| 319 |
+
**Tokens:** `Đề xuất` `việc` `ban hành` `quy định` `,` `nội quy an toàn` `về` `phòng` `cháy` `và` `chữa cháy` `.`
|
| 320 |
+
**UPOS:** DET NOUN VERB NOUN PUNCT VERB ADP NOUN VERB CCONJ VERB PUNCT
|
| 321 |
+
**Flags:** long_token: "nội quy an toàn"
|
| 322 |
+
|
| 323 |
+
### Sample 19 [SUSPICIOUS] — s1460
|
| 324 |
+
|
| 325 |
+
**Text:** Điều kiện tặng cho không được vi phạm điều cấm của luật , không trái đạo đức xã hội .
|
| 326 |
+
**Tokens:** `Điều kiện` `tặng` `cho` `không` `được` `vi phạm` `điều cấm` `của` `luật` `,` `không` `trái` `đạo đức` `xã hội` `.`
|
| 327 |
+
**UPOS:** NOUN VERB ADP ADV AUX VERB NOUN ADP NOUN PUNCT ADV VERB NOUN NOUN PUNCT
|
| 328 |
+
|
| 329 |
+
### Sample 20 [SUSPICIOUS] — s9166
|
| 330 |
+
|
| 331 |
+
**Text:** Bảo vệ quyền và lợi ích hợp pháp của cơ quan , tổ chức , cá nhân trong lĩnh vực dân số gắn với phát triển kinh tế - xã hội , bảo vệ môi trường , bản sắc văn hóa dân tộc để phát triển nhanh và bền vững đất nước .
|
| 332 |
+
**Tokens:** `Bảo vệ quyền` `và` `lợi ích` `hợp pháp` `của` `cơ quan` `,` `tổ chức` `,` `cá nhân` `trong` `lĩnh vực` `dân số` `gắn` `với` `phát triển` `kinh tế` `-` `xã hội` `,` `bảo vệ` `môi trường` `,` `bản sắc` `văn hóa` `dân tộc` `để` `phát triển` `nhanh` `và` `bền vững` `đất nước` `.`
|
| 333 |
+
**UPOS:** NOUN CCONJ NOUN ADJ ADP NOUN PUNCT VERB PUNCT NOUN ADP NOUN NOUN VERB ADP VERB NOUN PUNCT NOUN PUNCT VERB NOUN PUNCT NOUN VERB NOUN ADP VERB ADJ CCONJ ADJ NOUN PUNCT
|
| 334 |
+
|
| 335 |
+
### Sample 21 [SUSPICIOUS] — s302
|
| 336 |
+
|
| 337 |
+
**Text:** Chi nhánh , văn phòng đại diện của pháp nhân 1 .
|
| 338 |
+
**Tokens:** `Chi nhánh` `,` `văn phòng` `đại diện` `của` `pháp nhân` `1` `.`
|
| 339 |
+
**UPOS:** NOUN PUNCT NOUN VERB ADP NOUN NUM PUNCT
|
| 340 |
+
|
| 341 |
+
### Sample 22 [SUSPICIOUS] — s8897
|
| 342 |
+
|
| 343 |
+
**Text:** Hành vi phạm tội mà công dân Việt Nam bị kết án ở nước ngoài cũng cấu thành tội phạm theo quy định của Bộ luật hình sự nước Cộng hòa xã hội chủ nghĩa Việt Nam ; 3 .
|
| 344 |
+
**Tokens:** `Hành vi` `phạm tội` `mà` `công dân` `Việt Nam` `bị` `kết án` `ở` `nước ngoài` `cũng` `cấu thành` `tội phạm` `theo` `quy định` `của` `Bộ luật hình sự` `nước` `Cộng hòa` `xã hội chủ nghĩa Việt Nam` `;` `3` `.`
|
| 345 |
+
**UPOS:** NOUN VERB SCONJ NOUN PROPN AUX VERB ADP NOUN ADV VERB NOUN ADP NOUN ADP DET NOUN VERB NOUN PUNCT NUM PUNCT
|
| 346 |
+
**Flags:** long_token: "Bộ luật hình sự"; long_token: "xã hội chủ nghĩa Việt Nam"; cross_boundary: "xã hội chủ nghĩa Việt Nam"
|
| 347 |
+
|
| 348 |
+
### Sample 23 [SUSPICIOUS] — s6568
|
| 349 |
+
|
| 350 |
+
**Text:** Trong thời hạn 05 ngày làm việc , kể từ ngày nhận được văn bản của cơ quan có thẩm quyền ở nước ngoài gửi về thì Bộ Tư pháp phải trả lời cho Tòa án .
|
| 351 |
+
**Tokens:** `Trong` `thời hạn` `05` `ngày` `làm việc` `,` `kể` `từ` `ngày` `nhận` `được` `văn bản` `của` `cơ quan` `có` `thẩm quyền` `ở` `nước ngoài` `gửi` `về` `thì` `Bộ Tư pháp` `phải` `trả lời` `cho` `Tòa án` `.`
|
| 352 |
+
**UPOS:** ADP NOUN VERB NOUN VERB PUNCT VERB ADP NOUN VERB ADV NOUN ADP NOUN VERB NOUN ADP NOUN VERB ADV SCONJ VERB AUX VERB ADP ADJ PUNCT
|
| 353 |
+
**Flags:** cross_boundary: "Bộ Tư pháp"
|
| 354 |
+
|
| 355 |
+
### Sample 24 [SUSPICIOUS] — s3205
|
| 356 |
+
|
| 357 |
+
**Text:** Bên thuê lai dắt có nghĩa vụ chuẩn bị đầy đủ các điều kiện bảo đảm an toàn đối với tàu theo thỏa thuận trong hợp đồng lai dắt tàu biển .
|
| 358 |
+
**Tokens:** `Bên` `thuê` `lai dắt` `có` `nghĩa vụ` `chuẩn bị` `đầy đủ` `các` `điều kiện` `bảo đảm` `an toàn` `đối với` `tàu` `theo` `thỏa thuận` `trong` `hợp đồng` `lai dắt` `tàu biển` `.`
|
| 359 |
+
**UPOS:** NOUN VERB NOUN VERB NOUN VERB ADP DET NOUN VERB ADJ ADP NOUN ADP NOUN ADP NOUN VERB NOUN PUNCT
|
| 360 |
+
|
| 361 |
+
### Sample 25 [SUSPICIOUS] — s7069
|
| 362 |
+
|
| 363 |
+
**Text:** Đối với người đang bị truy nã thì bất kỳ người nào cũng có quyền bắt và giải ngay người bị bắt đến cơ quan Công an , Viện kiểm sát hoặc Ủy ban nhân dân nơi gần nhất .
|
| 364 |
+
**Tokens:** `Đối với` `người` `đang` `bị` `truy nã` `thì` `bất kỳ` `người` `nào` `cũng` `có` `quyền` `bắt` `và` `giải` `ngay` `người` `bị` `bắt` `đến` `cơ quan` `Công an` `,` `Viện kiểm sát` `hoặc` `Ủy ban nhân dân` `nơi` `gần` `nhất` `.`
|
| 365 |
+
**UPOS:** ADP NOUN ADV AUX VERB SCONJ DET NOUN PRON ADV VERB NOUN VERB CCONJ VERB PART NOUN AUX VERB ADP NOUN NOUN PUNCT NOUN CCONJ VERB NOUN ADJ ADV PUNCT
|
| 366 |
+
**Flags:** long_token: "Ủy ban nhân dân"
|
| 367 |
+
|
| 368 |
+
### Sample 26 [SUSPICIOUS] — s9600
|
| 369 |
+
|
| 370 |
+
**Text:** Luật này được Quốc hội nước Cộng hòa xã hội chủ nghĩa Việt Nam khóa XV , Kỳ họp thứ 10 thông qua ngày 10 tháng 12 năm 2025 .
|
| 371 |
+
**Tokens:** `Luật` `này` `được` `Quốc hội` `nước` `Cộng hòa` `xã hội chủ nghĩa Việt Nam` `khóa XV` `,` `Kỳ` `họp` `thứ` `10` `thông qua` `ngày` `10` `tháng` `12` `năm` `2025` `.`
|
| 372 |
+
**UPOS:** NOUN PRON AUX VERB NOUN NOUN VERB NOUN PUNCT PROPN VERB NOUN NUM VERB NOUN NUM NOUN NUM NOUN NUM PUNCT
|
| 373 |
+
**Flags:** long_token: "xã hội chủ nghĩa Việt Nam"; cross_boundary: "xã hội chủ nghĩa Việt Nam"; cross_boundary: "khóa XV"
|
| 374 |
+
|
| 375 |
+
### Sample 27 [SUSPICIOUS] — s4002
|
| 376 |
+
|
| 377 |
+
**Text:** Tội tổ chức tảo hôn Người nào tổ chức việc lấy vợ , lấy chồng cho những người chưa đến tuổi kết hôn , đã bị xử phạt vi phạm hành chính về hành vi này mà còn vi phạm , thì bị phạt tiền từ 10.000.000 đồng đến 30.000.000 đồng hoặc phạt cải tạo không giam giữ đến 02 năm .
|
| 378 |
+
**Tokens:** `Tội` `tổ chức` `tảo hôn Người` `nào` `tổ chức` `việc` `lấy` `vợ` `,` `lấy` `chồng` `cho` `những` `người` `chưa` `đến` `tuổi` `kết hôn` `,` `đã` `bị` `xử phạt` `vi phạm` `hành chính` `về` `hành vi` `này` `mà còn` `vi phạm` `,` `thì` `bị` `phạt` `tiền` `từ` `10.000.000` `đồng` `đến` `30.000.000` `đồng` `hoặc` `phạt` `cải tạo` `không` `giam giữ` `đến` `02` `năm` `.`
|
| 379 |
+
**UPOS:** PROPN VERB NOUN PRON VERB NOUN VERB NOUN PUNCT VERB NOUN ADP DET NOUN ADV VERB NOUN NUM PUNCT ADV AUX VERB VERB NOUN ADP NOUN PRON ADV VERB PUNCT SCONJ AUX VERB NOUN ADP NUM NOUN ADP NUM NOUN CCONJ VERB VERB ADV VERB ADP NUM NOUN PUNCT
|
| 380 |
+
**Flags:** cross_boundary: "tảo hôn Người"
|
| 381 |
+
|
| 382 |
+
### Sample 28 [SUSPICIOUS] — s1103
|
| 383 |
+
|
| 384 |
+
**Text:** Các bên thỏa thuận sử dụng biện pháp bảo đảm khác để thay thế cho cầm giữ .
|
| 385 |
+
**Tokens:** `Các` `bên` `thỏa thuận` `sử dụng` `biện pháp` `bảo đảm` `khác` `để` `thay thế` `cho` `cầm giữ` `.`
|
| 386 |
+
**UPOS:** DET NOUN NOUN VERB NOUN VERB ADJ ADP VERB ADP VERB PUNCT
|
| 387 |
+
|
| 388 |
+
### Sample 29 [SUSPICIOUS] — s2704
|
| 389 |
+
|
| 390 |
+
**Text:** Chủ tàu và thuyền trưởng có trách nhiệm tạo điều kiện để các cơ quan nhà nước có thẩm quyền quy định tại khoản 1 Điều này tiến hành thanh tra , kiểm tra tàu biển .
|
| 391 |
+
**Tokens:** `Chủ` `tàu` `và` `thuyền trưởng` `có` `trách nhiệm` `tạo` `điều kiện` `để` `các` `cơ quan` `nhà nước` `có` `thẩm quyền` `quy định` `tại` `khoản` `1` `Điều` `này` `tiến hành` `thanh tra` `,` `kiểm tra` `tàu biển` `.`
|
| 392 |
+
**UPOS:** NOUN NOUN CCONJ NOUN VERB NOUN VERB NOUN ADP DET NOUN NOUN VERB NOUN VERB ADP NOUN NUM NOUN PRON VERB VERB PUNCT VERB NOUN PUNCT
|
| 393 |
+
|
| 394 |
+
### Sample 30 [SUSPICIOUS] — s6629
|
| 395 |
+
|
| 396 |
+
**Text:** Hình thức xử phạt , thẩm quyền , trình tự , thủ tục xử phạt Hình thức xử phạt , thẩm quyền , trình tự , thủ tục xử phạt hành chính đối với các hành vi cản trở hoạt động tố tụng dân sự được thực hiện theo quy định của Luật xử lý vi phạm hành chính và pháp luật có liên quan .
|
| 397 |
+
**Tokens:** `Hình thức` `xử phạt` `,` `thẩm quyền` `,` `trình tự` `,` `thủ tục` `xử phạt` `Hình thức` `xử phạt` `,` `thẩm quyền` `,` `trình tự` `,` `thủ tục` `xử phạt` `hành chính` `đối với` `các` `hành vi` `cản trở` `hoạt động` `tố tụng dân sự` `được` `thực hiện` `theo` `quy định` `của` `Luật` `xử lý` `vi phạm` `hành chính` `và` `pháp luật` `có` `liên quan` `.`
|
| 398 |
+
**UPOS:** NOUN VERB PUNCT NOUN PUNCT NOUN PUNCT NOUN VERB NOUN VERB PUNCT NOUN PUNCT NOUN PUNCT NOUN VERB NOUN ADP DET NOUN VERB VERB VERB AUX VERB ADP NOUN ADP PROPN VERB VERB NOUN CCONJ NOUN VERB VERB PUNCT
|
| 399 |
+
**Flags:** long_token: "tố tụng dân sự"
|
| 400 |
+
|
| 401 |
+
### Sample 31 [RANDOM] — s9723
|
| 402 |
+
|
| 403 |
+
**Text:** Trách nhiệm của tổ chức kinh tế và tổ chức khác , gia đình , cá nhân về công tác dân số 1 .
|
| 404 |
+
**Tokens:** `Trách nhiệm` `của` `tổ chức` `kinh tế` `và` `tổ chức` `khác` `,` `gia đình` `,` `cá nhân` `về` `công tác` `dân số` `1` `.`
|
| 405 |
+
**UPOS:** NOUN ADP NOUN NOUN CCONJ VERB ADJ PUNCT NOUN PUNCT NOUN ADP NOUN NOUN NUM PUNCT
|
| 406 |
+
|
| 407 |
+
### Sample 32 [RANDOM] — s9128
|
| 408 |
+
|
| 409 |
+
**Text:** Chăm sóc trẻ em có khuyết tật .
|
| 410 |
+
**Tokens:** `Chăm sóc` `trẻ em` `có` `khuyết tật` `.`
|
| 411 |
+
**UPOS:** VERB NOUN VERB NOUN PUNCT
|
| 412 |
+
|
| 413 |
+
### Sample 33 [RANDOM] — s2500
|
| 414 |
+
|
| 415 |
+
**Text:** Thông tin trong biểu mẫu báo cáo y tế phải được giữ bí mật và chỉ sử dụng cho việc chuẩn đoán , chăm sóc , điều trị cho thuyền viên .
|
| 416 |
+
**Tokens:** `Thông tin` `trong` `biểu mẫu` `báo cáo` `y tế` `phải` `được` `giữ` `bí mật` `và` `chỉ` `sử dụng` `cho` `việc` `chuẩn đoán` `,` `chăm sóc` `,` `điều trị` `cho` `thuyền viên` `.`
|
| 417 |
+
**UPOS:** NOUN ADP DET VERB NOUN AUX AUX VERB ADJ CCONJ ADV VERB ADP NOUN VERB PUNCT VERB PUNCT VERB ADP NOUN PUNCT
|
| 418 |
+
|
| 419 |
+
### Sample 34 [RANDOM] — s1450
|
| 420 |
+
|
| 421 |
+
**Text:** Tặng cho động sản 1 .
|
| 422 |
+
**Tokens:** `Tặng` `cho` `động sản` `1` `.`
|
| 423 |
+
**UPOS:** VERB ADP NOUN NUM PUNCT
|
| 424 |
+
|
| 425 |
+
### Sample 35 [RANDOM] — s9324
|
| 426 |
+
|
| 427 |
+
**Text:** Chuyển hạng sĩ quan dự bị là chuyển sĩ quan dự bị từ hạng một sang hạng hai .
|
| 428 |
+
**Tokens:** `Chuyển` `hạng` `sĩ quan` `dự bị` `là` `chuyển` `sĩ quan` `dự bị` `từ` `hạng` `một` `sang` `hạng` `hai` `.`
|
| 429 |
+
**UPOS:** VERB NOUN NOUN ADJ AUX VERB NOUN ADJ ADP NOUN NUM VERB NOUN NUM PUNCT
|
| 430 |
+
**Flags:** inconsistent: "Chuyển" + "hạng" (also as "chuyển hạng")
|
| 431 |
+
|
| 432 |
+
### Sample 36 [RANDOM] — s1898
|
| 433 |
+
|
| 434 |
+
**Text:** Bồi thường thiệt hại do làm ô nhiễm môi trường Chủ thể làm ô nhiễm môi trường mà gây thiệt hại thì phải bồi thường theo quy định của pháp luật , kể cả trường hợp chủ thể đó không có lỗi .
|
| 435 |
+
**Tokens:** `Bồi thường` `thiệt hại` `do` `làm` `ô nhiễm` `môi trường` `Chủ thể` `làm` `ô nhiễm` `môi trường` `mà` `gây` `thiệt hại` `thì` `phải` `bồi thường` `theo` `quy định` `của` `pháp luật` `,` `kể cả` `trường hợp` `chủ thể` `đó` `không` `có` `lỗi` `.`
|
| 436 |
+
**UPOS:** VERB ADJ ADP VERB VERB NOUN NOUN VERB VERB NOUN SCONJ VERB ADJ SCONJ AUX VERB ADP NOUN ADP NOUN PUNCT CCONJ NOUN NOUN PRON ADV VERB NOUN PUNCT
|
| 437 |
+
**Flags:** inconsistent: "thì" + "phải" (also as "thì phải")
|
| 438 |
+
|
| 439 |
+
### Sample 37 [RANDOM] — s409
|
| 440 |
+
|
| 441 |
+
**Text:** Giao dịch dân sự thông qua phương tiện điện tử dưới hình thức thông điệp dữ liệu theo quy định của pháp luật về giao dịch điện tử được coi là giao dịch bằng văn bản .
|
| 442 |
+
**Tokens:** `Giao dịch` `dân sự` `thông qua` `phương tiện` `điện tử` `dưới` `hình thức` `thông điệp` `dữ liệu` `theo` `quy định` `của` `pháp luật` `về` `giao dịch` `điện tử` `được` `coi` `là` `giao dịch` `bằng` `văn bản` `.`
|
| 443 |
+
**UPOS:** VERB NOUN VERB NOUN NOUN ADP NOUN NOUN VERB ADP NOUN ADP NOUN ADP VERB NOUN AUX VERB AUX VERB ADP NOUN PUNCT
|
| 444 |
+
|
| 445 |
+
### Sample 38 [RANDOM] — s9712
|
| 446 |
+
|
| 447 |
+
**Text:** Chi đào tạo , bồi dưỡng , phát triển nguồn nhân lực làm công tác dân số .
|
| 448 |
+
**Tokens:** `Chi` `đào tạo` `,` `bồi dưỡng` `,` `phát triển` `nguồn` `nhân lực` `làm` `công tác` `dân số` `.`
|
| 449 |
+
**UPOS:** PROPN VERB PUNCT VERB PUNCT VERB NOUN NOUN VERB NOUN NOUN PUNCT
|
| 450 |
+
|
| 451 |
+
### Sample 39 [RANDOM] — s7677
|
| 452 |
+
|
| 453 |
+
**Text:** Giám định bổ sung 1 .
|
| 454 |
+
**Tokens:** `Giám định` `bổ sung` `1` `.`
|
| 455 |
+
**UPOS:** VERB VERB NUM PUNCT
|
| 456 |
+
|
| 457 |
+
### Sample 40 [RANDOM] — s2250
|
| 458 |
+
|
| 459 |
+
**Text:** Xây dựng công trình làm giảm hoặc mất tác dụng của công trình hàng hải .
|
| 460 |
+
**Tokens:** `Xây dựng` `công trình` `làm` `giảm` `hoặc` `mất` `tác dụng` `của` `công trình` `hàng hải` `.`
|
| 461 |
+
**UPOS:** VERB NOUN VERB VERB CCONJ VERB NOUN ADP NOUN NOUN PUNCT
|
| 462 |
+
|
| 463 |
+
### Sample 41 [RANDOM] — s1359
|
| 464 |
+
|
| 465 |
+
**Text:** Tài sản bán thuộc sở hữu của người bán hoặc người bán có quyền bán .
|
| 466 |
+
**Tokens:** `Tài sản` `bán` `thuộc` `sở hữu` `của` `người` `bán` `hoặc` `người` `bán` `có` `quyền` `bán` `.`
|
| 467 |
+
**UPOS:** NOUN VERB VERB NOUN ADP NOUN VERB CCONJ NOUN VERB VERB NOUN VERB PUNCT
|
| 468 |
+
|
| 469 |
+
### Sample 42 [RANDOM] — s8218
|
| 470 |
+
|
| 471 |
+
**Text:** Trong thời hạn 10 ngày kể từ ngày nhận được đơn kháng cáo quá hạn và chứng cứ , tài liệu , đồ vật kèm theo ( nếu có ) , Tòa án cấp phúc thẩm thành lập Hội đồng gồm ba Thẩm phán để xem xét kháng cáo quá hạn .
|
| 472 |
+
**Tokens:** `Trong` `thời hạn` `10` `ngày` `kể` `từ` `ngày` `nhận` `được` `đơn` `kháng cáo` `quá` `hạn` `và` `chứng cứ` `,` `tài liệu` `,` `đồ vật` `kèm` `theo` `(` `nếu` `có` `)` `,` `Tòa án` `cấp` `phúc thẩm` `thành lập` `Hội đồng` `gồm` `ba` `Thẩm phán` `để` `xem xét` `kháng cáo` `quá` `hạn` `.`
|
| 473 |
+
**UPOS:** ADP NOUN NUM NOUN VERB ADP NOUN VERB AUX NOUN VERB ADV ADJ CCONJ NOUN PUNCT NOUN PUNCT NOUN VERB VERB PUNCT SCONJ VERB PUNCT PUNCT VERB NOUN VERB VERB NOUN VERB NUM NOUN ADP VERB ADJ ADV ADJ PUNCT
|
| 474 |
+
|
| 475 |
+
### Sample 43 [RANDOM] — s5711
|
| 476 |
+
|
| 477 |
+
**Text:** Bảo đảm tính khách quan của người làm chứng 1 .
|
| 478 |
+
**Tokens:** `Bảo đảm` `tính` `khách quan` `của` `người` `làm chứng` `1` `.`
|
| 479 |
+
**UPOS:** VERB NOUN ADJ ADP NOUN VERB NUM PUNCT
|
| 480 |
+
|
| 481 |
+
### Sample 44 [RANDOM] — s7157
|
| 482 |
+
|
| 483 |
+
**Text:** Những người có thẩm quyền quy định tại khoản 1 Điều 113 của Bộ luật này , Thẩm phán chủ tọa phiên tòa có quyền quyết định tạm hoãn xuất cảnh .
|
| 484 |
+
**Tokens:** `Những` `người` `có` `thẩm quyền` `quy định` `tại` `khoản` `1` `Điều` `113` `của` `Bộ luật` `này` `,` `Thẩm phán` `chủ` `tọa` `phiên` `tòa` `có` `quyền` `quyết định` `tạm` `hoãn` `xuất cảnh` `.`
|
| 485 |
+
**UPOS:** DET NOUN VERB NOUN VERB ADP NOUN NUM NOUN ADJ ADP NOUN PRON PUNCT NOUN NOUN NOUN NOUN NOUN VERB NOUN VERB ADJ VERB VERB PUNCT
|
| 486 |
+
**Flags:** inconsistent: "chủ" + "tọa" (also as "chủ tọa"); inconsistent: "phiên" + "tòa" (also as "phiên tòa")
|
| 487 |
+
|
| 488 |
+
### Sample 45 [RANDOM] — s1427
|
| 489 |
+
|
| 490 |
+
**Text:** Bên bán phải chịu mọi rủi ro xảy ra đối với vật , trừ trường hợp có thỏa thuận khác .
|
| 491 |
+
**Tokens:** `Bên` `bán` `phải` `chịu` `mọi` `rủi ro` `xảy` `ra` `đối với` `vật` `,` `trừ` `trường hợp` `có` `thỏa thuận` `khác` `.`
|
| 492 |
+
**UPOS:** NOUN VERB AUX VERB DET ADJ VERB VERB ADP NOUN PUNCT VERB NOUN VERB NOUN ADJ PUNCT
|
| 493 |
+
|
| 494 |
+
### Sample 46 [RANDOM] — s9190
|
| 495 |
+
|
| 496 |
+
**Text:** Xây dựng chính sách thu hút nguồn nhân lực làm việc trong lĩnh vực lão khoa .
|
| 497 |
+
**Tokens:** `Xây dựng` `chính sách` `thu hút` `nguồn` `nhân lực` `làm việc` `trong` `lĩnh vực` `lão khoa` `.`
|
| 498 |
+
**UPOS:** VERB NOUN ADP NOUN NOUN VERB ADP NOUN NOUN PUNCT
|
| 499 |
+
|
| 500 |
+
### Sample 47 [RANDOM] — s530
|
| 501 |
+
|
| 502 |
+
**Text:** Quyền khác đối với tài sản bao gồm : a ) Quyền đối với bất động sản liền kề ; b ) Quyền hưởng dụng ; c ) Quyền bề mặt .
|
| 503 |
+
**Tokens:** `Quyền` `khác` `đối với` `tài sản` `bao gồm` `:` `a` `)` `Quyền` `đối với` `bất động sản` `liền` `kề` `;` `b` `)` `Quyền` `hưởng dụng` `;` `c` `)` `Quyền` `bề mặt` `.`
|
| 504 |
+
**UPOS:** NOUN ADJ ADP NOUN VERB PUNCT NOUN PUNCT PROPN ADP NOUN ADV VERB PUNCT NOUN PUNCT PROPN VERB PUNCT NOUN PUNCT PROPN VERB PUNCT
|
| 505 |
+
|
| 506 |
+
### Sample 48 [RANDOM] — s6985
|
| 507 |
+
|
| 508 |
+
**Text:** Trường hợp do trở ngại khách quan thì thời hạn này có thể kéo dài nhưng không quá 15 ngày .
|
| 509 |
+
**Tokens:** `Trường hợp` `do` `trở ngại` `khách quan` `thì` `thời hạn` `này` `có thể` `kéo dài` `nhưng` `không` `quá` `15` `ngày` `.`
|
| 510 |
+
**UPOS:** NOUN ADP NOUN ADJ SCONJ NOUN PRON ADV VERB CCONJ ADV ADV NUM NOUN PUNCT
|
| 511 |
+
|
| 512 |
+
### Sample 49 [RANDOM] — s9520
|
| 513 |
+
|
| 514 |
+
**Text:** Việc điều chỉnh quy mô , cơ cấu dân số bảo đảm phù hợp với sự phát triển kinh tế - xã hội thông qua các biện pháp sau đây : a ) Điều chỉnh mức sinh ; b ) Giảm mức tử vong ; c ) Các biện pháp khác .
|
| 515 |
+
**Tokens:** `Việc` `điều chỉnh` `quy mô` `,` `cơ cấu` `dân số` `bảo đảm` `phù hợp` `với` `sự` `phát triển` `kinh tế` `-` `xã hội` `thông qua` `các` `biện pháp` `sau` `đây` `:` `a` `)` `Điều chỉnh` `mức` `sinh` `;` `b` `)` `Giảm` `mức` `tử vong` `;` `c` `)` `Các` `biện pháp` `khác` `.`
|
| 516 |
+
**UPOS:** NOUN VERB NOUN PUNCT NUM NOUN VERB VERB ADP NOUN VERB NOUN PUNCT NOUN VERB DET NOUN NOUN PRON PUNCT NOUN PUNCT VERB NOUN VERB PUNCT NOUN PUNCT PUNCT NOUN VERB PUNCT NOUN PUNCT DET NOUN ADJ PUNCT
|
| 517 |
+
|
| 518 |
+
### Sample 50 [RANDOM] — s9771
|
| 519 |
+
|
| 520 |
+
**Text:** Ban hành và áp dụng tiêu chuẩn về phòng cháy và chữa cháy 1 .
|
| 521 |
+
**Tokens:** `Ban hành` `và` `áp dụng` `tiêu chuẩn` `về` `phòng` `cháy` `và` `chữa cháy` `1` `.`
|
| 522 |
+
**UPOS:** NOUN CCONJ VERB NOUN ADP NOUN VERB CCONJ VERB NUM PUNCT
|
| 523 |
+
|
| 524 |
+
### Sample 51 [RANDOM] — s1491
|
| 525 |
+
|
| 526 |
+
**Text:** Hợp đồng thuê nhà ở , hợp đồng thuê nhà để sử dụng vào mục đích khác được thực hiện theo quy định của Bộ luật này , Luật nhà ở và quy định khác của pháp luật có liên quan .
|
| 527 |
+
**Tokens:** `Hợp đồng` `thuê` `nhà` `ở` `,` `hợp đồng` `thuê` `nhà` `để` `sử dụng` `vào` `mục đích` `khác` `được` `thực hiện` `theo` `quy định` `của` `Bộ luật` `này` `,` `Luật` `nhà` `ở` `và` `quy định` `khác` `của` `pháp luật` `có` `liên quan` `.`
|
| 528 |
+
**UPOS:** NOUN VERB NOUN VERB PUNCT NOUN VERB NOUN ADP VERB ADP NOUN ADJ AUX VERB ADP NOUN ADP NOUN PRON PUNCT NOUN NOUN VERB CCONJ NOUN ADJ ADP NOUN VERB VERB PUNCT
|
| 529 |
+
**Flags:** inconsistent: "nhà" + "ở" (also as "nhà ở"); inconsistent: "nhà" + "ở" (also as "nhà ở")
|
| 530 |
+
|
| 531 |
+
### Sample 52 [RANDOM] — s8958
|
| 532 |
+
|
| 533 |
+
**Text:** Tăng cường thanh tra , kiểm tra đột xuất đối với tổ chức , cá nhân có dấu hiệu tham nhũng , tiêu cực ; các trường hợp có dấu hiệu tội phạm qua thanh tra , kiểm tra phải chuyển hồ sơ vụ việc cho cơ quan điều tra ; xử lý nghiêm và công khai kết quả xử lý hành vi tham nhũng .
|
| 534 |
+
**Tokens:** `Tăng cường` `thanh tra` `,` `kiểm tra` `đột xuất` `đối với` `tổ chức` `,` `cá nhân` `có` `dấu hiệu` `tham nhũng` `,` `tiêu cực` `;` `các` `trường hợp` `có` `dấu hiệu` `tội phạm` `qua` `thanh tra` `,` `kiểm tra` `phải` `chuyển` `hồ sơ` `vụ việc` `cho` `cơ quan` `điều tra` `;` `xử lý` `nghiêm` `và` `công khai` `kết quả` `xử lý` `hành vi` `tham nhũng` `.`
|
| 535 |
+
**UPOS:** VERB VERB PUNCT VERB NOUN ADP VERB PUNCT NOUN VERB NOUN VERB PUNCT NOUN PUNCT DET NOUN VERB NOUN NOUN ADP VERB PUNCT VERB AUX VERB NOUN NOUN ADP NOUN VERB PUNCT VERB ADJ CCONJ VERB NOUN VERB NOUN VERB PUNCT
|
| 536 |
+
|
| 537 |
+
### Sample 53 [RANDOM] — s8276
|
| 538 |
+
|
| 539 |
+
**Text:** Việc đình chỉ xét xử phúc thẩm trước khi mở phiên tòa do Thẩm phán chủ tọa phiên tòa quyết định , tại phiên tòa do Hội đồng xét xử quyết định .
|
| 540 |
+
**Tokens:** `Việc` `đình chỉ` `xét xử` `phúc thẩm` `trước` `khi` `mở` `phiên` `tòa` `do` `Thẩm phán` `chủ` `tọa` `phiên` `tòa` `quyết định` `,` `tại` `phiên` `tòa` `do` `Hội đồng` `xét xử` `quyết định` `.`
|
| 541 |
+
**UPOS:** NOUN NOUN VERB VERB ADP NOUN VERB NOUN VERB ADP NOUN NOUN NOUN NOUN NOUN VERB PUNCT ADP NOUN NOUN ADP NOUN VERB VERB PUNCT
|
| 542 |
+
**Flags:** inconsistent: "phiên" + "tòa" (also as "phiên tòa"); inconsistent: "chủ" + "tọa" (also as "chủ tọa"); inconsistent: "phiên" + "tòa" (also as "phiên tòa"); inconsistent: "phiên" + "tòa" (also as "phiên tòa")
|
| 543 |
+
|
| 544 |
+
### Sample 54 [RANDOM] — s1430
|
| 545 |
+
|
| 546 |
+
**Text:** Bên dùng thử không phải chịu trách nhiệm về những hao mòn thông thường do việc dùng thử gây ra và không phải hoàn trả hoa lợi do việc dùng thử mang lại .
|
| 547 |
+
**Tokens:** `Bên` `dùng` `thử` `không` `phải` `chịu` `trách nhiệm` `về` `những` `hao mòn` `thông thường` `do` `việc` `dùng` `thử` `gây` `ra` `và` `không` `phải` `hoàn trả` `hoa lợi` `do` `việc` `dùng` `thử` `mang` `lại` `.`
|
| 548 |
+
**UPOS:** NOUN VERB VERB ADV AUX VERB NOUN ADP DET NOUN ADJ ADP NOUN VERB VERB VERB VERB CCONJ ADV AUX VERB NOUN ADP NOUN VERB VERB VERB ADV PUNCT
|
| 549 |
+
|
| 550 |
+
### Sample 55 [RANDOM] — s2626
|
| 551 |
+
|
| 552 |
+
**Text:** Nguyên tắc đối với tàu quân sự nước ngoài đến Việt Nam 1 .
|
| 553 |
+
**Tokens:** `Nguyên tắc` `đối với` `tàu` `quân sự` `nước ngoài` `đến` `Việt Nam` `1` `.`
|
| 554 |
+
**UPOS:** NOUN ADP NOUN NOUN NOUN ADP PROPN NUM PUNCT
|
| 555 |
+
|
| 556 |
+
### Sample 56 [RANDOM] — s1490
|
| 557 |
+
|
| 558 |
+
**Text:** Hợp đồng thuê tài sản Hợp đồng thuê tài sản là sự thỏa thuận giữa các bên , theo đó bên cho thuê giao tài sản cho bên thuê để sử dụng trong một thời hạn , bên thuê phải trả tiền thuê .
|
| 559 |
+
**Tokens:** `Hợp đồng` `thuê` `tài sản` `Hợp đồng` `thuê` `tài sản` `là` `sự` `thỏa thuận` `giữa` `các` `bên` `,` `theo` `đó` `bên` `cho` `thuê` `giao` `tài sản` `cho` `bên` `thuê` `để` `sử dụng` `trong` `một` `thời hạn` `,` `bên` `thuê` `phải` `trả` `tiền` `thuê` `.`
|
| 560 |
+
**UPOS:** NOUN VERB NOUN NOUN VERB NOUN AUX NOUN VERB ADP DET NOUN PUNCT VERB PRON NOUN VERB ADV VERB NOUN ADP NOUN VERB ADP VERB ADP NUM NOUN PUNCT NOUN VERB AUX VERB NOUN VERB PUNCT
|
| 561 |
+
|
| 562 |
+
### Sample 57 [RANDOM] — s5751
|
| 563 |
+
|
| 564 |
+
**Text:** Xem xét vật chứng Vật chứng , ảnh hoặc biên bản xác nhận vật chứng được đưa ra để xem xét tại phiên tòa .
|
| 565 |
+
**Tokens:** `Xem xét` `vật chứng` `Vật chứng` `,` `ảnh` `hoặc` `biên bản` `xác nhận` `vật chứng` `được` `đưa` `ra` `để` `xem xét` `tại` `phiên` `tòa` `.`
|
| 566 |
+
**UPOS:** VERB NOUN NOUN PUNCT NOUN CCONJ NOUN VERB NOUN AUX VERB VERB ADP VERB ADP NOUN NOUN PUNCT
|
| 567 |
+
**Flags:** inconsistent: "phiên" + "tòa" (also as "phiên tòa")
|
| 568 |
+
|
| 569 |
+
### Sample 58 [RANDOM] — s5354
|
| 570 |
+
|
| 571 |
+
**Text:** Trường hợp đình chỉ giải quyết vụ án quy định tại điểm c khoản 1 Điều 217 , điểm b khoản 1 Điều 299 của Bộ luật này thì nguyên đơn phải chịu chi phí ủy thác tư pháp ra nước ngoài .
|
| 572 |
+
**Tokens:** `Trường hợp` `đình chỉ` `giải quyết` `vụ` `án` `quy định` `tại` `điểm` `c` `khoản` `1` `Điều` `217` `,` `điểm` `b` `khoản` `1` `Điều` `299` `của` `Bộ luật` `này` `thì` `nguyên đơn` `phải` `chịu` `chi phí` `ủy thác` `tư pháp` `ra` `nước ngoài` `.`
|
| 573 |
+
**UPOS:** NOUN NOUN VERB DET NOUN VERB ADP NOUN VERB NOUN NUM NOUN ADJ PUNCT NOUN VERB NOUN NUM NOUN ADJ ADP NOUN PRON SCONJ VERB AUX VERB NOUN VERB VERB VERB NOUN PUNCT
|
| 574 |
+
|
| 575 |
+
### Sample 59 [RANDOM] — s5113
|
| 576 |
+
|
| 577 |
+
**Text:** Quyền , nghĩa vụ của người đại diện 1 .
|
| 578 |
+
**Tokens:** `Quyền` `,` `nghĩa vụ` `của` `người` `đại diện` `1` `.`
|
| 579 |
+
**UPOS:** NOUN PUNCT NOUN ADP NOUN VERB NUM PUNCT
|
| 580 |
+
|
| 581 |
+
### Sample 60 [RANDOM] — s5049
|
| 582 |
+
|
| 583 |
+
**Text:** Đưa ra yêu cầu phản tố đối với nguyên đơn , nếu có liên quan đến yêu cầu của nguyên đơn hoặc đề nghị đối trừ với nghĩa vụ của nguyên đơn .
|
| 584 |
+
**Tokens:** `Đưa` `ra` `yêu cầu` `phản tố` `đối với` `nguyên đơn` `,` `nếu` `có` `liên quan` `đến` `yêu cầu` `của` `nguyên đơn` `hoặc` `đề nghị` `đối trừ` `với` `nghĩa vụ` `của` `nguyên đơn` `.`
|
| 585 |
+
**UPOS:** VERB VERB VERB NOUN ADP NOUN PUNCT SCONJ ADV VERB ADP VERB ADP NOUN CCONJ VERB VERB ADP NOUN ADP NOUN PUNCT
|
| 586 |
+
|
| 587 |
+
### Sample 61 [RANDOM] — s5783
|
| 588 |
+
|
| 589 |
+
**Text:** Hội đồng xét xử phải thông báo cho những người có mặt tại phiên tòa và người tham gia tố tụng vắng mặt tại phiên tòa về giờ , ngày và địa điểm tuyên án .
|
| 590 |
+
**Tokens:** `Hội đồng` `xét xử` `phải` `thông báo` `cho` `những` `người` `có mặt` `tại` `phiên` `tòa` `và` `người` `tham gia` `tố tụng` `vắng mặt` `tại` `phiên` `tòa` `về` `giờ` `,` `ngày` `và` `địa điểm` `tuyên án` `.`
|
| 591 |
+
**UPOS:** NOUN VERB AUX VERB ADP DET NOUN VERB ADP NOUN NOUN CCONJ NOUN VERB VERB NOUN ADP NOUN NOUN ADP NOUN PUNCT NOUN CCONJ NOUN VERB PUNCT
|
| 592 |
+
**Flags:** inconsistent: "phiên" + "tòa" (also as "phiên tòa"); inconsistent: "phiên" + "tòa" (also as "phiên tòa")
|
| 593 |
+
|
| 594 |
+
### Sample 62 [RANDOM] — s1522
|
| 595 |
+
|
| 596 |
+
**Text:** Thời hạn thuê khoán Thời hạn thuê khoán do các bên thỏa thuận .
|
| 597 |
+
**Tokens:** `Thời hạn` `thuê` `khoán` `Thời hạn` `thuê` `khoán` `do` `các` `bên` `thỏa thuận` `.`
|
| 598 |
+
**UPOS:** NOUN VERB NOUN VERB VERB NOUN ADP DET NOUN NOUN PUNCT
|
| 599 |
+
**Flags:** inconsistent: "thuê" + "khoán" (also as "thuê khoán"); inconsistent: "thuê" + "khoán" (also as "thuê khoán")
|
| 600 |
+
|
| 601 |
+
### Sample 63 [RANDOM] — s3758
|
| 602 |
+
|
| 603 |
+
**Text:** Người dưới 18 tuổi phạm tội bị phạt cải tạo không giam giữ hoặc phạt tù , nếu có tiến bộ và đã chấp hành được một phần tư thời hạn , thì được Tòa án xét giảm ; riêng đối với hình phạt tù , mỗi lần có thể giảm đến 04 năm nhưng phải bảo đảm đã chấp hành ít nhất là hai phần năm mức hình phạt đã tuyên .
|
| 604 |
+
**Tokens:** `Người` `dưới` `18` `tuổi` `phạm tội` `bị` `phạt` `cải tạo` `không` `giam giữ` `hoặc` `phạt` `tù` `,` `nếu` `có` `tiến bộ` `và` `đã` `chấp hành` `được` `một` `phần tư` `thời hạn` `,` `thì` `được` `Tòa án` `xét` `giảm` `;` `riêng` `đối với` `hình phạt` `tù` `,` `mỗi` `lần` `có thể` `giảm` `đến` `04` `năm` `nhưng` `phải` `bảo đảm` `đã` `chấp hành` `ít nhất` `là` `hai` `phần` `năm` `mức` `hình phạt` `đã` `tuyên` `.`
|
| 605 |
+
**UPOS:** NOUN ADP NUM NOUN VERB AUX VERB VERB ADV VERB CCONJ VERB NOUN PUNCT SCONJ VERB NOUN CCONJ ADV VERB ADV NUM NOUN ADJ PUNCT SCONJ AUX VERB VERB VERB PUNCT ADV ADP VERB NOUN PUNCT DET NOUN ADV VERB ADP NUM NOUN SCONJ AUX VERB ADV VERB ADV AUX NUM NUM NUM NOUN NOUN ADV VERB PUNCT
|
| 606 |
+
|
| 607 |
+
### Sample 64 [RANDOM] — s5599
|
| 608 |
+
|
| 609 |
+
**Text:** Biên bản này được gửi ngay cho các đương sự tham gia hòa giải .
|
| 610 |
+
**Tokens:** `Biên bản` `này` `được` `gửi` `ngay` `cho` `các` `đương sự` `tham gia` `hòa` `giải` `.`
|
| 611 |
+
**UPOS:** NOUN PRON AUX VERB ADV ADP DET NOUN VERB VERB NOUN PUNCT
|
| 612 |
+
**Flags:** inconsistent: "hòa" + "giải" (also as "hòa giải")
|
| 613 |
+
|
| 614 |
+
### Sample 65 [RANDOM] — s8605
|
| 615 |
+
|
| 616 |
+
**Text:** Căn cứ , trình tự , thủ tục thay đổi , bổ sung quyết định khởi tố vụ án hình sự được thực hiện theo quy định tại Điều 156 của Bộ luật này .
|
| 617 |
+
**Tokens:** `Căn cứ` `,` `trình tự` `,` `thủ tục` `thay đổi` `,` `bổ sung` `quyết định` `khởi tố` `vụ` `án` `hình sự` `được` `thực hiện` `theo` `quy định` `tại` `Điều` `156` `của` `Bộ luật` `này` `.`
|
| 618 |
+
**UPOS:** NOUN PUNCT NOUN PUNCT NOUN VERB PUNCT VERB VERB VERB DET NOUN NOUN AUX VERB ADP NOUN ADP NOUN ADJ ADP NOUN PRON PUNCT
|
| 619 |
+
|
| 620 |
+
### Sample 66 [RANDOM] — s7443
|
| 621 |
+
|
| 622 |
+
**Text:** Ý kiến bổ sung , nhận xét được ghi vào biên bản ; trường hợp không chấp nhận bổ sung thì ghi rõ lý do vào biên bản .
|
| 623 |
+
**Tokens:** `Ý kiến` `bổ sung` `,` `nhận xét` `được` `ghi` `vào` `biên bản` `;` `trường hợp` `không` `chấp nhận` `bổ sung` `thì` `ghi` `rõ` `lý do` `vào` `biên bản` `.`
|
| 624 |
+
**UPOS:** NOUN VERB PUNCT NOUN AUX VERB ADP NOUN PUNCT NOUN ADV VERB VERB SCONJ VERB ADJ NOUN ADP NOUN PUNCT
|
| 625 |
+
|
| 626 |
+
### Sample 67 [RANDOM] — s8713
|
| 627 |
+
|
| 628 |
+
**Text:** Quyết định áp dụng thủ tục rút gọn 1 .
|
| 629 |
+
**Tokens:** `Quyết định` `áp dụng` `thủ tục` `rút` `gọn` `1` `.`
|
| 630 |
+
**UPOS:** NOUN VERB NOUN VERB ADJ NUM PUNCT
|
| 631 |
+
|
| 632 |
+
### Sample 68 [RANDOM] — s700
|
| 633 |
+
|
| 634 |
+
**Text:** Chiếm hữu , được lợi về tài sản theo quy định tại Điều 236 của Bộ luật này .
|
| 635 |
+
**Tokens:** `Chiếm hữu` `,` `được` `lợi` `về` `tài sản` `theo` `quy định` `tại` `Điều` `236` `của` `Bộ luật` `này` `.`
|
| 636 |
+
**UPOS:** NOUN PUNCT AUX VERB VERB NOUN ADP NOUN ADP NOUN ADJ ADP NOUN PRON PUNCT
|
| 637 |
+
|
| 638 |
+
### Sample 69 [RANDOM] — s8596
|
| 639 |
+
|
| 640 |
+
**Text:** Biên bản hòa giải được giao ngay cho những người tham gia hòa giải .
|
| 641 |
+
**Tokens:** `Biên bản` `hòa` `giải` `được` `giao` `ngay` `cho` `những` `người` `tham gia` `hòa` `giải` `.`
|
| 642 |
+
**UPOS:** DET VERB NOUN AUX VERB ADV ADP DET NOUN VERB VERB NOUN PUNCT
|
| 643 |
+
**Flags:** inconsistent: "hòa" + "giải" (also as "hòa giải"); inconsistent: "hòa" + "giải" (also as "hòa giải")
|
| 644 |
+
|
| 645 |
+
### Sample 70 [RANDOM] — s8483
|
| 646 |
+
|
| 647 |
+
**Text:** Căn cứ để kháng nghị theo thủ tục tái thẩm Bản án , quyết định của Tòa án đã có hiệu lực pháp luật bị kháng nghị theo thủ tục tái thẩm khi có một trong các căn cứ : 1 .
|
| 648 |
+
**Tokens:** `Căn cứ` `để` `kháng nghị` `theo` `thủ tục` `tái thẩm` `Bản án` `,` `quyết định` `của` `Tòa án` `đã` `có` `hiệu lực` `pháp luật` `bị` `kháng nghị` `theo` `thủ tục` `tái thẩm` `khi` `có` `một` `trong` `các` `căn cứ` `:` `1` `.`
|
| 649 |
+
**UPOS:** NOUN ADP VERB ADP NOUN NOUN VERB PUNCT VERB ADP NOUN ADV VERB NUM NOUN AUX VERB ADP NOUN NOUN NOUN VERB NUM ADP DET NOUN PUNCT NUM PUNCT
|
| 650 |
+
|
| 651 |
+
### Sample 71 [RANDOM] — s569
|
| 652 |
+
|
| 653 |
+
**Text:** Đối với mốc giới là tường nhà chung , chủ sở hữu bất động sản liền kề không được trổ cửa sổ , lỗ thông khí hoặc đục tường để đặt kết cấu xây dựng , trừ trường hợp được chủ sở hữu bất động sản liền kề đồng ý .
|
| 654 |
+
**Tokens:** `Đối với` `mốc giới` `là` `tường` `nhà` `chung` `,` `chủ sở hữu` `bất động sản` `liền` `kề` `không` `được` `trổ` `cửa sổ` `,` `lỗ` `thông khí` `hoặc` `đục` `tường` `để` `đặt` `kết cấu` `xây dựng` `,` `trừ` `trường hợp` `được` `chủ sở hữu` `bất động sản` `liền` `kề` `đồng ý` `.`
|
| 655 |
+
**UPOS:** ADP NOUN AUX NOUN NOUN ADJ PUNCT DET VERB ADV VERB ADV AUX VERB NOUN PUNCT NOUN VERB CCONJ ADJ NOUN ADP VERB NUM VERB PUNCT VERB NOUN AUX VERB VERB ADV VERB VERB PUNCT
|
| 656 |
+
|
| 657 |
+
### Sample 72 [RANDOM] — s8472
|
| 658 |
+
|
| 659 |
+
**Text:** Nếu hủy để xét xử lại thì tùy trường hợp , Hội đồng giám đốc thẩm có thể quyết định xét xử lại từ cấp sơ thẩm hoặc cấp phúc thẩm .
|
| 660 |
+
**Tokens:** `Nếu` `hủy` `để` `xét xử` `lại` `thì` `tùy` `trường hợp` `,` `Hội đồng` `giám đốc thẩm` `có thể` `quyết định` `xét xử` `lại` `từ` `cấp` `sơ thẩm` `hoặc` `cấp` `phúc thẩm` `.`
|
| 661 |
+
**UPOS:** SCONJ VERB ADP VERB VERB SCONJ DET NOUN PUNCT NOUN VERB ADV VERB VERB VERB ADP NOUN VERB CCONJ VERB VERB PUNCT
|
| 662 |
+
|
| 663 |
+
### Sample 73 [RANDOM] — s9734
|
| 664 |
+
|
| 665 |
+
**Text:** Người có từ 02 con đẻ trở lên .
|
| 666 |
+
**Tokens:** `Người` `có` `từ` `02` `con đẻ` `trở` `lên` `.`
|
| 667 |
+
**UPOS:** NOUN VERB ADP NOUN NOUN VERB VERB PUNCT
|
| 668 |
+
**Flags:** inconsistent: "trở" + "lên" (also as "trở lên")
|
| 669 |
+
|
| 670 |
+
### Sample 74 [RANDOM] — s1511
|
| 671 |
+
|
| 672 |
+
**Text:** Trường hợp bên thuê sử dụng tài sản không đúng mục đích , không đúng công dụng thì bên cho thuê có quyền đơn phương chấm dứt thực hiện hợp đồng và yêu cầu bồi thường thiệt hại .
|
| 673 |
+
**Tokens:** `Trường hợp` `bên` `thuê` `sử dụng` `tài sản` `không` `đúng` `mục đích` `,` `không` `đúng` `công dụng` `thì` `bên` `cho` `thuê` `có` `quyền` `đơn phương` `chấm dứt` `thực hiện` `hợp đồng` `và` `yêu cầu` `bồi thường` `thiệt hại` `.`
|
| 674 |
+
**UPOS:** NOUN NOUN VERB VERB NOUN ADV ADJ NOUN PUNCT ADV ADJ NOUN SCONJ NOUN VERB VERB VERB NOUN VERB VERB VERB NOUN CCONJ VERB VERB ADJ PUNCT
|
| 675 |
+
|
| 676 |
+
### Sample 75 [RANDOM] — s3806
|
| 677 |
+
|
| 678 |
+
**Text:** Người chuẩn bị phạm tội này , thì bị phạt tù từ 01 năm đến 05 năm .
|
| 679 |
+
**Tokens:** `Người` `chuẩn bị` `phạm tội` `này` `,` `thì` `bị` `phạt` `tù` `từ` `01` `năm` `đến` `05` `năm` `.`
|
| 680 |
+
**UPOS:** NOUN VERB VERB PRON PUNCT SCONJ AUX VERB VERB ADP NUM NOUN ADP NUM NOUN PUNCT
|
| 681 |
+
|
| 682 |
+
### Sample 76 [RANDOM] — s2604
|
| 683 |
+
|
| 684 |
+
**Text:** Nhiệm vụ , quyền hạn của Giám đốc Cảng vụ hàng hải 1 .
|
| 685 |
+
**Tokens:** `Nhiệm vụ` `,` `quyền hạn` `của` `Giám đốc` `Cảng vụ` `hàng hải` `1` `.`
|
| 686 |
+
**UPOS:** NOUN PUNCT NOUN ADP NOUN NOUN NOUN NUM PUNCT
|
| 687 |
+
|
| 688 |
+
### Sample 77 [RANDOM] — s1131
|
| 689 |
+
|
| 690 |
+
**Text:** Trường hợp bên có nghĩa vụ chậm trả tiền thì bên đó phải trả lãi đối với số tiền chậm trả tương ứng với thời gian chậm trả .
|
| 691 |
+
**Tokens:** `Trường hợp` `bên` `có` `nghĩa vụ` `chậm` `trả` `tiền` `thì` `bên` `đó` `phải` `trả` `lãi` `đối với` `số` `tiền` `chậm` `trả` `tương ứng` `với` `thời gian` `chậm` `trả` `.`
|
| 692 |
+
**UPOS:** NOUN NOUN VERB NOUN ADJ VERB NOUN SCONJ NOUN PRON AUX VERB NOUN ADP NOUN NOUN ADJ VERB VERB ADP NOUN ADJ VERB PUNCT
|
| 693 |
+
|
| 694 |
+
### Sample 78 [RANDOM] — s8270
|
| 695 |
+
|
| 696 |
+
**Text:** Đối với bị cáo đang bị tạm giam , nếu xét thấy cần tiếp tục tạm giam để hoàn thành việc xét xử thì Hội đồng xét xử ra quyết định tạm giam cho đến khi kết thúc phiên tòa .
|
| 697 |
+
**Tokens:** `Đối với` `bị cáo` `đang` `bị` `tạm` `giam` `,` `nếu` `xét` `thấy` `cần` `tiếp tục` `tạm` `giam` `để` `hoàn thành` `việc` `xét xử` `thì` `Hội đồng` `xét xử` `ra` `quyết định` `tạm` `giam` `cho` `đến` `khi` `kết thúc` `phiên` `tòa` `.`
|
| 698 |
+
**UPOS:** ADP NOUN ADV AUX ADJ VERB PUNCT SCONJ VERB VERB AUX VERB ADJ VERB ADP VERB NOUN VERB SCONJ NOUN VERB VERB NOUN ADJ VERB ADP ADP NOUN VERB NOUN VERB PUNCT
|
| 699 |
+
**Flags:** inconsistent: "phiên" + "tòa" (also as "phiên tòa")
|
| 700 |
+
|
| 701 |
+
### Sample 79 [RANDOM] — s5525
|
| 702 |
+
|
| 703 |
+
**Text:** Thời hạn thực hiện việc sửa đổi , bổ sung đơn khởi kiện không tính vào thời hiệu khởi kiện .
|
| 704 |
+
**Tokens:** `Thời hạn` `thực hiện` `việc` `sửa đổi` `,` `bổ sung` `đơn` `khởi kiện` `không` `tính` `vào` `thời hiệu` `khởi kiện` `.`
|
| 705 |
+
**UPOS:** NOUN VERB NOUN NUM PUNCT VERB NOUN VERB ADV VERB ADP NOUN NOUN PUNCT
|
| 706 |
+
|
| 707 |
+
### Sample 80 [RANDOM] — s4804
|
| 708 |
+
|
| 709 |
+
**Text:** Tranh chấp về xác định cha , mẹ cho con hoặc xác định con cho cha , mẹ .
|
| 710 |
+
**Tokens:** `Tranh chấp` `về` `xác định` `cha` `,` `mẹ` `cho` `con` `hoặc` `xác định` `con` `cho` `cha` `,` `mẹ` `.`
|
| 711 |
+
**UPOS:** VERB ADP VERB NOUN PUNCT NOUN ADP NOUN CCONJ VERB NOUN ADP NOUN PUNCT NOUN PUNCT
|
| 712 |
+
|
| 713 |
+
### Sample 81 [RANDOM] — s572
|
| 714 |
+
|
| 715 |
+
**Text:** Bảo đảm an toàn trong trường hợp cây cối , công trình có nguy cơ gây thiệt hại 1 .
|
| 716 |
+
**Tokens:** `Bảo đảm` `an toàn` `trong` `trường hợp` `cây cối` `,` `công trình` `có` `nguy cơ` `gây` `thiệt hại` `1` `.`
|
| 717 |
+
**UPOS:** VERB ADJ ADP NOUN NOUN PUNCT NOUN VERB NOUN VERB NOUN NUM PUNCT
|
| 718 |
+
|
| 719 |
+
### Sample 82 [RANDOM] — s3010
|
| 720 |
+
|
| 721 |
+
**Text:** Chứng từ vận tải đa phương thức là bằng chứng của hợp đồng vận tải đa phương thức , xác nhận việc người kinh doanh vận tải đa phương thức nhận hàng để vận chuyển và cam kết trả hàng theo đúng thỏa thuận của hợp đồng .
|
| 722 |
+
**Tokens:** `Chứng từ` `vận tải` `đa` `phương thức` `là` `bằng chứng` `của` `hợp đồng` `vận tải` `đa` `phương thức` `,` `xác nhận` `việc` `người` `kinh doanh` `vận tải` `đa` `phương thức` `nhận` `hàng` `để` `vận chuyển` `và` `cam kết` `trả` `hàng` `theo` `đúng` `thỏa thuận` `của` `hợp đồng` `.`
|
| 723 |
+
**UPOS:** NOUN VERB NUM NOUN AUX NOUN ADP NOUN VERB VERB NOUN PUNCT VERB NOUN NOUN VERB VERB NUM NOUN VERB NOUN ADP VERB CCONJ VERB VERB NOUN ADP ADJ NOUN ADP NOUN PUNCT
|
| 724 |
+
|
| 725 |
+
### Sample 83 [RANDOM] — s4371
|
| 726 |
+
|
| 727 |
+
**Text:** Tội hành nghề mê tín , dị đoan 1 .
|
| 728 |
+
**Tokens:** `Tội` `hành nghề` `mê tín` `,` `dị đoan` `1` `.`
|
| 729 |
+
**UPOS:** PROPN VERB NOUN PUNCT NOUN NUM PUNCT
|
| 730 |
+
|
| 731 |
+
### Sample 84 [RANDOM] — s3101
|
| 732 |
+
|
| 733 |
+
**Text:** Người thuê tàu có trách nhiệm trả tiền thuê tàu từ ngày nhận tàu đến ngày trả tàu cho chủ tàu .
|
| 734 |
+
**Tokens:** `Người` `thuê` `tàu` `có` `trách nhiệm` `trả` `tiền` `thuê` `tàu` `từ` `ngày` `nhận` `tàu` `đến` `ngày` `trả` `tàu` `cho` `chủ` `tàu` `.`
|
| 735 |
+
**UPOS:** DET VERB NOUN VERB NOUN VERB NOUN VERB NOUN ADP NOUN VERB NOUN ADP NOUN VERB NOUN ADP NOUN NOUN PUNCT
|
| 736 |
+
|
| 737 |
+
### Sample 85 [RANDOM] — s1944
|
| 738 |
+
|
| 739 |
+
**Text:** Người không được quyền hưởng di sản 1 .
|
| 740 |
+
**Tokens:** `Người` `không` `được` `quyền` `hưởng` `di sản` `1` `.`
|
| 741 |
+
**UPOS:** NOUN ADV AUX NOUN VERB NOUN NUM PUNCT
|
| 742 |
+
**Flags:** inconsistent: "quyền" + "hưởng" (also as "quyền hưởng")
|
| 743 |
+
|
| 744 |
+
### Sample 86 [RANDOM] — s9184
|
| 745 |
+
|
| 746 |
+
**Text:** Chính sách của Nhà nước về dân số 1 .
|
| 747 |
+
**Tokens:** `Chính sách` `của` `Nhà nước` `về` `dân số` `1` `.`
|
| 748 |
+
**UPOS:** NOUN ADP NOUN ADP NOUN NUM PUNCT
|
| 749 |
+
|
| 750 |
+
### Sample 87 [RANDOM] — s553
|
| 751 |
+
|
| 752 |
+
**Text:** Quyền và nghĩa vụ của chủ sở hữu , chủ thể có quyền khác đối với tài sản trong trường hợp xảy ra tình thế cấp thiết 1 .
|
| 753 |
+
**Tokens:** `Quyền` `và` `nghĩa vụ` `của` `chủ sở hữu` `,` `chủ thể` `có` `quyền` `khác` `đối với` `tài sản` `trong` `trường hợp` `xảy` `ra` `tình thế` `cấp thiết` `1` `.`
|
| 754 |
+
**UPOS:** NOUN CCONJ NOUN ADP NOUN PUNCT NOUN VERB NOUN ADJ ADP NOUN ADP NOUN VERB VERB NOUN NOUN NUM PUNCT
|
| 755 |
+
|
| 756 |
+
### Sample 88 [RANDOM] — s3919
|
| 757 |
+
|
| 758 |
+
**Text:** Người nào mua bán , chiếm đoạt mô hoặc bộ phận cơ thể người khác , thì bị phạt tù từ 03 năm đến 07 năm .
|
| 759 |
+
**Tokens:** `Người` `nào` `mua bán` `,` `chiếm đoạt` `mô` `hoặc` `bộ phận` `cơ thể` `người` `khác` `,` `thì` `bị` `phạt` `tù` `từ` `03` `năm` `đến` `07` `năm` `.`
|
| 760 |
+
**UPOS:** NOUN PRON VERB PUNCT DET PRON CCONJ NOUN NUM NOUN ADJ PUNCT SCONJ AUX VERB VERB ADP NUM NOUN ADP NUM NOUN PUNCT
|
| 761 |
+
|
| 762 |
+
### Sample 89 [RANDOM] — s4289
|
| 763 |
+
|
| 764 |
+
**Text:** Tội xâm nhập trái phép vào mạng máy tính , mạng viễn thông hoặc phương tiện điện tử của người khác 1 .
|
| 765 |
+
**Tokens:** `Tội` `xâm nhập` `trái phép` `vào` `mạng` `máy tính` `,` `mạng` `viễn thông` `hoặc` `phương tiện` `điện tử` `của` `người` `khác` `1` `.`
|
| 766 |
+
**UPOS:** PROPN VERB ADV ADP NOUN NOUN PUNCT NOUN VERB CCONJ NOUN NOUN ADP NOUN ADJ NUM PUNCT
|
| 767 |
+
|
| 768 |
+
### Sample 90 [RANDOM] — s7826
|
| 769 |
+
|
| 770 |
+
**Text:** Quyết định trả hồ sơ để yêu cầu điều tra bổ sung phải ghi rõ vấn đề cần điều tra bổ sung quy định tại khoản 1 Điều này và các nội dung quy định tại khoản 2 Điều 132 của Bộ luật này .
|
| 771 |
+
**Tokens:** `Quyết định` `trả` `hồ sơ` `để` `yêu cầu` `điều tra` `bổ sung` `phải` `ghi` `rõ` `vấn đề` `cần` `điều tra` `bổ sung` `quy định` `tại` `khoản` `1` `Điều` `này` `và` `các` `nội dung` `quy định` `tại` `khoản` `2` `Điều` `132` `của` `Bộ luật` `này` `.`
|
| 772 |
+
**UPOS:** NOUN VERB NOUN ADP VERB VERB ADV AUX VERB ADJ NOUN AUX VERB VERB VERB ADP NOUN NUM NOUN PRON CCONJ DET NOUN VERB ADP NOUN NUM NOUN ADJ ADP NOUN PRON PUNCT
|
| 773 |
+
|
| 774 |
+
### Sample 91 [RANDOM] — s9883
|
| 775 |
+
|
| 776 |
+
**Text:** Khắc phục hậu quả vụ cháy 1 .
|
| 777 |
+
**Tokens:** `Khắc phục` `hậu quả` `vụ` `cháy` `1` `.`
|
| 778 |
+
**UPOS:** VERB NOUN NOUN VERB NUM PUNCT
|
| 779 |
+
|
| 780 |
+
### Sample 92 [RANDOM] — s3500
|
| 781 |
+
|
| 782 |
+
**Text:** Phạm tội có tổ chức là hình thức đồng phạm có sự câu kết chặt chẽ giữa những người cùng thực hiện tội phạm .
|
| 783 |
+
**Tokens:** `Phạm tội` `có` `tổ chức` `là` `hình thức` `đồng phạm` `có` `sự` `câu kết` `chặt chẽ` `giữa` `những` `người` `cùng` `thực hiện` `tội phạm` `.`
|
| 784 |
+
**UPOS:** NOUN VERB VERB AUX NOUN NOUN VERB NOUN VERB ADJ ADP DET NOUN ADV VERB NOUN PUNCT
|
| 785 |
+
|
| 786 |
+
### Sample 93 [RANDOM] — s3322
|
| 787 |
+
|
| 788 |
+
**Text:** Thời gian tiến hành phân bổ tổn thất chung không tính vào thời hiệu khởi kiện về tổn thất chung .
|
| 789 |
+
**Tokens:** `Thời gian` `tiến hành` `phân bổ` `tổn thất` `chung` `không` `tính` `vào` `thời hiệu` `khởi kiện` `về` `tổn thất` `chung` `.`
|
| 790 |
+
**UPOS:** NOUN VERB NOUN VERB ADJ ADV VERB ADP NOUN VERB ADP NOUN ADJ PUNCT
|
| 791 |
+
**Flags:** inconsistent: "tổn thất" + "chung" (also as "tổn thất chung"); inconsistent: "tổn thất" + "chung" (also as "tổn thất chung")
|
| 792 |
+
|
| 793 |
+
### Sample 94 [RANDOM] — s1358
|
| 794 |
+
|
| 795 |
+
**Text:** Trường hợp theo quy định của luật , tài sản bị cấm hoặc bị hạn chế chuyển nhượng thì tài sản là đối tượng của hợp đồng mua bán phải phù hợp với các quy định đó .
|
| 796 |
+
**Tokens:** `Trường hợp` `theo` `quy định` `của` `luật` `,` `tài sản` `bị` `cấm` `hoặc` `bị` `hạn chế` `chuyển nhượng` `thì` `tài sản` `là` `đối tượng` `của` `hợp đồng` `mua bán` `phải` `phù hợp` `với` `các` `quy định` `đó` `.`
|
| 797 |
+
**UPOS:** NOUN VERB NOUN ADP NOUN PUNCT NOUN AUX VERB CCONJ AUX VERB VERB SCONJ NOUN AUX NOUN ADP NOUN VERB AUX VERB ADP DET NOUN PRON PUNCT
|
| 798 |
+
|
| 799 |
+
### Sample 95 [RANDOM] — s5646
|
| 800 |
+
|
| 801 |
+
**Text:** Xét xử trong trường hợp đương sự , người bảo vệ quyền và lợi ích của đương sự vắng mặt tại phiên tòa Tòa án vẫn tiến hành xét xử vụ án trong các trường hợp sau đây : 1 .
|
| 802 |
+
**Tokens:** `Xét xử` `trong` `trường hợp` `đương sự` `,` `người` `bảo vệ` `quyền` `và` `lợi ích` `của` `đương sự` `vắng mặt` `tại` `phiên` `tòa` `Tòa án` `vẫn` `tiến hành` `xét xử` `vụ` `án` `trong` `các` `trường hợp` `sau` `đây` `:` `1` `.`
|
| 803 |
+
**UPOS:** VERB ADP NOUN NOUN PUNCT DET VERB NOUN CCONJ NOUN ADP NOUN VERB ADP NOUN VERB NOUN ADV VERB VERB NOUN NOUN ADP DET NOUN NOUN PRON PUNCT NUM PUNCT
|
| 804 |
+
**Flags:** inconsistent: "bảo vệ" + "quyền" (also as "bảo vệ quyền"); inconsistent: "phiên" + "tòa" (also as "phiên tòa")
|
| 805 |
+
|
| 806 |
+
### Sample 96 [RANDOM] — s1077
|
| 807 |
+
|
| 808 |
+
**Text:** Chấm dứt bảo lãnh Bảo lãnh chấm dứt trong trường hợp sau đây : 1 .
|
| 809 |
+
**Tokens:** `Chấm dứt` `bảo lãnh` `Bảo lãnh` `chấm dứt` `trong` `trường hợp` `sau` `đây` `:` `1` `.`
|
| 810 |
+
**UPOS:** VERB VERB VERB VERB ADP NOUN NOUN PRON PUNCT NUM PUNCT
|
| 811 |
+
|
| 812 |
+
### Sample 97 [RANDOM] — s7401
|
| 813 |
+
|
| 814 |
+
**Text:** Thời hạn điều tra vụ án hình sự không quá 02 tháng đối với tội phạm ít nghiêm trọng , không quá 03 tháng đối với tội phạm nghiêm trọng , không quá 04 tháng đối với tội phạm rất nghiêm trọng và tội phạm đặc biệt nghiêm trọng kể từ khi khởi tố vụ án cho đến khi kết thúc điều tra .
|
| 815 |
+
**Tokens:** `Thời hạn` `điều tra` `vụ` `án` `hình sự` `không` `quá` `02` `tháng` `đối với` `tội phạm` `ít` `nghiêm trọng` `,` `không` `quá` `03` `tháng` `đối với` `tội phạm` `nghiêm trọng` `,` `không` `quá` `04` `tháng` `đối với` `tội phạm` `rất` `nghiêm trọng` `và` `tội phạm` `đặc biệt` `nghiêm trọng` `kể` `từ` `khi` `khởi tố` `vụ` `án` `cho` `đến` `khi` `kết thúc` `điều tra` `.`
|
| 816 |
+
**UPOS:** NOUN VERB DET NOUN NOUN ADV ADV VERB NOUN ADP NOUN ADJ ADJ PUNCT ADV ADV VERB NOUN ADP NOUN ADJ PUNCT ADV ADV VERB NOUN ADP NOUN ADV ADJ CCONJ NOUN ADJ ADJ VERB ADP NOUN VERB DET NOUN ADP ADP NOUN VERB VERB PUNCT
|
| 817 |
+
|
| 818 |
+
### Sample 98 [RANDOM] — s6830
|
| 819 |
+
|
| 820 |
+
**Text:** Nhiệm vụ , quyền hạn và trách nhiệm của Thẩm tra viên 1 .
|
| 821 |
+
**Tokens:** `Nhiệm vụ` `,` `quyền hạn` `và` `trách nhiệm` `của` `Thẩm tra viên` `1` `.`
|
| 822 |
+
**UPOS:** NOUN PUNCT NOUN CCONJ NOUN ADP NOUN NUM PUNCT
|
| 823 |
+
|
| 824 |
+
### Sample 99 [RANDOM] — s1848
|
| 825 |
+
|
| 826 |
+
**Text:** Nguyên tắc bồi thường thiệt hại 1 .
|
| 827 |
+
**Tokens:** `Nguyên tắc` `bồi thường` `thiệt hại` `1` `.`
|
| 828 |
+
**UPOS:** NOUN VERB NOUN NUM PUNCT
|
| 829 |
+
|
| 830 |
+
### Sample 100 [RANDOM] — s4139
|
| 831 |
+
|
| 832 |
+
**Text:** Phạm tội thuộc một trong các trường hợp sau đây , thì bị tù từ 05 năm đến 10 năm .
|
| 833 |
+
**Tokens:** `Phạm tội` `thuộc` `một` `trong` `các` `trường hợp` `sau` `đây` `,` `thì` `bị` `tù` `từ` `05` `năm` `đến` `10` `năm` `.`
|
| 834 |
+
**UPOS:** NOUN VERB NUM ADP DET NOUN ADP PRON PUNCT SCONJ AUX VERB ADP NUM NOUN ADP NUM NOUN PUNCT
|
| 835 |
+
|
| 836 |
+
## 6. Dictionary-Based Validation
|
| 837 |
+
|
| 838 |
+
**Dictionary:** Viet74K
|
| 839 |
+
**Dictionary size:** 72,535 entries
|
| 840 |
+
|
| 841 |
+
### 6a. Token Coverage
|
| 842 |
+
|
| 843 |
+
| Metric | Count | Percentage |
|
| 844 |
+
|:---|---:|---:|
|
| 845 |
+
| In dictionary | 226,483 | 98.2% |
|
| 846 |
+
| Out-of-vocabulary (OOV) | 4,226 | 1.8% |
|
| 847 |
+
| Total (excl. PUNCT/NUM/SYM) | 230,709 | 100% |
|
| 848 |
+
|
| 849 |
+
**Coverage by UPOS** (top tags):
|
| 850 |
+
|
| 851 |
+
| UPOS | In Dict | Total | Coverage |
|
| 852 |
+
|:---|---:|---:|---:|
|
| 853 |
+
| NOUN | 72,284 | 73,953 | 97.7% |
|
| 854 |
+
| VERB | 55,504 | 56,733 | 97.8% |
|
| 855 |
+
| PUNCT | 26,110 | 26,110 | 100.0% |
|
| 856 |
+
| ADP | 22,508 | 22,534 | 99.9% |
|
| 857 |
+
| AUX | 8,444 | 8,444 | 100.0% |
|
| 858 |
+
| ADV | 7,844 | 7,867 | 99.7% |
|
| 859 |
+
| NUM | 7,509 | 7,509 | 100.0% |
|
| 860 |
+
| ADJ | 6,408 | 7,093 | 90.3% |
|
| 861 |
+
| CCONJ | 5,957 | 5,957 | 100.0% |
|
| 862 |
+
| DET | 5,225 | 5,326 | 98.1% |
|
| 863 |
+
| PRON | 3,841 | 3,853 | 99.7% |
|
| 864 |
+
| SCONJ | 3,793 | 3,793 | 100.0% |
|
| 865 |
+
|
| 866 |
+
**Top 30 OOV tokens:**
|
| 867 |
+
|
| 868 |
+
| Token | Count | UPOS |
|
| 869 |
+
|:---|---:|:---|
|
| 870 |
+
| việt nam | 368 | PROPN |
|
| 871 |
+
| hóa | 194 | NOUN |
|
| 872 |
+
| tọa | 127 | NOUN |
|
| 873 |
+
| cộng hòa | 89 | NOUN |
|
| 874 |
+
| xã hội chủ nghĩa việt nam | 89 | NOUN |
|
| 875 |
+
| tố tụng dân sự | 85 | NOUN |
|
| 876 |
+
| ủy quyền | 78 | NOUN |
|
| 877 |
+
| điều tra viên | 73 | NOUN |
|
| 878 |
+
| ủy | 70 | NOUN |
|
| 879 |
+
| 02 | 70 | NOUN |
|
| 880 |
+
| 03 | 67 | VERB |
|
| 881 |
+
| thụ lý | 67 | NOUN |
|
| 882 |
+
| tòa án nhân dân | 64 | NOUN |
|
| 883 |
+
| ủy thác | 60 | NOUN |
|
| 884 |
+
| hành lý | 59 | NOUN |
|
| 885 |
+
| hòa | 56 | NOUN |
|
| 886 |
+
| bộ luật hình sự | 51 | NOUN |
|
| 887 |
+
| % | 51 | NOUN |
|
| 888 |
+
| tùy | 40 | VERB |
|
| 889 |
+
| cầm giữ | 39 | NOUN |
|
| 890 |
+
| xóa | 38 | NOUN |
|
| 891 |
+
| 05 | 37 | NOUN |
|
| 892 |
+
| người thân thích | 37 | NOUN |
|
| 893 |
+
| 07 | 36 | NOUN |
|
| 894 |
+
| xét đơn | 35 | NOUN |
|
| 895 |
+
| ủy ban nhân dân | 35 | NOUN |
|
| 896 |
+
| lai dắt | 34 | NOUN |
|
| 897 |
+
| xét tính | 31 | NOUN |
|
| 898 |
+
| hòa giải | 30 | NOUN |
|
| 899 |
+
| ủy ban | 29 | NOUN |
|
| 900 |
+
|
| 901 |
+
### 6b. Possible Under-Segmentation (Over-Merged Tokens)
|
| 902 |
+
|
| 903 |
+
Multi-syllable tokens NOT in dictionary, but all individual syllables ARE
|
| 904 |
+
in dictionary. These may be incorrectly merged by the tokenizer.
|
| 905 |
+
|
| 906 |
+
Total occurrences: 2,231
|
| 907 |
+
Unique forms: 382
|
| 908 |
+
|
| 909 |
+
| Token | Count | Sub-parts |
|
| 910 |
+
|:---|---:|:---|
|
| 911 |
+
| việt nam | 368 | việt + nam |
|
| 912 |
+
| xã hội chủ nghĩa việt nam | 89 | xã + hội + chủ + nghĩa + việt + nam |
|
| 913 |
+
| tố tụng dân sự | 85 | tố + tụng + dân + sự |
|
| 914 |
+
| điều tra viên | 73 | điều + tra + viên |
|
| 915 |
+
| thụ lý | 67 | thụ + lý |
|
| 916 |
+
| tòa án nhân dân | 64 | tòa + án + nhân + dân |
|
| 917 |
+
| hành lý | 59 | hành + lý |
|
| 918 |
+
| bộ luật hình sự | 51 | bộ + luật + hình + sự |
|
| 919 |
+
| cầm giữ | 39 | cầm + giữ |
|
| 920 |
+
| người thân thích | 37 | người + thân + thích |
|
| 921 |
+
| xét đơn | 35 | xét + đơn |
|
| 922 |
+
| lai dắt | 34 | lai + dắt |
|
| 923 |
+
| xét tính | 31 | xét + tính |
|
| 924 |
+
| chữ ký | 28 | chữ + ký |
|
| 925 |
+
| a khoản | 27 | a + khoản |
|
| 926 |
+
| viện kiểm sát nhân dân | 26 | viện + kiểm + sát + nhân + dân |
|
| 927 |
+
| bảo hiểm y tế | 26 | bảo + hiểm + y + tế |
|
| 928 |
+
| ký tên | 25 | ký + tên |
|
| 929 |
+
| kiểm tra viên | 24 | kiểm + tra + viên |
|
| 930 |
+
| bộ luật dân sự | 24 | bộ + luật + dân + sự |
|
| 931 |
+
| bất kỳ | 23 | bất + kỳ |
|
| 932 |
+
| thẩm tra viên | 23 | thẩm + tra + viên |
|
| 933 |
+
| tố tụng hình sự | 23 | tố + tụng + hình + sự |
|
| 934 |
+
| người quản lý | 22 | người + quản + lý |
|
| 935 |
+
| tòa án nhân dân tối cao | 22 | tòa + án + nhân + dân + tối + cao |
|
| 936 |
+
| thu lợi | 20 | thu + lợi |
|
| 937 |
+
| đâm va | 19 | đâm + va |
|
| 938 |
+
| phiên tòa | 18 | phiên + tòa |
|
| 939 |
+
| viện kiểm sát nhân dân tối cao | 17 | viện + kiểm + sát + nhân + dân + tối + cao |
|
| 940 |
+
| thuyền bộ | 17 | thuyền + bộ |
|
| 941 |
+
| điều cấm | 14 | điều + cấm |
|
| 942 |
+
| neo đậu | 13 | neo + đậu |
|
| 943 |
+
| phản tố | 13 | phản + tố |
|
| 944 |
+
| chung hợp | 12 | chung + hợp |
|
| 945 |
+
| nội quy phiên | 12 | nội + quy + phiên |
|
| 946 |
+
| lúc nào | 12 | lúc + nào |
|
| 947 |
+
| người dân | 11 | người + dân |
|
| 948 |
+
| ban nhân dân | 10 | ban + nhân + dân |
|
| 949 |
+
| bộ tư pháp | 10 | bộ + tư + pháp |
|
| 950 |
+
| thỏa ước | 10 | thỏa + ước |
|
| 951 |
+
|
| 952 |
+
### 6c. Possible Over-Segmentation (Under-Merged Tokens)
|
| 953 |
+
|
| 954 |
+
Adjacent tokens that together form a word found in the dictionary.
|
| 955 |
+
These may be incorrectly split by the tokenizer.
|
| 956 |
+
|
| 957 |
+
Total occurrences: 7,373
|
| 958 |
+
Unique dictionary words split: 462
|
| 959 |
+
|
| 960 |
+
| Dictionary Word | Times Split | Example sent_id |
|
| 961 |
+
|:---|---:|:---|
|
| 962 |
+
| vụ án | 892 | s8490 |
|
| 963 |
+
| kể từ | 429 | s8211 |
|
| 964 |
+
| phạt tù | 422 | s4203 |
|
| 965 |
+
| từ ngày | 371 | s8211 |
|
| 966 |
+
| sau đây | 323 | s3478 |
|
| 967 |
+
| trở lên | 235 | s111 |
|
| 968 |
+
| phạt tiền | 178 | s4144 |
|
| 969 |
+
| bên có | 167 | s1334 |
|
| 970 |
+
| trước khi | 154 | s7558 |
|
| 971 |
+
| hủy bỏ | 147 | s5308 |
|
| 972 |
+
| xảy ra | 119 | s3341 |
|
| 973 |
+
| chữa bệnh | 112 | s9086 |
|
| 974 |
+
| không phải | 103 | s3029 |
|
| 975 |
+
| lời khai | 102 | s7093 |
|
| 976 |
+
| kèm theo | 101 | s5502 |
|
| 977 |
+
| nếu không | 100 | s1782 |
|
| 978 |
+
| ghi rõ | 99 | s7093 |
|
| 979 |
+
| trả lại | 94 | s8211 |
|
| 980 |
+
| thứ ba | 86 | s5252 |
|
| 981 |
+
| có người | 84 | s231 |
|
| 982 |
+
| cấp cao | 62 | s6103 |
|
| 983 |
+
| tiền công | 62 | s3044 |
|
| 984 |
+
| khám bệnh | 57 | s9086 |
|
| 985 |
+
| cho thuê | 55 | s788 |
|
| 986 |
+
| đưa ra | 54 | s1275 |
|
| 987 |
+
| như sau | 53 | s5403 |
|
| 988 |
+
| vượt quá | 53 | s1649 |
|
| 989 |
+
| lấy lời | 53 | s7093 |
|
| 990 |
+
| cấp trên | 52 | s6643 |
|
| 991 |
+
| rút gọn | 51 | s8716 |
|
| 992 |
+
| sức khỏe | 44 | s4204 |
|
| 993 |
+
| bên kia | 43 | s1275 |
|
| 994 |
+
| giấy chứng nhận | 41 | s2220 |
|
| 995 |
+
| đưa vào | 40 | s9095 |
|
| 996 |
+
| tù chung thân | 38 | s4204 |
|
| 997 |
+
| có lỗi | 38 | s2875 |
|
| 998 |
+
| trong khi | 36 | s2737 |
|
| 999 |
+
| có một | 32 | s1283 |
|
| 1000 |
+
| còn lại | 32 | s1283 |
|
| 1001 |
+
| tàu biển | 32 | s2270 |
|
| 1002 |
+
| quá hạn | 31 | s8210 |
|
| 1003 |
+
| tính từ | 31 | s2925 |
|
| 1004 |
+
| với nhau | 31 | s6011 |
|
| 1005 |
+
| làm chủ | 30 | s5640 |
|
| 1006 |
+
| để lại | 29 | s1945 |
|
| 1007 |
+
| sinh ra | 28 | s111 |
|
| 1008 |
+
| khác nhau | 27 | s472 |
|
| 1009 |
+
| nhất là | 27 | s6560 |
|
| 1010 |
+
| thứ hai | 26 | s9589 |
|
| 1011 |
+
| ô nhiễm môi trường | 26 | s2220 |
|
| 1012 |
+
|
| 1013 |
+
### 6d. Summary
|
| 1014 |
+
|
| 1015 |
+
- **Dictionary coverage**: 98.2% of tokens are known words
|
| 1016 |
+
- **Possible over-merges**: 382 unique multi-syllable OOV forms (2,231 occurrences)
|
| 1017 |
+
- **Possible under-merges**: 462 unique dictionary words found split (7,373 occurrences)
|
TECHNICAL_REPORT.md
ADDED
|
@@ -0,0 +1,351 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UDD-1: A Large-Scale Vietnamese Universal Dependencies Treebank
|
| 2 |
+
|
| 3 |
+
**Underthesea NLP**
|
| 4 |
+
|
| 5 |
+
## Abstract
|
| 6 |
+
|
| 7 |
+
Vietnamese remains underrepresented in the Universal Dependencies (UD) ecosystem, with only one official treebank (UD_Vietnamese-VTB) containing 3,323 sentences restricted to the news domain. We present UDD-1, a silver-standard Vietnamese UD treebank comprising 10,000 sentences and 230,709 tokens from the legal domain (Vietnamese Legal Corpus). Annotations are machine-generated using the biaffine attention-based dependency parser of the Underthesea NLP toolkit (v2.1.0), trained on VLSP 2020 data, followed by a multi-pass rule-based post-processing pipeline that enforces UD v2 structural and linguistic constraints. The resulting treebank uses 15 UPOS tags and 32 universal dependency relation types (with 45 language-specific subtypes, totaling 77 distinct relation labels). As a silver-standard resource, annotation quality is bounded by the underlying parser's performance (~76% LAS on the VLSP 2020 benchmark); we provide a detailed analysis of annotation characteristics including XPOS-UPOS consistency and post-processing effects. UDD-1 is publicly available at `https://huggingface.co/datasets/undertheseanlp/UDD-1` under the CC BY-SA 4.0 license.
|
| 8 |
+
|
| 9 |
+
## 1. Introduction
|
| 10 |
+
|
| 11 |
+
Universal Dependencies (UD; de Marneffe et al., 2021) has become the de facto standard for cross-linguistically consistent treebank annotation, currently encompassing over 200 treebanks across 150+ languages. For Vietnamese, however, the UD ecosystem remains limited. The sole official Vietnamese UD treebank, UD_Vietnamese-VTB, contains only 3,323 sentences (58,069 tokens) drawn from a single news source (Nguyen et al., 2009). This scarcity constrains both the development of Vietnamese dependency parsers and the evaluation of cross-lingual transfer methods on Vietnamese data.
|
| 12 |
+
|
| 13 |
+
Several Vietnamese dependency treebanks exist outside the UD framework, including VnDT (10,200 sentences; Nguyen et al., 2014) and BKTreebank (6,900 sentences; Nguyen, 2018). However, none of these follow UD annotation guidelines, limiting their utility for cross-lingual research and interoperability with the broader UD ecosystem. Furthermore, all existing Vietnamese treebanks---both within and outside the UD framework---are restricted to the news domain. Recent work on domain generalization for Vietnamese dependency parsing (Huynh et al., 2025) shows that domain shifts cause significant parser performance degradation of 3.27% UAS and 5.09% LAS, motivating multi-domain treebank construction.
|
| 14 |
+
|
| 15 |
+
We present UDD-1, a Vietnamese UD treebank that addresses these gaps through two contributions:
|
| 16 |
+
|
| 17 |
+
- We release a large-scale Vietnamese UD treebank of 10,000 sentences (230,709 tokens) from the legal domain, making it approximately 3x larger than UD_Vietnamese-VTB in sentences and 4x in tokens. This is the first Vietnamese UD treebank to cover a domain other than news.
|
| 18 |
+
- We describe a reproducible pipeline for generating UD-compliant treebanks by combining neural parsing with extensive rule-based post-processing, demonstrating how machine-generated annotations can produce structurally valid UD data at scale.
|
| 19 |
+
|
| 20 |
+
## 2. Related Work
|
| 21 |
+
|
| 22 |
+
### 2.1 Vietnamese Dependency Treebanks
|
| 23 |
+
|
| 24 |
+
The earliest Vietnamese dependency resource is VnDT (Nguyen et al., 2014), created by converting the VLSP constituent treebank to dependency format. VnDT contains 10,200 sentences from the news domain and has served as the primary evaluation benchmark for Vietnamese dependency parsing. BKTreebank (Nguyen, 2018) provides 6,900 manually annotated sentences from news sources, annotated by three annotators using the BRAT tool.
|
| 25 |
+
|
| 26 |
+
UD_Vietnamese-VTB (Nguyen et al., 2009) is the only Vietnamese treebank in the official UD collection. Originally a constituency treebank from the VLSP project, it was automatically converted to UD format with manually annotated XPOS tags. Its small size (3,323 sentences, 58,069 tokens) and single-domain coverage limit its utility for training robust parsers.
|
| 27 |
+
|
| 28 |
+
Most recently, Huynh et al. (2025) introduced DGDT, a domain generalization benchmark for Vietnamese dependency parsing with data from multiple domains, demonstrating 3.27% UAS and 5.09% LAS degradation across domains. This work directly motivates the construction of domain-diverse Vietnamese UD resources.
|
| 29 |
+
|
| 30 |
+
### 2.2 Vietnamese Dependency Parsing
|
| 31 |
+
|
| 32 |
+
Vietnamese dependency parsing has advanced significantly with the introduction of pre-trained language models. VnCoreNLP (Vu et al., 2018) established early baselines with a joint segmentation and parsing pipeline achieving 73.39% LAS on VnDT. PhoBERT (Nguyen and Nguyen, 2020), a RoBERTa-based model pre-trained on Vietnamese data, substantially improved results across NLP tasks. PhoNLP (Nguyen and Nguyen, 2021), a multi-task model built on PhoBERT, currently achieves state-of-the-art performance on VnDT with 79.11% LAS and 85.47% UAS.
|
| 33 |
+
|
| 34 |
+
The VLSP shared tasks have been instrumental in advancing Vietnamese parsing. The VLSP 2019 shared task introduced dependency parsing evaluation with approximately 4,000 sentences. The subsequent VLSP 2020 Universal Dependency Parsing shared task (Ha et al., 2020) provided a larger UD v2 corpus (8,152 training + 1,123 test sentences) and evaluated participating systems using CoNLL 2018 evaluation scripts, with the best system achieving 76.27% LAS on pre-segmented input.
|
| 35 |
+
|
| 36 |
+
Recent work by Nguyen et al. (2024) developed an HPSG-based neural parser for Vietnamese, achieving 82% F-score for constituency parsing and identifying approximately 15% non-compliant trees in the VietTreebank/VnDT, highlighting ongoing treebank quality challenges.
|
| 37 |
+
|
| 38 |
+
### 2.3 Machine-Generated UD Treebanks
|
| 39 |
+
|
| 40 |
+
The UD community has embraced silver-standard (machine-generated) treebanks as valuable complements to gold-standard resources. Notable examples include UD-English-CHILDES (Yang et al., 2025), which provides approximately 1 million silver-standard sentences generated using Stanza alongside 48,000 gold-annotated sentences, with the silver parser achieving 83.3% LAS on the target domain. Machine-generated treebanks enable scale that is impractical with manual annotation, and have been shown to improve parser training when combined with gold data (Nivre et al., 2020). UDD-1 follows this established practice.
|
| 41 |
+
|
| 42 |
+
### 2.4 Legal Domain Treebanks
|
| 43 |
+
|
| 44 |
+
Legal text poses specific challenges for dependency parsing due to long sentences, specialized terminology, and complex nested structures. The most notable legal-domain UD treebank is UD_Czech-CLTT (Sevcikova and Zabokrtsky, 2016), containing 1,121 manually annotated sentences from Czech legal documents. To our knowledge, no legal-domain UD treebank exists for Vietnamese or any other Southeast Asian language.
|
| 45 |
+
|
| 46 |
+
### 2.5 Universal Dependencies Framework
|
| 47 |
+
|
| 48 |
+
Universal Dependencies v2 (Nivre et al., 2020) defines a cross-linguistically consistent annotation scheme with 17 universal POS tags and a taxonomy of dependency relations. Treebank validity is enforced through a multi-level validation system: Level 1 checks CoNLL-U format compliance, Level 2 verifies token-level annotation constraints, and Levels 3--5 enforce progressively stricter syntactic well-formedness rules including tree structure validation, UPOS-deprel compatibility, and language-specific constraints.
|
| 49 |
+
|
| 50 |
+
## 3. Data Collection
|
| 51 |
+
|
| 52 |
+
### 3.1 Source Corpus
|
| 53 |
+
|
| 54 |
+
UDD-1 draws sentences from the Vietnamese Legal Corpus (UTS_VLC), a collection of Vietnamese legal documents hosted on HuggingFace (`undertheseanlp/UTS_VLC`). Legal text was chosen to diversify the domain coverage of Vietnamese UD resources beyond the news domain that dominates existing treebanks.
|
| 55 |
+
|
| 56 |
+
### 3.2 Sentence Selection
|
| 57 |
+
|
| 58 |
+
Raw documents undergo a cleaning pipeline that removes markdown formatting, normalizes Unicode using Underthesea's `text_normalize`, and segments text into sentences using Underthesea's `sent_tokenize`. Sentences are then filtered by the following criteria:
|
| 59 |
+
|
| 60 |
+
- **Length**: Minimum 20 characters, maximum 300 characters
|
| 61 |
+
- **Content**: Must contain Vietnamese diacritical characters
|
| 62 |
+
- **Structure**: Excludes headers (all-caps text), article titles (e.g., "Điều 1"), incomplete sentences, and text with >50% uppercase characters
|
| 63 |
+
- **Format**: Excludes metadata lines, list markers, and non-prose content
|
| 64 |
+
|
| 65 |
+
This filtering yields 10,000 sentences suitable for syntactic annotation. We note that the sentence-level filtering does not eliminate all non-prose content; some section headings and incomplete fragments may pass the heuristic filters.
|
| 66 |
+
|
| 67 |
+
### 3.3 Train/Dev/Test Split
|
| 68 |
+
|
| 69 |
+
The 10,000 sentences are split into train, development, and test sets. Sentences are drawn sequentially from the source corpus and split by position: the first 9,165 sentences form the training set, with the subsequent 432 and 403 sentences forming the development and test sets respectively. This sequential split preserves document-level coherence within each partition but does not guarantee topic diversity across splits. The split ratio (91.7%/4.3%/4.0%) prioritizes training data given the treebank's intended use as a silver-standard training resource. Future releases may adopt stratified or document-level splits.
|
| 70 |
+
|
| 71 |
+
## 4. Annotation Pipeline
|
| 72 |
+
|
| 73 |
+
### 4.1 Initial Annotation
|
| 74 |
+
|
| 75 |
+
Each sentence is processed through two Underthesea models (Underthesea v2.1.0, PyTorch 2.0):
|
| 76 |
+
|
| 77 |
+
1. **Dependency Parser** (`dependency_parse`): A Biaffine attention-based graph parser (Dozat and Manning, 2017) trained on the VLSP 2020 Dependency Parsing dataset. The model uses BERT embeddings (4 layers, 768 dimensions) with a BiLSTM encoder (400 hidden units, 3 layers, 0.33 dropout). This produces tokens, dependency heads, and dependency relation labels. The parser achieves approximately 76% LAS on the VLSP 2020 benchmark on pre-segmented news text; performance on legal text has not been independently evaluated and may differ due to domain shift.
|
| 78 |
+
|
| 79 |
+
2. **POS Tagger** (`pos_tag`): Produces Vietnamese-specific POS tags (XPOS), which are then mapped to Universal POS tags (UPOS) using a hand-crafted mapping table covering 22 Vietnamese XPOS tags mapped to 16 UPOS categories (Table 1).
|
| 80 |
+
|
| 81 |
+
When the dependency parser fails on a sentence (e.g., due to tokenization errors), a fallback assigns all tokens HEAD=0 with `dep` relations and the first token as `root`. We estimate this fallback is triggered rarely (<0.1% of sentences) but these sentences are included in the final treebank.
|
| 82 |
+
|
| 83 |
+
### 4.2 Vietnamese POS Mapping
|
| 84 |
+
|
| 85 |
+
Vietnamese POS tags are mapped to UPOS following the correspondences in Table 1. Special handling is applied for:
|
| 86 |
+
|
| 87 |
+
- **Auxiliary verbs**: A curated list of 20 Vietnamese auxiliaries (e.g., *được*, *phải*, *có thể*, *là*) are tagged as AUX regardless of their XPOS. This is a simplification: some of these words (e.g., *là*) can function as different parts of speech depending on context. The list-based approach prioritizes consistency over contextual accuracy.
|
| 88 |
+
- **Determiners**: Common Vietnamese determiners (e.g., *các*, *những*, *mọi*, *này*, *đó*) are identified for DET assignment.
|
| 89 |
+
- **Adverbs**: Temporal and negation markers (e.g., *không*, *đã*, *đang*, *sẽ*) are mapped to ADV.
|
| 90 |
+
|
| 91 |
+
| Vietnamese XPOS | UPOS | Examples |
|
| 92 |
+
|:-:|:-:|---------|
|
| 93 |
+
| N, Nc, Nu, Ny | NOUN | *nhà*, *trường*, *tàu* |
|
| 94 |
+
| Np | PROPN | *Việt Nam*, *Hà Nội* |
|
| 95 |
+
| V | VERB | *làm*, *chạy*, *nói* |
|
| 96 |
+
| A | ADJ | *đẹp*, *lớn*, *mới* |
|
| 97 |
+
| P | PRON | *tôi*, *anh*, *nó* |
|
| 98 |
+
| R | ADV | *rất*, *đã*, *không* |
|
| 99 |
+
| L | DET | *các*, *những*, *mỗi* |
|
| 100 |
+
| M | NUM | *một*, *hai*, *ba* |
|
| 101 |
+
| E | ADP | *của*, *trong*, *về* |
|
| 102 |
+
| C, CC | CCONJ | *và*, *hoặc*, *hay* |
|
| 103 |
+
| SC | SCONJ | *vì*, *nếu*, *khi* |
|
| 104 |
+
| CH | PUNCT | *.*, *,*, *;* |
|
| 105 |
+
| T | PART | *ạ*, *nhé* |
|
| 106 |
+
| I | INTJ | *ôi*, *chao* |
|
| 107 |
+
| B, Y, X | X | Foreign/other |
|
| 108 |
+
| S | SYM | *%*, *$* |
|
| 109 |
+
|
| 110 |
+
**Table 1**: Vietnamese XPOS to Universal POS (UPOS) mapping.
|
| 111 |
+
|
| 112 |
+
### 4.3 Rule-Based Post-Processing
|
| 113 |
+
|
| 114 |
+
The initial neural annotations frequently violate UD structural and linguistic constraints. We apply a multi-pass rule-based post-processing pipeline (`fix_syntax_errors`) that addresses the following categories of errors:
|
| 115 |
+
|
| 116 |
+
**Pass 1: Leaf Node Enforcement.** UD requires that certain relations (aux, cop, mark, case, punct, det, nummod, clf) attach as leaves---they should not have dependents. The pipeline iteratively redirects children of leaf-relation nodes to the node's own head, running up to 5 passes to resolve chains.
|
| 117 |
+
|
| 118 |
+
**Pass 2: UPOS-Deprel Consistency.** UD enforces strict compatibility between UPOS tags and dependency relations. When a mismatch is detected, the pipeline must decide whether to change the UPOS or the deprel. Our strategy is:
|
| 119 |
+
|
| 120 |
+
- For relations with strong UPOS constraints (e.g., `nummod` whose dependent is a verb or adjective rather than NUM), the deprel is changed (e.g., to `acl` or `amod`) to preserve the more reliable XPOS-derived UPOS.
|
| 121 |
+
- For relations where the deprel is more informative than the XPOS (e.g., `aux` with a known auxiliary lemma), the UPOS is changed to match the deprel.
|
| 122 |
+
- For other mismatches, the UPOS is changed to satisfy the UD constraint.
|
| 123 |
+
|
| 124 |
+
Specific rules applied:
|
| 125 |
+
|
| 126 |
+
- `det` relations must have DET or PRON dependents (UPOS forced to DET)
|
| 127 |
+
- `advmod` relations must have ADV dependents (UPOS forced to ADV)
|
| 128 |
+
- `nummod` relations must have NUM dependents; verbs and adjectives are reassigned to `acl` and `amod` respectively (deprel changed); other POS forced to NUM
|
| 129 |
+
- `mark` relations cannot have AUX dependents (UPOS reassigned to SCONJ)
|
| 130 |
+
- `punct` relations must have PUNCT dependents and vice versa
|
| 131 |
+
- `case` relations should not have ADJ, AUX, PROPN, NOUN, or VERB dependents (UPOS reassigned to ADP)
|
| 132 |
+
- `cc` relations must have CCONJ or SCONJ dependents
|
| 133 |
+
- `aux` relations must have AUX dependents; invalid auxiliaries are reassigned to `advcl` or `xcomp` (deprel changed)
|
| 134 |
+
- `cop` relations are restricted to the copula *là*
|
| 135 |
+
|
| 136 |
+
**Trade-off.** This approach ensures UD structural validity but introduces a known trade-off: when the parser assigns an incorrect deprel, forcing the UPOS to match propagates the error to the POS column. Across the treebank, 8.6% of tokens (19,933 of 230,709) have a UPOS that differs from the XPOS-derived mapping. Of these, approximately 18.8% are justified functional-word reclassifications (e.g., verbs in the auxiliary list tagged as AUX), 73.8% are forced by UPOS-deprel consistency rules, and 7.4% arise from other corrections. Users who need reliable POS tags independent of syntactic relations should prefer the XPOS column, which reflects the POS tagger's output without post-processing modifications.
|
| 137 |
+
|
| 138 |
+
**Pass 3: Invalid Deprel Mapping.** Non-standard dependency relations produced by the parser are mapped to valid UD relations (e.g., `acomp` $\to$ `xcomp`, `nmod:comp` $\to$ `nmod`, `compound:number` $\to$ `nummod`).
|
| 139 |
+
|
| 140 |
+
**Pass 4: Structural Constraints.**
|
| 141 |
+
|
| 142 |
+
- *Directionality*: `flat`, `conj`, and `appos` relations must be left-to-right; right-to-left instances are converted to `compound`.
|
| 143 |
+
- *Multiple subjects*: When a predicate has multiple `nsubj`/`csubj` dependents, subsequent ones receive the `:outer` subtype.
|
| 144 |
+
- *Multiple objects*: Second `obj` dependents are reassigned to `compound` (if adjacent) or `iobj`.
|
| 145 |
+
- *Non-projective punctuation*: Punctuation attachment is adjusted to minimize edge crossings using a greedy algorithm that tests candidate heads in order of proximity.
|
| 146 |
+
|
| 147 |
+
### 4.4 Additional Annotations
|
| 148 |
+
|
| 149 |
+
- **Lemmas**: Generated by lowercasing token forms (approximation suitable for Vietnamese, which has minimal inflectional morphology).
|
| 150 |
+
- **SpaceAfter**: Computed by aligning tokens against the original text to identify tokens not followed by whitespace (encoded as `SpaceAfter=No` in the MISC column).
|
| 151 |
+
- **Morphological features**: Left underspecified (`_`) as Vietnamese has limited morphological marking.
|
| 152 |
+
|
| 153 |
+
## 5. Dataset Statistics
|
| 154 |
+
|
| 155 |
+
### 5.1 Overview
|
| 156 |
+
|
| 157 |
+
| | Train | Dev | Test | Total |
|
| 158 |
+
|---|---:|---:|---:|---:|
|
| 159 |
+
| Sentences | 9,165 | 432 | 403 | 10,000 |
|
| 160 |
+
| Tokens | 210,668 | 10,033 | 10,008 | 230,709 |
|
| 161 |
+
| Avg. sentence length | 23.0 | 23.2 | 24.8 | 23.1 |
|
| 162 |
+
|
| 163 |
+
**Table 2**: Dataset split statistics.
|
| 164 |
+
|
| 165 |
+
All sentences have a single root node. The average tree depth is 6.92. Of all sentences, 85.3% have projective tree structures and 14.7% contain at least one non-projective arc.
|
| 166 |
+
|
| 167 |
+
### 5.2 Sentence Length Distribution
|
| 168 |
+
|
| 169 |
+
| Length (tokens) | Count | Percentage |
|
| 170 |
+
|:---:|---:|---:|
|
| 171 |
+
| 1--10 | 1,932 | 19.3% |
|
| 172 |
+
| 11--20 | 2,752 | 27.5% |
|
| 173 |
+
| 21--30 | 2,559 | 25.6% |
|
| 174 |
+
| 31--40 | 1,638 | 16.4% |
|
| 175 |
+
| 41--50 | 887 | 8.9% |
|
| 176 |
+
| 51+ | 232 | 2.3% |
|
| 177 |
+
|
| 178 |
+
**Table 3**: Sentence length distribution.
|
| 179 |
+
|
| 180 |
+
### 5.3 POS Tag Distribution
|
| 181 |
+
|
| 182 |
+
| UPOS | Count | % | UPOS | Count | % |
|
| 183 |
+
|:---|---:|---:|:---|---:|---:|
|
| 184 |
+
| NOUN | 73,953 | 32.1 | CCONJ | 5,957 | 2.6 |
|
| 185 |
+
| VERB | 56,733 | 24.6 | DET | 5,326 | 2.3 |
|
| 186 |
+
| PUNCT | 26,110 | 11.3 | PRON | 3,853 | 1.7 |
|
| 187 |
+
| ADP | 22,534 | 9.8 | SCONJ | 3,793 | 1.6 |
|
| 188 |
+
| AUX | 8,444 | 3.7 | PROPN | 1,311 | 0.6 |
|
| 189 |
+
| ADV | 7,867 | 3.4 | PART | 154 | 0.1 |
|
| 190 |
+
| NUM | 7,509 | 3.3 | X | 72 | <0.1 |
|
| 191 |
+
| ADJ | 7,093 | 3.1 | | | |
|
| 192 |
+
|
| 193 |
+
**Table 4**: UPOS distribution across the full dataset. 15 of the 17 universal POS tags are attested. INTJ and SYM are absent, consistent with the formal register of legal text (the POS mapping supports both categories but no instances occur in this corpus).
|
| 194 |
+
|
| 195 |
+
The predominance of NOUN (32.1%) and VERB (24.6%) is characteristic of legal text, which is heavy on nominal constructions and procedural descriptions. The high frequency of ADP (9.8%) reflects the prevalence of prepositional phrases specifying legal scope and conditions.
|
| 196 |
+
|
| 197 |
+
### 5.4 Dependency Relation Distribution
|
| 198 |
+
|
| 199 |
+
The treebank uses 77 distinct dependency relation labels, comprising 32 universal base types (out of 37 defined in UD v2) and 45 language-specific subtypes. The base types cover the core UD relations; 5 universal types not attested are `expl`, `fixed`, `goeswith`, `orphan`, and `reparandum`. The 15 most frequent relations are shown in Table 5.
|
| 200 |
+
|
| 201 |
+
| Relation | Count | % | Relation | Count | % |
|
| 202 |
+
|:---|---:|---:|:---|---:|---:|
|
| 203 |
+
| punct | 26,110 | 11.3 | cc | 5,927 | 2.6 |
|
| 204 |
+
| obj | 21,757 | 9.4 | nummod | 5,831 | 2.5 |
|
| 205 |
+
| nmod | 20,998 | 9.1 | obl | 5,423 | 2.4 |
|
| 206 |
+
| case | 20,860 | 9.0 | nmod:poss | 5,377 | 2.3 |
|
| 207 |
+
| conj | 19,751 | 8.6 | advmod | 4,620 | 2.0 |
|
| 208 |
+
| compound | 12,334 | 5.3 | aux:pass | 4,324 | 1.9 |
|
| 209 |
+
| root | 10,000 | 4.3 | amod | 4,257 | 1.8 |
|
| 210 |
+
| nsubj | 9,527 | 4.1 | | | |
|
| 211 |
+
|
| 212 |
+
**Table 5**: Most frequent dependency relations.
|
| 213 |
+
|
| 214 |
+
The language-specific subtypes originate from the VLSP 2020 parser training data. Notable Vietnamese-specific subtypes include:
|
| 215 |
+
|
| 216 |
+
- `acl:subj` (9,362 occurrences, 4.1%): Subject-oriented adnominal clauses, common in Vietnamese legal constructions (e.g., *quy định quy định...* "regulation that specifies...").
|
| 217 |
+
- `obl:comp` (3,702, 1.6%): Complement obliques.
|
| 218 |
+
- `det:pmod` (2,473, 1.1%): Post-nominal demonstrative determiners (e.g., *này* "this", *đó* "that").
|
| 219 |
+
- `det:clf` (1,974, 0.9%): Classifier determiners.
|
| 220 |
+
- `compound:vmod` (1,730, 0.8%): Verbal compound modifiers.
|
| 221 |
+
- `advmod:neg` (2,114, 0.9%): Negation adverbs (e.g., *không* "not").
|
| 222 |
+
- `mark:pcomp` (733, 0.3%): Prepositional complement markers.
|
| 223 |
+
|
| 224 |
+
Some of these subtypes (e.g., `acl:subj`, `obl:comp`, `mark:pcomp`) are not documented in the official UD Vietnamese language-specific guidelines and originate from the VLSP 2020 shared task annotation scheme. Users working within strict UD conventions may wish to strip subtypes to their base relations.
|
| 225 |
+
|
| 226 |
+
### 5.5 Vietnamese XPOS Distribution
|
| 227 |
+
|
| 228 |
+
The Vietnamese-specific POS tag distribution preserves finer distinctions than UPOS, with 18 tags including subcategories of nouns: classifier nouns (Nc: 1,688), proper nouns (Np: 1,385), and unit nouns (Nu: 535).
|
| 229 |
+
|
| 230 |
+
### 5.6 Annotation Quality Assessment
|
| 231 |
+
|
| 232 |
+
As a silver-standard resource, UDD-1's annotation quality is bounded by the performance of the underlying parser. We characterize quality along several dimensions:
|
| 233 |
+
|
| 234 |
+
**Parser baseline.** The Underthesea dependency parser achieves approximately 76% LAS on the VLSP 2020 benchmark (news-domain, pre-segmented input). Due to domain shift, performance on legal text is expected to differ, though the formal and structured nature of legal prose may partially offset this. No in-domain gold evaluation has been conducted.
|
| 235 |
+
|
| 236 |
+
**Structural validity.** All 10,000 sentences form valid trees: single root, no cycles. 85.3% of trees are projective. These are necessary but not sufficient conditions for annotation quality---any tree satisfies the single-root constraint.
|
| 237 |
+
|
| 238 |
+
**XPOS-UPOS consistency.** Of 230,709 tokens, 19,933 (8.6%) have UPOS tags that differ from the expected mapping of their XPOS tags (Table 1). This divergence has three sources: (1) justified reclassifications of functional words such as auxiliaries (18.8% of mismatches), (2) UPOS forced to satisfy deprel constraints during post-processing (73.8%), and (3) other corrections (7.4%). The forced category represents cases where the UPOS may be less reliable than the XPOS; users requiring accurate POS information should prefer the XPOS column.
|
| 239 |
+
|
| 240 |
+
**Known error patterns.** Manual inspection of sample sentences reveals the following recurring issues:
|
| 241 |
+
|
| 242 |
+
1. *Word segmentation errors*: The parser occasionally merges adjacent tokens (e.g., two noun phrases joined into a single token). These errors propagate from the underlying tokenizer and cannot be corrected by post-processing.
|
| 243 |
+
2. *UPOS forcing artifacts*: When the parser assigns an incorrect deprel (e.g., `nummod` for a noun), the post-processing forces the UPOS to NUM, creating an obviously incorrect POS tag. The XPOS column retains the original (correct) tag in these cases.
|
| 244 |
+
3. *Incomplete sentence filtering*: Some section headings and enumerative fragments pass the sentence selection filters and receive annotations of limited linguistic value.
|
| 245 |
+
|
| 246 |
+
**Word segmentation analysis.** Since the dependency parser performs implicit word segmentation, we conducted a quantitative analysis of tokenization quality (full details in `SEGMENTATION_EVAL.md`). The key findings are:
|
| 247 |
+
|
| 248 |
+
- *Syllable distribution*: 62.1% of tokens are single-syllable, 36.6% are two-syllable, and 1.3% have three or more syllables. NOUN and VERB have the highest average syllable counts (1.64), consistent with Vietnamese compound word patterns.
|
| 249 |
+
- *Long tokens (4+ syllables)*: 868 occurrences (0.38% of tokens) across 111 unique forms. Most are legitimate legal compound terms (e.g., *tố tụng dân sự* "civil procedure", *Tòa án nhân dân* "People's Court"), but some reflect over-merging by the tokenizer.
|
| 250 |
+
- *Cross-boundary merges*: 204 tokens contain uppercase letters mid-token in non-PROPN positions, indicating incorrect merging of adjacent words across sentence boundaries (e.g., *kề Quyền* from "...kề" + "Quyền...", *tố tụng Người* from "...tố tụng" + "Người...").
|
| 251 |
+
- *Inconsistent segmentation*: 158 word forms appear both as single tokens and as split bigrams in different contexts. The most frequent cases include *phiên tòa* (18 single vs. 576 split), *tàu biển* (524 single vs. 32 split), and *hàng hóa* (6 single vs. 175 split). Notable legal terms such as *vụ án* are consistently split (892 occurrences, always as two tokens).
|
| 252 |
+
- *Shared tokenizer*: Comparison with Underthesea's `word_tokenize()` on 300 sampled sentences yields 100% match, confirming that `dependency_parse()` uses the same tokenizer internally. Segmentation errors are therefore inherent to the Underthesea tokenizer and would require an independent tool or gold-standard data to evaluate.
|
| 253 |
+
|
| 254 |
+
**Dictionary-based validation.** Since the tokenizer cannot be evaluated against itself, we validate segmentation against the Viet74K dictionary (72,535 entries from Ho Ngoc Duc's Free Vietnamese Dictionary Project, bundled in Underthesea). This provides an independent reference for detecting potential segmentation errors. Three checks are performed (full details in `SEGMENTATION_EVAL.md` Section 6 and `ANNOTATION_GUIDELINE_WORD_SEGMENTATION.md` Section 8):
|
| 255 |
+
|
| 256 |
+
- *Token coverage*: 98.2% of tokens (226,483 / 230,709) are found in the dictionary. The 1.8% out-of-vocabulary (OOV) tokens include proper nouns (*Việt Nam*, 368 occurrences), legal domain terms absent from the general-purpose dictionary (*tố tụng dân sự*, *điều tra viên*, *thụ lý*), and some genuine segmentation errors. ADJ has the lowest UPOS-level coverage (90.3%), while function word categories achieve 100%.
|
| 257 |
+
- *Under-segmentation detection*: 382 unique multi-syllable tokens are OOV but have all individual syllables present in the dictionary, suggesting possible over-merging. Most are legitimate legal compounds not in the dictionary (e.g., *tòa án nhân dân* "People's Court"), but some are likely errors (e.g., *a khoản* — a cross-boundary merge).
|
| 258 |
+
- *Over-segmentation detection*: 462 unique dictionary words are found split into adjacent tokens (7,373 total occurrences). The most significant cases include *vụ án* "lawsuit" (892 occurrences, always split despite being a dictionary entry), *phạt tù* "imprisonment" (422), *hủy bỏ* "cancel" (147), and *chữa bệnh* "treat illness" (112). These represent systematic tokenizer errors where compound words are incorrectly segmented into their constituent syllables. Some false positives arise when accidental bigrams match dictionary entries (e.g., *bên có* "party that has" matching the dictionary entry for "creditor").
|
| 259 |
+
|
| 260 |
+
The dictionary-based approach has limitations: the dictionary lacks many legal-domain terms (causing false OOV), and cannot distinguish context-dependent segmentation (the same bigram may be one word or two words depending on syntax). Nevertheless, it identifies concrete, actionable segmentation errors---particularly the 892 instances of *vụ án* being split---that could be corrected through post-processing.
|
| 261 |
+
|
| 262 |
+
**Comparison with other silver treebanks.** UD-English-CHILDES (Yang et al., 2025) reports 83.3% LAS for its silver annotations on child-directed speech. Our expected quality is lower (~76% LAS on news, likely somewhat less on legal text), reflecting the more limited training resources available for Vietnamese. Unlike CHILDES, UDD-1 does not include a gold subset for direct quality comparison.
|
| 263 |
+
|
| 264 |
+
## 6. Comparison with Existing Resources
|
| 265 |
+
|
| 266 |
+
| Treebank | Language | Sentences | Tokens | Domain | UD Format | Annotation |
|
| 267 |
+
|:---|:---|---:|---:|:---|:-:|:---|
|
| 268 |
+
| **UDD-1** | **Vietnamese** | **10,000** | **230,709** | **Legal** | **Yes** | **Machine-generated** |
|
| 269 |
+
| UD_Vietnamese-VTB | Vietnamese | 3,323 | 58,069 | News | Yes | Auto-converted |
|
| 270 |
+
| VnDT | Vietnamese | 10,200 | -- | News | No | Manual |
|
| 271 |
+
| BKTreebank | Vietnamese | 6,900 | -- | News | No | Manual |
|
| 272 |
+
| UD_Czech-CLTT | Czech | 1,121 | 35,220 | Legal | Yes | Manual |
|
| 273 |
+
|
| 274 |
+
**Table 6**: Comparison of dependency treebanks. UD_Czech-CLTT (Sevcikova and Zabokrtsky, 2016) is included as the most comparable legal-domain UD treebank.
|
| 275 |
+
|
| 276 |
+
UDD-1 is approximately 3x larger than UD_Vietnamese-VTB in sentences and 4x larger in tokens, and is the first Vietnamese UD treebank from the legal domain. Compared to the only other legal-domain UD treebank (UD_Czech-CLTT), UDD-1 is approximately 9x larger in sentences, though CLTT is gold-standard while UDD-1 is silver-standard. While VnDT and BKTreebank are comparable in size, they do not follow UD annotation conventions and are therefore less suitable for cross-lingual UD research.
|
| 277 |
+
|
| 278 |
+
## 7. Data Format and Access
|
| 279 |
+
|
| 280 |
+
The treebank is distributed in CoNLL-U format following the UD specification, with the standard 10-column representation (ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC). Files follow the UD naming convention: `vi_udd-ud-{train,dev,test}.conllu`.
|
| 281 |
+
|
| 282 |
+
The dataset is also available in HuggingFace Datasets format with JSONL backing, loadable via:
|
| 283 |
+
|
| 284 |
+
```python
|
| 285 |
+
from datasets import load_dataset
|
| 286 |
+
dataset = load_dataset("undertheseanlp/UDD-1")
|
| 287 |
+
```
|
| 288 |
+
|
| 289 |
+
Validation is performed using the official UD validator (`udtools`), a vendored copy of which is included in the repository.
|
| 290 |
+
|
| 291 |
+
**Software versions.** Annotations were generated using Underthesea v2.1.0 with PyTorch 2.0 on CUDA-enabled GPUs. The conversion pipeline source code is available in the repository at `src/convert_to_ud.py`.
|
| 292 |
+
|
| 293 |
+
**License.** UDD-1 is released under the Creative Commons Attribution-ShareAlike 4.0 International License (CC BY-SA 4.0), consistent with the source corpus licensing.
|
| 294 |
+
|
| 295 |
+
## 8. Conclusion
|
| 296 |
+
|
| 297 |
+
We have presented UDD-1, a silver-standard Vietnamese Universal Dependencies treebank of 10,000 sentences from the legal domain. The treebank is generated through a pipeline combining neural dependency parsing with extensive rule-based post-processing to enforce UD v2 constraints. UDD-1 substantially increases the available Vietnamese UD data, introduces the legal domain to Vietnamese UD resources, and provides a reproducible methodology for creating UD-compliant treebanks at scale. As a silver-standard resource, users should be aware that annotation quality is bounded by the underlying parser (~76% LAS on news), and the XPOS column provides more reliable POS information than the post-processed UPOS column in cases of disagreement.
|
| 298 |
+
|
| 299 |
+
Future work includes: (1) conducting human evaluation of annotation quality through expert sampling of 100+ sentences to establish LAS/UAS on legal text, (2) expanding the treebank to additional domains (news, fiction), (3) developing an improved post-processing strategy that prefers changing deprels over forcing UPOS when XPOS strongly indicates the correct category, and (4) using UDD-1 as training data for domain-adapted Vietnamese dependency parsers.
|
| 300 |
+
|
| 301 |
+
## Limitations
|
| 302 |
+
|
| 303 |
+
1. **Annotation quality**: As a machine-generated treebank, UDD-1 is a silver-standard resource. Annotation accuracy is bounded by the performance of the underlying Underthesea parser, which achieves approximately 76% LAS on the VLSP 2020 benchmark (news domain). This implies roughly 1 in 4 dependency arcs may be incorrect, a substantial error rate for a treebank resource. Due to domain shift, accuracy on legal text may differ. No systematic human evaluation of annotation quality has been conducted.
|
| 304 |
+
|
| 305 |
+
2. **UPOS-deprel forcing**: The post-processing pipeline forces 8.6% of tokens to have UPOS tags that disagree with their XPOS-derived mapping, primarily to satisfy UD structural constraints. In cases where the parser assigns an incorrect dependency relation, this propagates the error to the UPOS column. The XPOS column is more reliable for POS information.
|
| 306 |
+
|
| 307 |
+
3. **Domain specificity and bias**: The current release covers only the legal domain, drawn from Vietnamese legal code documents. Legal text has distinctive syntactic properties (long nominal chains, enumerative constructions, formal register) that may not generalize to other domains. The corpus may overrepresent certain areas of law and is restricted to formal legislative language, excluding legal correspondence, court decisions, and contracts.
|
| 308 |
+
|
| 309 |
+
4. **Word segmentation errors**: The dependency parser's tokenizer occasionally produces incorrect word boundaries (e.g., merging adjacent words into a single token). These errors cannot be corrected by post-processing and affect downstream annotation quality.
|
| 310 |
+
|
| 311 |
+
5. **Lemmatization**: Lemmas are approximated by lowercasing, which does not capture true lemmatization. Vietnamese has minimal inflectional morphology, making this approximation reasonable but imperfect.
|
| 312 |
+
|
| 313 |
+
6. **Morphological features**: The FEATS column is left empty. While Vietnamese has limited morphology compared to Indo-European languages, features such as `Polarity`, `Mood`, and `Voice` could be annotated for functional words.
|
| 314 |
+
|
| 315 |
+
7. **Post-processing scope**: The rule-based corrections address structural violations detectable by the UD validator, but do not correct semantically incorrect parses (e.g., wrong head attachment that happens to satisfy UD constraints).
|
| 316 |
+
|
| 317 |
+
8. **Non-standard deprel subtypes**: Some of the 45 language-specific deprel subtypes (e.g., `acl:subj`, `obl:comp`, `mark:pcomp`) are not documented in the official UD Vietnamese guidelines and originate from the VLSP 2020 parser training data.
|
| 318 |
+
|
| 319 |
+
9. **Evaluation**: We provide annotation quality analysis (Section 5.6) including XPOS-UPOS consistency statistics and known error patterns, but do not evaluate against manually annotated gold data for this domain.
|
| 320 |
+
|
| 321 |
+
## References
|
| 322 |
+
|
| 323 |
+
- de Marneffe, M.-C., Manning, C.D., Nivre, J., and Zeman, D. (2021). Universal Dependencies. *Computational Linguistics*, 47(2):255--308.
|
| 324 |
+
|
| 325 |
+
- Dozat, T. and Manning, C.D. (2017). Deep Biaffine Attention for Neural Dependency Parsing. In *Proceedings of ICLR 2017*.
|
| 326 |
+
|
| 327 |
+
- Ha, M.L., Nguyen, T.M.H., Vu, X.L., Nguyen, T.L., Phan, T.H., and Le, V.C. (2020). VLSP 2020 Shared Task: Universal Dependency Parsing for Vietnamese. In *Proceedings of the 7th International Workshop on Vietnamese Language and Speech Processing (VLSP 2020)*.
|
| 328 |
+
|
| 329 |
+
- Huynh, D., Le, V.H., Truong, C.A., Huynh, C.M., Nguyen, Y.T., and Nguyen, Q.T. (2025). Domain Generalization in Vietnamese Dependency Parsing. In *Proceedings of SOICT 2024*, CCIS vol. 2350, Springer.
|
| 330 |
+
|
| 331 |
+
- Nguyen, D.Q. and Nguyen, A.T. (2020). PhoBERT: Pre-trained language models for Vietnamese. In *Findings of EMNLP 2020*, pp. 1037--1042.
|
| 332 |
+
|
| 333 |
+
- Nguyen, D.Q. and Nguyen, A.T. (2021). PhoNLP: A joint multi-task learning model for Vietnamese part-of-speech tagging, named entity recognition and dependency parsing. In *Proceedings of NAACL 2021: Demonstrations*, pp. 1--7.
|
| 334 |
+
|
| 335 |
+
- Nguyen, D.Q., Nguyen, D.Q., Pham, S.B., Nguyen, P.-T., and Le Nguyen, M. (2014). From Treebank Conversion to Automatic Dependency Parsing for Vietnamese. In *Proceedings of NLDB 2014*, pp. 196--207.
|
| 336 |
+
|
| 337 |
+
- Nguyen, D.-V., Phan, T.C., Nguyen, Q.-N., Nguyen, K.V., and Nguyen, N.L.-T. (2024). An Attempt to Develop a Neural Parser based on Simplified HPSG on Vietnamese. *arXiv preprint arXiv:2411.17270*.
|
| 338 |
+
|
| 339 |
+
- Nguyen, K.-H. (2018). BKTreebank: Building a Vietnamese Dependency Treebank. In *Proceedings of LREC 2018*, Miyazaki, Japan.
|
| 340 |
+
|
| 341 |
+
- Nguyen, P.-T., Vu, X.-L., Nguyen, T.M.H., Nguyen, V.-H., and Le, H.-P. (2009). Building a Large Syntactically-Annotated Corpus of Vietnamese. In *Proceedings of the Third Linguistic Annotation Workshop (LAW III)*, pp. 182--185.
|
| 342 |
+
|
| 343 |
+
- Nivre, J. et al. (2020). Universal Dependencies v2: An Evergrowing Multilingual Treebank Collection. In *Proceedings of LREC 2020*, pp. 4034--4043.
|
| 344 |
+
|
| 345 |
+
- Sevcikova, M. and Zabokrtsky, Z. (2016). Czech Legal Text Treebank. In *Proceedings of LREC 2016*, Portoroz, Slovenia.
|
| 346 |
+
|
| 347 |
+
- Vu, T., Nguyen, D.Q., Nguyen, D.Q., Dras, M., and Johnson, M. (2018). VnCoreNLP: A Vietnamese Natural Language Processing Toolkit. In *Proceedings of NAACL 2018: Demonstrations*, pp. 56--60.
|
| 348 |
+
|
| 349 |
+
- Yang, X. et al. (2025). UD-English-CHILDES: A Collected Resource of Gold and Silver Universal Dependencies Trees for Child Language Interactions. In *Proceedings of UDW 2025*.
|
| 350 |
+
|
| 351 |
+
- Zeman, D. et al. (2018). CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies. In *Proceedings of the CoNLL 2018 Shared Task*, pp. 1--21.
|
TECHNICAL_REPORT_REVIEW.md
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Review: UDD-1: A Large-Scale Vietnamese Universal Dependencies Treebank
|
| 2 |
+
|
| 3 |
+
**Reviewed**: TECHNICAL_REPORT.md (revised version)
|
| 4 |
+
**Date**: 2026-02-08
|
| 5 |
+
**Review format**: ACL Rolling Review (ARR)
|
| 6 |
+
**Note**: This is a re-review of the revised manuscript. The original review identified 6 major weaknesses; this review assesses how they were addressed.
|
| 7 |
+
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
## Paper Summary
|
| 11 |
+
|
| 12 |
+
This paper presents UDD-1, a silver-standard Vietnamese Universal Dependencies treebank of 10,000 sentences (230,709 tokens) from the legal domain. Annotations are machine-generated using the Underthesea NLP toolkit's biaffine parser trained on VLSP 2020 data, followed by a multi-pass rule-based post-processing pipeline enforcing UD v2 constraints. The revised manuscript adds an annotation quality assessment section with quantified XPOS-UPOS consistency analysis, documents language-specific deprel subtypes, and substantially expands the related work and limitations sections.
|
| 13 |
+
|
| 14 |
+
## Summary of Strengths
|
| 15 |
+
|
| 16 |
+
1. **Genuine resource contribution (Sections 1, 6, Table 6)**: UDD-1 fills a real gap as the first Vietnamese UD treebank outside the news domain and the first legal-domain UD treebank for any Southeast Asian language. At 10,000 sentences it is 3x larger than UD_Vietnamese-VTB. The cross-language comparison with UD_Czech-CLTT (Table 6) effectively contextualizes this contribution.
|
| 17 |
+
|
| 18 |
+
2. **Transparent quality analysis (Section 5.6)**: The revised manuscript adds a substantive annotation quality assessment that quantifies the XPOS-UPOS mismatch rate (8.6%), categorizes mismatches into justified (18.8%), forced (73.8%), and other (7.4%), and honestly describes known error patterns including word segmentation errors, UPOS forcing artifacts, and incomplete sentence filtering. This level of self-analysis is commendable and rare for silver treebank papers.
|
| 19 |
+
|
| 20 |
+
3. **Well-documented post-processing with trade-off analysis (Section 4.3)**: The four-pass pipeline is described in detail with a clear explanation of the UPOS-vs-deprel decision strategy. The explicit recommendation that "users who need reliable POS tags independent of syntactic relations should prefer the XPOS column" is practical and honest.
|
| 21 |
+
|
| 22 |
+
4. **Comprehensive related work (Sections 2.1--2.5)**: The revised manuscript covers Vietnamese treebanks (VnDT, BKTreebank, VTB, DGDT), parsing methods (VnCoreNLP through PhoNLP), shared tasks (VLSP 2019, 2020), silver treebank methodology (UD-CHILDES), legal domain treebanks (Czech CLTT), and the UD framework. All in-text citations have matching reference entries.
|
| 23 |
+
|
| 24 |
+
5. **Thorough limitations section (9 items)**: The expanded limitations section honestly addresses annotation quality (including the "1 in 4 arcs may be incorrect" framing), UPOS-deprel forcing, domain bias, word segmentation, lemmatization, missing morphological features, post-processing scope, non-standard subtypes, and the absence of gold evaluation.
|
| 25 |
+
|
| 26 |
+
6. **Reproducibility**: Software versions are specified (Underthesea v2.1.0, PyTorch 2.0), code is publicly available (`src/convert_to_ud.py`), data is on HuggingFace, and the split methodology is documented (Section 3.3).
|
| 27 |
+
|
| 28 |
+
## Summary of Weaknesses
|
| 29 |
+
|
| 30 |
+
1. **No gold-standard quality evaluation (Limitation 9)**: Despite adding the quality analysis section, the paper still lacks any evaluation against manually annotated gold data. The ~76% LAS figure is from the VLSP 2020 benchmark on news text, not on legal text. The actual annotation quality on legal text remains unknown. Even a small sample evaluation (50 sentences) would transform this from a "we think quality is around X" to a measured claim. This remains the most significant weakness.
|
| 31 |
+
|
| 32 |
+
2. **UPOS forcing still affects 73.8% of mismatched tokens**: The revised paper documents this issue transparently (strength), but does not fix it. The pipeline still forces UPOS to match potentially incorrect deprels for the majority of XPOS-UPOS mismatches. The paper acknowledges this as future work (Conclusion item 3) but it would strengthen the resource to implement the alternative strategy (preferring deprel changes when XPOS strongly indicates the correct category) before publication.
|
| 33 |
+
|
| 34 |
+
3. **Sequential split may introduce topic bias (Section 3.3)**: The paper now documents the split methodology, which is sequential by position. This means dev and test sets may come from a narrow subset of legal topics. The paper acknowledges this ("does not guarantee topic diversity") but the lack of stratification is a concern for downstream evaluation.
|
| 35 |
+
|
| 36 |
+
4. **VLSP 2019 shared task not cited**: Section 2.2 mentions "The VLSP 2019 shared task introduced dependency parsing evaluation with approximately 4,000 sentences" but provides no citation. This should either be cited properly or removed.
|
| 37 |
+
|
| 38 |
+
## Scores
|
| 39 |
+
|
| 40 |
+
| Criterion | Score | Previous | Change |
|
| 41 |
+
|-----------|:-----:|:--------:|:------:|
|
| 42 |
+
| **Soundness** | **3** | 2 | +1 |
|
| 43 |
+
| **Excitement** | **3** | 3 | -- |
|
| 44 |
+
| **Overall Assessment** | **3** | 2 | +1 |
|
| 45 |
+
| **Reproducibility** | **4** | 4 | -- |
|
| 46 |
+
| **Confidence** | **4** | 4 | -- |
|
| 47 |
+
|
| 48 |
+
### Score Justification
|
| 49 |
+
|
| 50 |
+
- **Soundness 3 (up from 2)**: The addition of Section 5.6 (Annotation Quality Assessment) with quantified XPOS-UPOS analysis and transparent discussion of known error patterns substantially addresses the previous concern about missing quality evaluation. The structural claims are now properly framed ("necessary but not sufficient conditions"). The remaining gap is the absence of gold evaluation on legal text, which keeps this at 3 rather than 4.
|
| 51 |
+
|
| 52 |
+
- **Excitement 3 (unchanged)**: The resource fills a genuine gap. The quality analysis is a useful contribution methodology-wise. If gold evaluation were added, this would move to 4.
|
| 53 |
+
|
| 54 |
+
- **Overall 3 (up from 2)**: The revised paper is now borderline acceptable. The transparent quality analysis, comprehensive related work, expanded limitations, and documented methodology bring the paper to a level appropriate for a Findings track or resource-focused venue. The primary remaining issue (no gold evaluation) is clearly scoped as future work.
|
| 55 |
+
|
| 56 |
+
- **Reproducibility 4 (unchanged)**: Software versions now specified. Code and data publicly available.
|
| 57 |
+
|
| 58 |
+
## Detailed Comments
|
| 59 |
+
|
| 60 |
+
### Technical Soundness
|
| 61 |
+
|
| 62 |
+
The revised manuscript substantially improves technical transparency. Key improvements:
|
| 63 |
+
|
| 64 |
+
1. **Section 5.6** provides a multi-dimensional quality assessment (parser baseline, structural validity, XPOS-UPOS consistency, known error patterns, comparison with CHILDES). The framing is honest: "These are necessary but not sufficient conditions for annotation quality---any tree satisfies the single-root constraint."
|
| 65 |
+
|
| 66 |
+
2. **Section 4.3** now documents the UPOS-vs-deprel decision strategy with explicit trade-off analysis, including the 8.6% mismatch rate and its breakdown. The recommendation to use XPOS for reliable POS is practical.
|
| 67 |
+
|
| 68 |
+
3. **Section 4.1** now explicitly discusses the fallback logic for failed parses and the parser's domain shift limitation.
|
| 69 |
+
|
| 70 |
+
Remaining concern: The ~76% LAS figure is used throughout as a quality proxy, but this is from news text. The paper should more carefully distinguish between "76% LAS on VLSP 2020 news benchmark" and "unknown LAS on legal text." The claim in the abstract ("~76% LAS on the VLSP 2020 benchmark") is accurate but could mislead readers into thinking this is the expected quality of the legal annotations.
|
| 71 |
+
|
| 72 |
+
### Novelty and Contribution
|
| 73 |
+
|
| 74 |
+
The contribution is primarily a new resource with a secondary methodological contribution (the documented pipeline). The revised paper strengthens the methodological side with the quality analysis framework (Section 5.6), which could serve as a template for other silver treebank papers. The legal domain contribution remains genuinely novel for Vietnamese.
|
| 75 |
+
|
| 76 |
+
### Clarity and Presentation
|
| 77 |
+
|
| 78 |
+
The paper is well-organized and reads clearly. The new sections integrate smoothly. Section numbering is now consistent (2.1--2.5). Tables are informative and well-formatted. The detailed listing of deprel subtypes in Section 5.4 is a useful addition.
|
| 79 |
+
|
| 80 |
+
One minor structural issue: the paper could benefit from a brief summary table or figure showing the full pipeline flow (raw text -> cleaning -> filtering -> parsing -> POS mapping -> post-processing -> validation -> output).
|
| 81 |
+
|
| 82 |
+
### Reproducibility Assessment
|
| 83 |
+
|
| 84 |
+
Improved from the previous version:
|
| 85 |
+
- Software versions specified: Underthesea v2.1.0, PyTorch 2.0
|
| 86 |
+
- Split methodology documented (Section 3.3)
|
| 87 |
+
- Code location specified (`src/convert_to_ud.py`)
|
| 88 |
+
- Data available on HuggingFace
|
| 89 |
+
- Validation tool vendored in repository
|
| 90 |
+
|
| 91 |
+
The remaining gap is the lack of the exact model checkpoint hash. Different Underthesea v2.1.0 installations might bundle slightly different model weights.
|
| 92 |
+
|
| 93 |
+
### Limitations and Ethics
|
| 94 |
+
|
| 95 |
+
The limitations section is now comprehensive (9 items) and honest. It covers annotation quality, UPOS forcing, domain bias (including the specific note about excluding legal correspondence and contracts), word segmentation, lemmatization, morphological features, post-processing scope, non-standard subtypes, and evaluation gaps. This is a model limitations section for a silver treebank paper.
|
| 96 |
+
|
| 97 |
+
The domain bias discussion (Limitation 3) could be slightly expanded to note whether the legal documents span different time periods, which could affect language use.
|
| 98 |
+
|
| 99 |
+
## Related Work Research
|
| 100 |
+
|
| 101 |
+
### Papers Found
|
| 102 |
+
|
| 103 |
+
| Paper | Year | Method | Results | Relevance |
|
| 104 |
+
|-------|------|--------|---------|-----------|
|
| 105 |
+
| PhoNLP (Nguyen & Nguyen) | 2021 | Multi-task PhoBERT | 79.11% LAS on VnDT | SOTA Vietnamese parsing -- **now cited** |
|
| 106 |
+
| DGDT (Huynh et al.) | 2025 | Domain generalization | 3-5% LAS degradation | Domain shift motivation -- **now cited** |
|
| 107 |
+
| UD-English-CHILDES (Yang et al.) | 2025 | Stanza silver | 83.3% LAS quality | Silver treebank reference -- **now cited** |
|
| 108 |
+
| Czech CLTT (Sevcikova & Zabokrtsky) | 2016 | Manual annotation | 1,121 sentences legal | Legal domain comparable -- **now cited** |
|
| 109 |
+
| HPSG parser (Nguyen et al.) | 2024 | HPSG + PhoBERT | 15% non-compliant trees | Treebank quality -- **now cited** |
|
| 110 |
+
|
| 111 |
+
### Missing Citations
|
| 112 |
+
|
| 113 |
+
- **VLSP 2019 shared task**: Mentioned in text (Section 2.2) but not formally cited with a reference entry.
|
| 114 |
+
- All other previously missing citations (DGDT, CHILDES, Czech CLTT, HPSG) are now properly cited.
|
| 115 |
+
|
| 116 |
+
### SOTA Verification
|
| 117 |
+
|
| 118 |
+
- **Claimed**: PhoNLP achieves 79.11% LAS, 85.47% UAS on VnDT -- **Verified correct**
|
| 119 |
+
- **Claimed**: Best VLSP 2020 system achieves 76.27% LAS -- **Verified correct**
|
| 120 |
+
- **Claimed**: UD_Vietnamese-VTB has 3,323 sentences -- **Verified correct**
|
| 121 |
+
- **Claimed**: Only one official Vietnamese UD treebank exists -- **Verified correct**
|
| 122 |
+
- **Claimed**: DGDT shows 3.27% UAS / 5.09% LAS degradation -- **Consistent with source**
|
| 123 |
+
- **Claimed**: Czech CLTT has 1,121 sentences, 35,220 tokens -- **Verified correct**
|
| 124 |
+
|
| 125 |
+
## Questions for Authors
|
| 126 |
+
|
| 127 |
+
1. Could you run the Underthesea parser on 50-100 sentences from the legal corpus that have been manually annotated by a Vietnamese linguist, to report in-domain LAS/UAS? This single addition would move the paper from a borderline case to a solid accept.
|
| 128 |
+
|
| 129 |
+
2. The sequential split (Section 3.3) may result in dev/test sets covering only a narrow range of legal topics. Have you verified that the legal topics in dev/test are representative of the training set?
|
| 130 |
+
|
| 131 |
+
3. What percentage of the 10,000 sentences hit the parser fallback (HEAD=0 for all tokens)? You estimate <0.1% -- could you verify this from logs or by checking the data for sentences where all non-root tokens have `dep` relations?
|
| 132 |
+
|
| 133 |
+
## Minor Issues
|
| 134 |
+
|
| 135 |
+
- Section 2.2: "The VLSP 2019 shared task introduced dependency parsing evaluation with approximately 4,000 sentences" has no citation. Either add the reference or remove the specific claim.
|
| 136 |
+
- Section 5.4: The example for `acl:subj` reads "quy định quy định..." which appears to repeat the same word. Clarify if this is intentional (legal Vietnamese uses such constructions) or a typo.
|
| 137 |
+
- The abstract mentions "Underthesea NLP toolkit (v2.1.0) Biaffine attention-based dependency parser" -- a minor grammar fix: use the possessive form, e.g. "the Underthesea NLP toolkit's (v2.1.0) Biaffine attention-based dependency parser".
|
| 138 |
+
|
| 139 |
+
## Suggestions for Improvement
|
| 140 |
+
|
| 141 |
+
1. **Add gold evaluation** (highest priority): Manually annotate 50-100 randomly sampled sentences and report LAS/UAS. This is the single most impactful improvement.
|
| 142 |
+
|
| 143 |
+
2. **Implement the improved UPOS-fixing strategy**: The conclusion mentions "developing an improved post-processing strategy that prefers changing deprels over forcing UPOS when XPOS strongly indicates the correct category." Implementing this before publication would reduce the 8.6% XPOS-UPOS mismatch rate and improve the resource quality.
|
| 144 |
+
|
| 145 |
+
3. **Add a pipeline diagram**: A figure showing the full annotation pipeline flow would improve readability.
|
| 146 |
+
|
| 147 |
+
4. **Cite or remove the VLSP 2019 reference**: Either find the proper citation or rephrase to avoid the uncited claim.
|
| 148 |
+
|
| 149 |
+
5. **Consider a stratified re-split**: A random or stratified split (preserving document boundaries) would produce more representative dev/test sets than the current sequential split.
|
| 150 |
+
|
| 151 |
+
## Assessment of Revisions (vs. Previous Review)
|
| 152 |
+
|
| 153 |
+
| Previous Weakness | Status | Notes |
|
| 154 |
+
|------------------|--------|-------|
|
| 155 |
+
| 1. No quality evaluation | **Partially addressed** | Section 5.6 added with XPOS-UPOS analysis. No gold evaluation yet. |
|
| 156 |
+
| 2. UPOS corruption in data | **Addressed (documented)** | Trade-off quantified (8.6%, 73.8% forced). XPOS recommended for reliable POS. |
|
| 157 |
+
| 3. UPOS-forcing systematic bias | **Addressed (documented)** | Decision strategy explained. Acknowledged as future work to improve. |
|
| 158 |
+
| 4. 77 deprel types undocumented | **Addressed** | 32 universal + 45 subtypes classified. 7 key subtypes listed and described. Non-standard subtypes flagged. |
|
| 159 |
+
| 5. Split methodology missing | **Addressed** | Section 3.3 documents sequential split with rationale. |
|
| 160 |
+
| 6. Incomplete related work | **Addressed** | DGDT, Czech CLTT, UD-CHILDES, HPSG parser, VLSP 2019/2020 all covered. 15 references, all verified. |
|
| 161 |
+
|
| 162 |
+
## Evaluation Checklist
|
| 163 |
+
|
| 164 |
+
### Methodology
|
| 165 |
+
- [x] Research questions clearly stated
|
| 166 |
+
- [x] Methods appropriate for research questions
|
| 167 |
+
- [x] Baselines appropriate and fairly compared
|
| 168 |
+
- [ ] Statistical significance properly addressed (N/A for resource paper)
|
| 169 |
+
- [x] Limitations of approach acknowledged
|
| 170 |
+
|
| 171 |
+
### Experiments
|
| 172 |
+
- [x] Datasets properly described (source, size, splits, preprocessing)
|
| 173 |
+
- [x] Evaluation metrics appropriate for the task (quality analysis in 5.6)
|
| 174 |
+
- [x] Training details sufficient for reproduction
|
| 175 |
+
- [x] Ablation studies or analysis provided (XPOS-UPOS breakdown)
|
| 176 |
+
- [x] Results support the claims made
|
| 177 |
+
|
| 178 |
+
### Presentation
|
| 179 |
+
- [x] Abstract accurately summarizes contributions
|
| 180 |
+
- [x] Introduction motivates the problem
|
| 181 |
+
- [x] Related work comprehensive and fair
|
| 182 |
+
- [x] Figures/tables readable and informative
|
| 183 |
+
- [x] Conclusion matches actual contributions
|
| 184 |
+
|
| 185 |
+
### Related Work Verification
|
| 186 |
+
- [x] Key prior work on same task is cited
|
| 187 |
+
- [x] Baseline comparisons use current methods
|
| 188 |
+
- [x] SOTA claims are accurate and up-to-date
|
| 189 |
+
- [x] No significant missing references (minor: VLSP 2019)
|
| 190 |
+
- [x] Fair characterization of competing approaches
|
| 191 |
+
|
| 192 |
+
### Responsible NLP
|
| 193 |
+
- [x] Limitations section present and substantive (9 items)
|
| 194 |
+
- [ ] Potential negative impacts discussed
|
| 195 |
+
- [ ] Data collection ethics addressed
|
| 196 |
+
- [x] Bias considerations mentioned (Limitation 3)
|
scripts/udtools/tests/test-cases/valid/empty-file.conllu → active_learning/README.md
RENAMED
|
File without changes
|
active_learning/references/research_active_learning_ud/README.md
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Literature Review: Active Learning for Universal Dependencies Treebank Construction
|
| 2 |
+
|
| 3 |
+
**Date**: 2026-02-10
|
| 4 |
+
**Research Questions**:
|
| 5 |
+
- RQ1: What active learning methods have been applied to dependency parsing/treebank construction?
|
| 6 |
+
- RQ2: How effective is active learning for reducing annotation cost in UD treebanks?
|
| 7 |
+
- RQ3: What query strategies work best for syntactic annotation tasks?
|
| 8 |
+
- RQ4: How has active learning been applied to low-resource languages?
|
| 9 |
+
|
| 10 |
+
## Executive Summary
|
| 11 |
+
|
| 12 |
+
Active learning (AL) has been applied to dependency parsing and treebank construction since the early 2000s, with a rich body of work spanning query strategies (uncertainty sampling, query-by-committee, diversity-aware batch selection), annotation granularity (full vs. partial annotation), and integration with modern pre-trained models. Key findings show that AL can reduce annotation costs by 30-70% for structured NLP tasks, that partial annotation strategies are particularly effective for dependency parsing, and that diversity-aware batch selection outperforms simple uncertainty sampling. However, **no published work specifically applies active learning to Vietnamese dependency treebank construction**, representing a clear research gap directly relevant to the UDD-1 project.
|
| 13 |
+
|
| 14 |
+
## Methodology
|
| 15 |
+
|
| 16 |
+
- **Search sources**: Semantic Scholar API, ACL Anthology (via WebSearch), Google Scholar, arXiv
|
| 17 |
+
- **Search terms**: "active learning dependency parsing", "active learning treebank annotation", "active learning universal dependencies", "query strategy parsing", "sample selection syntactic annotation", "active learning low-resource treebank", "active learning Vietnamese NLP"
|
| 18 |
+
- **Timeframe**: 1998-2025
|
| 19 |
+
- **Inclusion criteria**: Peer-reviewed NLP/CL venues; directly related to active learning for parsing or treebank construction
|
| 20 |
+
|
| 21 |
+
### PRISMA Flow
|
| 22 |
+
- Records identified: ~120
|
| 23 |
+
- Duplicates removed: ~40
|
| 24 |
+
- Records screened: ~80
|
| 25 |
+
- Records excluded: ~45 (tangentially related)
|
| 26 |
+
- Studies included: **35**
|
| 27 |
+
|
| 28 |
+
---
|
| 29 |
+
|
| 30 |
+
## Findings
|
| 31 |
+
|
| 32 |
+
### RQ1: What active learning methods have been applied to dependency parsing?
|
| 33 |
+
|
| 34 |
+
Active learning for dependency parsing has evolved through three main phases:
|
| 35 |
+
|
| 36 |
+
#### Phase 1: Foundational Work (2002-2009)
|
| 37 |
+
|
| 38 |
+
The earliest work applied standard AL strategies to constituency and dependency parsing:
|
| 39 |
+
|
| 40 |
+
- **Tang et al. (2002)** — *Active Learning for Statistical Natural Language Parsing* (ACL): First application of AL to statistical parsing using uncertainty-based and clustering-based sample selection. Showed that selecting sentences where the parser is most uncertain about the correct parse leads to faster learning curves than random selection.
|
| 41 |
+
|
| 42 |
+
- **Hwa (2004)** — *Sample Selection for Statistical Parsing* (Computational Linguistics, 30(3):253-276): The seminal paper establishing that uncertainty sampling is a robust criterion for selecting training examples for parsing. Demonstrated significant reduction in annotated training corpus size across two syntactic learning tasks and two parser types.
|
| 43 |
+
|
| 44 |
+
- **Baldridge & Osborne (2004)** — *Active Learning and the Total Cost of Annotation* (EMNLP): Critically showed that standard AL evaluation ignores that different instances have different annotation costs. Longer sentences cost more to annotate but are often selected by uncertainty-based methods, potentially negating AL's benefits when measuring by real annotation time rather than number of sentences.
|
| 45 |
+
|
| 46 |
+
- **Settles & Craven (2008)** — *An Analysis of Active Learning Strategies for Sequence Labeling Tasks* (EMNLP): Comprehensive analysis of AL strategies for structured NLP tasks. Compared uncertainty sampling, query-by-committee, and information density methods. Found that combining uncertainty with representativeness (information density) performs most robustly.
|
| 47 |
+
|
| 48 |
+
#### Phase 2: Parsing-Specific Innovations (2010-2018)
|
| 49 |
+
|
| 50 |
+
Researchers developed AL strategies tailored to the specific structure of dependency trees:
|
| 51 |
+
|
| 52 |
+
- **Sassano & Kurohashi (2010)** — *Using Smaller Constituents Rather Than Sentences in Active Learning for Japanese Dependency Parsing* (ACL): Key insight that selecting sub-sentential units (bunsetsus/chunks) rather than full sentences as the annotation unit improves AL efficiency for dependency parsing. This avoids the problem of annotating "easy" parts of a sentence just to get the "hard" parts annotated.
|
| 53 |
+
|
| 54 |
+
- **Mirroshandel & Nasr (2011)** — *Active Learning for Dependency Parsing Using Partially Annotated Sentences* (IWPT): Early exploration of partial annotation — only annotating the most uncertain dependency arcs within a sentence rather than the full tree. This reduces annotator effort per sentence.
|
| 55 |
+
|
| 56 |
+
- **Majidi & Crane (2013)** — *Active Learning for Dependency Parsing by A Committee of Parsers* (IWPT): Applied query-by-committee (QBC) to dependency parsing, where multiple diverse parsers vote on the most informative sentences. Disagreement among committee members indicates high informativeness.
|
| 57 |
+
|
| 58 |
+
- **Flannery & Mori (2015)** — *Combining Active Learning and Partial Annotation for Domain Adaptation of a Japanese Dependency Parser* (IWPT): Combined AL with partial annotation for domain adaptation, showing that this combination is particularly effective when adapting parsers to new domains.
|
| 59 |
+
|
| 60 |
+
- **Li et al. (2016)** — *Active Learning for Dependency Parsing with Partial Annotation* (ACL): The most comprehensive study of partial annotation for dependency parsing AL. Proposed selecting the most informative sub-structures (individual dependency arcs) for annotation using head entropy as the selection criterion. Showed that partial annotation AL achieves comparable performance to full annotation AL with significantly less annotation effort.
|
| 61 |
+
|
| 62 |
+
- **van Cranenburgh (2018)** — *Active DOP: A Constituency Treebank Annotation Tool with Online Learning* (COLING): Practical annotation tool that incrementally learns from each annotated sentence and uses the updated model to propose annotations for the next sentence. Supports active learning for treebank annotation.
|
| 63 |
+
|
| 64 |
+
#### Phase 3: Modern Deep Learning Era (2019-2025)
|
| 65 |
+
|
| 66 |
+
- **Shi et al. (2021)** — *Diversity-Aware Batch Active Learning for Dependency Parsing* (NAACL): State-of-the-art work using determinantal point processes (DPPs) to enforce diversity in batch AL for dependency parsing. Showed that diversity-aware batch selection is superior to diversity-agnostic strategies, especially in early learning stages. Key finding: simply selecting the most uncertain sentences often yields redundant batches; enforcing diversity yields faster learning curves.
|
| 67 |
+
|
| 68 |
+
- **Zhang et al. (2023)** — *Data-efficient Active Learning for Structured Prediction with Partial Annotation and Self-Training* (Findings of EMNLP): Most recent comprehensive work. Combines partial annotation (selecting most informative sub-structures) with self-training (using model predictions as pseudo-labels for un-annotated parts). Uses an adaptive error estimator to dynamically adjust the partial selection ratio. Evaluated across four structured prediction tasks including dependency parsing.
|
| 69 |
+
|
| 70 |
+
### RQ2: How effective is active learning for reducing annotation cost?
|
| 71 |
+
|
| 72 |
+
| Study | Task | Cost Reduction | Metric |
|
| 73 |
+
|-------|------|---------------|--------|
|
| 74 |
+
| Hwa (2004) | Constituency parsing | ~50% fewer sentences | Same accuracy |
|
| 75 |
+
| Sassano & Kurohashi (2010) | Japanese dep. parsing | ~30% fewer annotations | Same UAS |
|
| 76 |
+
| Li et al. (2016) | Dep. parsing (partial) | ~40-60% less arc annotation | Same LAS |
|
| 77 |
+
| Shi et al. (2021) | Dep. parsing (DPP) | ~20-30% fewer sentences vs. random | Same LAS in early stages |
|
| 78 |
+
| Zhang et al. (2023) | Structured prediction | Significant reduction via partial + self-training | Across 4 tasks |
|
| 79 |
+
|
| 80 |
+
**Key insight for UDD-1**: The combination of partial annotation + self-training (Zhang et al., 2023) is the most promising modern approach. For a silver-standard treebank like UDD-1 that already has automatic annotations, an AL approach could:
|
| 81 |
+
1. Identify sentences where the parser is most uncertain
|
| 82 |
+
2. Select only the most uncertain dependency arcs within those sentences for human correction
|
| 83 |
+
3. Use the corrected arcs to retrain and improve the overall treebank
|
| 84 |
+
|
| 85 |
+
### RQ3: What query strategies work best for syntactic annotation?
|
| 86 |
+
|
| 87 |
+
#### Strategy Comparison
|
| 88 |
+
|
| 89 |
+
| Strategy | Mechanism | Strengths | Weaknesses |
|
| 90 |
+
|----------|-----------|-----------|------------|
|
| 91 |
+
| **Uncertainty Sampling** | Select sentences/arcs where parser confidence is lowest | Simple, robust across models | May select outliers; ignores diversity |
|
| 92 |
+
| **Head Entropy** | Measure entropy of the distribution over possible heads for each token | Well-suited for dependency parsing | Computationally expensive for large output spaces |
|
| 93 |
+
| **Query-by-Committee** | Multiple parsers disagree on annotation | Good diversity; model-agnostic | Requires training multiple models |
|
| 94 |
+
| **DPP Batch Selection** | Enforce diversity in each batch via DPPs | Best for batch AL; avoids redundancy | More complex implementation |
|
| 95 |
+
| **Partial Annotation** | Only annotate most uncertain sub-structures | Maximizes information per annotator action | Requires parser that handles partial trees |
|
| 96 |
+
| **Information Density** | Weight uncertainty by representativeness | Avoids outlier selection | Requires density estimation |
|
| 97 |
+
|
| 98 |
+
#### Best Practices
|
| 99 |
+
|
| 100 |
+
1. **For sentence selection**: Use diversity-aware batch selection (DPPs) combined with uncertainty scoring (Shi et al., 2021)
|
| 101 |
+
2. **For arc-level selection**: Use head entropy to select the most informative individual arcs for annotation (Li et al., 2016)
|
| 102 |
+
3. **For cost-aware AL**: Account for sentence length when computing annotation cost (Baldridge & Osborne, 2004)
|
| 103 |
+
4. **For modern pipelines**: Combine partial annotation with self-training for maximum efficiency (Zhang et al., 2023)
|
| 104 |
+
5. **Cold start**: Use pre-trained language model features to bootstrap selection before any annotations exist (Yuan et al., 2020)
|
| 105 |
+
|
| 106 |
+
### RQ4: How has active learning been applied to low-resource languages?
|
| 107 |
+
|
| 108 |
+
#### Direct Applications to Treebank Construction
|
| 109 |
+
|
| 110 |
+
- **Irish Treebank** (Lynn & Foster, 2015) — *Active Learning and the Irish Treebank*: Compared AL bootstrapping to random selection for manual revision of an automatically parsed Irish treebank. Found that AL outperforms passive learning, though when annotation effort is measured by actual time (not sentence count), the advantage diminishes. Key lesson: sentence-level AL may not account for the varying difficulty of different sentences.
|
| 111 |
+
|
| 112 |
+
- **Pomak Treebank** (2023) — *Methodological Issues Regarding the Semi-automatic UD Treebank Creation of Under-resourced Languages: The Case of Pomak* (UDW @ GURT 2023, ACL Anthology): Most directly relevant to UDD-1. Applied active annotation to create a UD treebank for Pomak (6,351 sentences). Used a circular process: annotation prediction → expert correction → model retraining. Validated that active annotation schemes significantly reduce annotation time for under-resourced UD treebanks.
|
| 113 |
+
|
| 114 |
+
- **Exploring Active Learning Approaches in Treebank Development** (Springer, 2023-2024): Proposed practical active annotation schemes for under-resourced languages, where experts strategically select sentences for annotation. Applied to newly created Pomak corpus published in UD treebanks (300 annotated sentences initial study).
|
| 115 |
+
|
| 116 |
+
- **Dutch UD Treebank** (Bouma & van Noord, 2017) — *Increasing Return on Annotation Investment: The Automatic Construction of a Universal Dependency Treebank for Dutch* (UDW 2017): While not strictly AL, demonstrated how to maximize annotation investment when constructing UD treebanks by using automatic parsing + selective manual correction.
|
| 117 |
+
|
| 118 |
+
#### Vietnamese-Specific Resources (No AL Applied Yet)
|
| 119 |
+
|
| 120 |
+
- **BKTreebank** (Nguyen et al., 2018) — Vietnamese dependency treebank built manually without AL
|
| 121 |
+
- **VnDT** (Nguyen et al., 2014) — 10K+ sentence Vietnamese dependency treebank, also built without AL
|
| 122 |
+
- **VTB** — Vietnamese Treebank in UD, manually annotated
|
| 123 |
+
- **PhoNLP** (Nguyen & Tuan Nguyen, 2021) — Multi-task model for Vietnamese POS/NER/parsing, potential AL base model
|
| 124 |
+
|
| 125 |
+
**Gap**: No published work applies active learning specifically to Vietnamese dependency treebank construction.
|
| 126 |
+
|
| 127 |
+
---
|
| 128 |
+
|
| 129 |
+
## State-of-the-Art Summary
|
| 130 |
+
|
| 131 |
+
### Current Best Methods for AL + Dependency Parsing
|
| 132 |
+
|
| 133 |
+
| Method | Year | Key Innovation | Best For |
|
| 134 |
+
|--------|------|---------------|----------|
|
| 135 |
+
| DPP Batch AL (Shi et al.) | 2021 | Diversity-aware batch selection | Sentence-level selection |
|
| 136 |
+
| Partial Annotation AL (Li et al.) | 2016 | Arc-level selection with head entropy | Minimizing annotation per sentence |
|
| 137 |
+
| Partial + Self-Training (Zhang et al.) | 2023 | Combining partial annotation with pseudo-labels | Maximum cost efficiency |
|
| 138 |
+
| Active DOP (van Cranenburgh) | 2018 | Online learning annotation tool | Practical annotation workflow |
|
| 139 |
+
| Pomak Active Annotation | 2023 | Circular AL for UD treebanks | Low-resource UD treebank creation |
|
| 140 |
+
|
| 141 |
+
### Trends
|
| 142 |
+
|
| 143 |
+
1. **From sentence to sub-structure selection**: Moving from selecting whole sentences to selecting individual arcs/tokens for annotation
|
| 144 |
+
2. **Combining AL with self-training**: Using model predictions as pseudo-labels for unannotated parts
|
| 145 |
+
3. **Diversity-aware batch selection**: Using DPPs or similar methods to avoid redundant selections
|
| 146 |
+
4. **Cost-aware evaluation**: Measuring annotation cost by actual time, not just instance count
|
| 147 |
+
5. **LLM integration**: Emerging work on using LLMs as annotators within AL loops (Xia et al., ACL 2025 survey)
|
| 148 |
+
|
| 149 |
+
---
|
| 150 |
+
|
| 151 |
+
## Research Gaps
|
| 152 |
+
|
| 153 |
+
1. **No AL for Vietnamese treebanks**: No published work applies AL to Vietnamese dependency treebank construction — this is the primary gap relevant to UDD-1
|
| 154 |
+
2. **Silver-to-gold AL pipeline**: Limited work on using AL to selectively upgrade silver-standard treebanks to gold standard (directly relevant to UDD-1's post-processing pipeline)
|
| 155 |
+
3. **AL for UD-specific annotation**: Most AL work is for general dependency parsing; few studies specifically target UD annotation standards and their constraints
|
| 156 |
+
4. **Domain-specific AL for legal text**: No work on AL for legal domain dependency parsing (UDD-1's primary domain)
|
| 157 |
+
5. **AL with Vietnamese pre-trained models**: No work combining PhoBERT/PhoNLP with AL strategies for treebank construction
|
| 158 |
+
|
| 159 |
+
---
|
| 160 |
+
|
| 161 |
+
## Recommendations for UDD-1
|
| 162 |
+
|
| 163 |
+
### Proposed Active Learning Pipeline for UDD-1 Gold Standard Creation
|
| 164 |
+
|
| 165 |
+
Based on this literature review, we recommend the following approach:
|
| 166 |
+
|
| 167 |
+
1. **Parse with current pipeline** (already done): Use Underthesea + fix_syntax_errors() to generate silver-standard trees
|
| 168 |
+
2. **Train initial parser**: Train a dependency parser (e.g., fine-tuned PhoBERT) on the silver-standard UDD-1 data
|
| 169 |
+
3. **Compute arc-level uncertainty**: For each token in each sentence, compute the head entropy (Li et al., 2016) — the entropy of the distribution over possible heads
|
| 170 |
+
4. **Select sentences via DPP batch selection**: Use diversity-aware batch selection (Shi et al., 2021) to select a diverse, informative subset of sentences
|
| 171 |
+
5. **Partial annotation**: Within selected sentences, only present the most uncertain arcs to human annotators for correction (Zhang et al., 2023)
|
| 172 |
+
6. **Self-training**: Use corrected arcs + model predictions on remaining arcs to retrain the parser
|
| 173 |
+
7. **Iterate**: Repeat steps 3-6 until quality targets are met
|
| 174 |
+
|
| 175 |
+
### Expected Benefits
|
| 176 |
+
- **Reduce annotation cost by 50-70%** compared to manually annotating all 20K sentences
|
| 177 |
+
- **Prioritize the hardest cases**: Focus human effort on the 8.6% XPOS-UPOS mismatches and uncertain parses
|
| 178 |
+
- **Produce gold-standard evaluation set**: Create a high-quality subset for reliable evaluation (addressing the review's top weakness)
|
| 179 |
+
|
| 180 |
+
---
|
| 181 |
+
|
| 182 |
+
## References
|
| 183 |
+
|
| 184 |
+
See [papers.md](papers.md) for full paper database and [bibliography.bib](bibliography.bib) for BibTeX entries.
|
active_learning/references/research_active_learning_ud/bibliography.bib
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
% Active Learning for Dependency Parsing and Treebank Construction
|
| 2 |
+
% Bibliography for UDD-1 Active Learning Research
|
| 3 |
+
|
| 4 |
+
@article{hwa2004sample,
|
| 5 |
+
title={Sample Selection for Statistical Parsing},
|
| 6 |
+
author={Hwa, Rebecca},
|
| 7 |
+
journal={Computational Linguistics},
|
| 8 |
+
volume={30},
|
| 9 |
+
number={3},
|
| 10 |
+
pages={253--276},
|
| 11 |
+
year={2004},
|
| 12 |
+
publisher={MIT Press}
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
@inproceedings{tang2002active,
|
| 16 |
+
title={Active Learning for Statistical Natural Language Parsing},
|
| 17 |
+
author={Tang, Min and Luo, Xiaoqiang and Roukos, Salim},
|
| 18 |
+
booktitle={Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics},
|
| 19 |
+
pages={120--127},
|
| 20 |
+
year={2002},
|
| 21 |
+
url={https://aclanthology.org/P02-1016/}
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
@inproceedings{li2016active,
|
| 25 |
+
title={Active Learning for Dependency Parsing with Partial Annotation},
|
| 26 |
+
author={Li, Zhenghua and Zhang, Min and Zhang, Yue and Liu, Zhanyi and Chen, Wenliang and Wu, Hua and Wang, Haifeng},
|
| 27 |
+
booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
|
| 28 |
+
pages={344--354},
|
| 29 |
+
year={2016},
|
| 30 |
+
url={https://aclanthology.org/P16-1033/}
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
@inproceedings{shi2021diversity,
|
| 34 |
+
title={Diversity-Aware Batch Active Learning for Dependency Parsing},
|
| 35 |
+
author={Shi, Tianze and Benton, Adrian and Malioutov, Igor and Irsoy, Ozan},
|
| 36 |
+
booktitle={Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
|
| 37 |
+
year={2021},
|
| 38 |
+
url={https://aclanthology.org/2021.naacl-main.207/}
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
@inproceedings{mirroshandel2011active,
|
| 42 |
+
title={Active Learning for Dependency Parsing Using Partially Annotated Sentences},
|
| 43 |
+
author={Mirroshandel, Seyed Abolghasem and Nasr, Alexis},
|
| 44 |
+
booktitle={Proceedings of the 12th International Conference on Parsing Technologies},
|
| 45 |
+
year={2011},
|
| 46 |
+
url={https://aclanthology.org/W11-2917/}
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
@inproceedings{majidi2013active,
|
| 50 |
+
title={Active Learning for Dependency Parsing by A Committee of Parsers},
|
| 51 |
+
author={Majidi, Saeed and Crane, Gregory},
|
| 52 |
+
booktitle={Proceedings of the 13th International Conference on Parsing Technologies},
|
| 53 |
+
pages={98--105},
|
| 54 |
+
year={2013},
|
| 55 |
+
url={https://aclanthology.org/W13-5711/}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
@inproceedings{zhang2023data,
|
| 59 |
+
title={Data-efficient Active Learning for Structured Prediction with Partial Annotation and Self-Training},
|
| 60 |
+
author={Zhang, Zhisong and Strubell, Emma and Hovy, Eduard},
|
| 61 |
+
booktitle={Findings of the Association for Computational Linguistics: EMNLP 2023},
|
| 62 |
+
year={2023},
|
| 63 |
+
url={https://aclanthology.org/2023.findings-emnlp.865/}
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
@inproceedings{sassano2010using,
|
| 67 |
+
title={Using Smaller Constituents Rather Than Sentences in Active Learning for Japanese Dependency Parsing},
|
| 68 |
+
author={Sassano, Manabu and Kurohashi, Sadao},
|
| 69 |
+
booktitle={Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics},
|
| 70 |
+
year={2010},
|
| 71 |
+
url={https://aclanthology.org/P10-1037/}
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
@inproceedings{flannery2015combining,
|
| 75 |
+
title={Combining Active Learning and Partial Annotation for Domain Adaptation of a Japanese Dependency Parser},
|
| 76 |
+
author={Flannery, Daniel and Mori, Shinsuke},
|
| 77 |
+
booktitle={Proceedings of the 14th International Conference on Parsing Technologies},
|
| 78 |
+
year={2015},
|
| 79 |
+
url={https://aclanthology.org/W15-2202/}
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
@inproceedings{zhang2017dependency,
|
| 83 |
+
title={Dependency Parsing with Partial Annotations: An Empirical Comparison},
|
| 84 |
+
author={Zhang, Yue and Li, Zhenghua and Lang, Jun and Xia, Qingrong and Zhang, Min},
|
| 85 |
+
booktitle={Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
|
| 86 |
+
year={2017},
|
| 87 |
+
url={https://aclanthology.org/I17-1006/}
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
@inproceedings{baldridge2004active,
|
| 91 |
+
title={Active Learning and the Total Cost of Annotation},
|
| 92 |
+
author={Baldridge, Jason and Osborne, Miles},
|
| 93 |
+
booktitle={Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing},
|
| 94 |
+
year={2004},
|
| 95 |
+
url={https://aclanthology.org/W04-3202/}
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
@inproceedings{settles2008analysis,
|
| 99 |
+
title={An Analysis of Active Learning Strategies for Sequence Labeling Tasks},
|
| 100 |
+
author={Settles, Burr and Craven, Mark},
|
| 101 |
+
booktitle={Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing},
|
| 102 |
+
year={2008},
|
| 103 |
+
url={https://aclanthology.org/D08-1112/}
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
@inproceedings{pomak2023udw,
|
| 107 |
+
title={Methodological Issues Regarding the Semi-automatic {UD} Treebank Creation of Under-resourced Languages: The Case of {Pomak}},
|
| 108 |
+
booktitle={Proceedings of the Universal Dependencies Workshop (UDW) @ GURT 2023},
|
| 109 |
+
year={2023},
|
| 110 |
+
url={https://aclanthology.org/2023.udw-1.4/}
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
@inproceedings{bouma2017increasing,
|
| 114 |
+
title={Increasing Return on Annotation Investment: The Automatic Construction of a Universal Dependency Treebank for {Dutch}},
|
| 115 |
+
author={Bouma, Gosse and van Noord, Gertjan},
|
| 116 |
+
booktitle={Proceedings of the NoDaLiDa 2017 Workshop on Universal Dependencies},
|
| 117 |
+
year={2017},
|
| 118 |
+
url={https://aclanthology.org/W17-0403/}
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
@inproceedings{vancranenburgh2018active,
|
| 122 |
+
title={Active {DOP}: A Constituency Treebank Annotation Tool with Online Learning},
|
| 123 |
+
author={van Cranenburgh, Andreas},
|
| 124 |
+
booktitle={Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations},
|
| 125 |
+
year={2018},
|
| 126 |
+
url={https://aclanthology.org/C18-2009/}
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
@inproceedings{lynn2015active,
|
| 130 |
+
title={Active Learning and the {Irish} Treebank},
|
| 131 |
+
author={Lynn, Teresa and Foster, Jennifer and Dras, Mark},
|
| 132 |
+
booktitle={Proceedings of the Australasian Language Technology Association Workshop 2015},
  year={2015}
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
@inproceedings{rehbein2018sprucing,
|
| 136 |
+
title={Sprucing up the Trees -- Error Detection in Treebanks},
|
| 137 |
+
author={Rehbein, Ines and Ruppenhofer, Josef},
|
| 138 |
+
booktitle={Proceedings of the 27th International Conference on Computational Linguistics},
|
| 139 |
+
year={2018},
|
| 140 |
+
url={https://aclanthology.org/C18-1010/}
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
@inproceedings{zhang2022survey,
|
| 144 |
+
title={A Survey of Active Learning for Natural Language Processing},
|
| 145 |
+
author={Zhang, Zhisong and Strubell, Emma and Hovy, Eduard},
|
| 146 |
+
booktitle={Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
|
| 147 |
+
year={2022},
|
| 148 |
+
url={https://aclanthology.org/2022.emnlp-main.414/}
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
@techreport{olsson2009literature,
|
| 152 |
+
title={A Literature Survey of Active Machine Learning in the Context of Natural Language Processing},
|
| 153 |
+
author={Olsson, Fredrik},
|
| 154 |
+
institution={Swedish Institute of Computer Science},
|
| 155 |
+
number={T2009:06},
|
| 156 |
+
year={2009}
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
@inproceedings{xia2025selection,
|
| 160 |
+
title={From Selection to Generation: A Survey of {LLM}-based Active Learning},
|
| 161 |
+
author={Xia, Yu and others},
|
| 162 |
+
booktitle={Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics},
|
| 163 |
+
year={2025},
|
| 164 |
+
url={https://aclanthology.org/2025.acl-long.708/}
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
@article{rotman2019deep,
|
| 168 |
+
title={Deep Contextualized Self-training for Low Resource Dependency Parsing},
|
| 169 |
+
author={Rotman, Guy and Reichart, Roi},
|
| 170 |
+
journal={Transactions of the Association for Computational Linguistics},
|
| 171 |
+
volume={7},
|
| 172 |
+
pages={695--713},
|
| 173 |
+
year={2019},
|
| 174 |
+
url={https://aclanthology.org/Q19-1044/}
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
@inproceedings{schroder2022revisiting,
|
| 178 |
+
title={Revisiting Uncertainty-based Query Strategies for Active Learning with Transformers},
|
| 179 |
+
author={Schr{\"o}der, Christopher and Niekler, Andreas and Potthast, Martin},
|
| 180 |
+
booktitle={Findings of the Association for Computational Linguistics: ACL 2022},
|
| 181 |
+
year={2022},
|
| 182 |
+
url={https://aclanthology.org/2022.findings-acl.172/}
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
@inproceedings{yuan2020cold,
|
| 186 |
+
title={Cold-start Active Learning through Self-supervised Language Modeling},
|
| 187 |
+
author={Yuan, Michelle and Lin, Hsuan-Tien and Boyd-Graber, Jordan},
|
| 188 |
+
booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing},
|
| 189 |
+
year={2020},
|
| 190 |
+
url={https://aclanthology.org/2020.emnlp-main.637/}
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
@inproceedings{brants1998automation,
|
| 194 |
+
title={Automation of Treebank Annotation},
|
| 195 |
+
author={Brants, Thorsten and Skut, Wojciech},
|
| 196 |
+
booktitle={New Methods in Language Processing and Computational Natural Language Learning},
|
| 197 |
+
year={1998},
|
| 198 |
+
url={https://aclanthology.org/W98-1207/}
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
% Vietnamese NLP Resources
|
| 202 |
+
@inproceedings{nguyen2018bktreebank,
|
| 203 |
+
title={{BKTreebank}: Building a {Vietnamese} Dependency Treebank},
|
| 204 |
+
author={Nguyen, Kiem-Hieu and others},
|
| 205 |
+
booktitle={Proceedings of the Eleventh International Conference on Language Resources and Evaluation},
|
| 206 |
+
year={2018},
|
| 207 |
+
url={https://aclanthology.org/L18-1341/}
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
@inproceedings{nguyen2018vncorenlp,
|
| 211 |
+
title={{VnCoreNLP}: A {Vietnamese} Natural Language Processing Toolkit},
|
| 212 |
+
author={Nguyen, Dat Quoc and others},
|
| 213 |
+
booktitle={Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations},
|
| 214 |
+
year={2018},
|
| 215 |
+
url={https://aclanthology.org/N18-5012/}
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
@inproceedings{arora2009estimating,
|
| 219 |
+
title={Estimating Annotation Cost for Active Learning in a Multi-Annotator Environment},
|
| 220 |
+
author={Arora, Shilpa and Nyberg, Eric and Ros{\'e}, Carolyn P.},
|
| 221 |
+
booktitle={Proceedings of the NAACL HLT 2009 Workshop on Active Learning for Natural Language Processing},
|
| 222 |
+
year={2009},
|
| 223 |
+
url={https://aclanthology.org/W09-1903/}
|
| 224 |
+
}
|
active_learning/references/research_active_learning_ud/comparison.md
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Comparison Tables: Active Learning Methods for Dependency Parsing
|
| 2 |
+
|
| 3 |
+
## Method Comparison
|
| 4 |
+
|
| 5 |
+
| Paper | Year | Venue | Query Strategy | Annotation Unit | Language | Parser Type | Key Result |
|
| 6 |
+
|-------|------|-------|---------------|-----------------|----------|-------------|------------|
|
| 7 |
+
| Tang et al. | 2002 | ACL | Uncertainty + Clustering | Sentence | English | Statistical | AL reduces corpus size |
|
| 8 |
+
| Hwa | 2004 | CL | Uncertainty sampling | Sentence | English | Statistical | ~50% fewer sentences |
|
| 9 |
+
| Baldridge & Osborne | 2004 | EMNLP | Various (cost-aware) | Sentence | English | HPSG | Cost matters more than strategy |
|
| 10 |
+
| Settles & Craven | 2008 | EMNLP | Uncertainty + Info. Density | Token/Sequence | English | CRF | Information density is most robust |
|
| 11 |
+
| Sassano & Kurohashi | 2010 | ACL | Uncertainty | Sub-sentential (bunsetsu) | Japanese | MST parser | ~30% annotation reduction |
|
| 12 |
+
| Mirroshandel & Nasr | 2011 | IWPT | Uncertainty | Partial arcs | French | Graph-based | Partial annotation beneficial |
|
| 13 |
+
| Majidi & Crane | 2013 | IWPT | Query-by-Committee | Sentence | English | Multiple | QBC effective for dep. parsing |
|
| 14 |
+
| Flannery & Mori | 2015 | IWPT | AL + Partial | Partial arcs | Japanese | Transition | Effective for domain adaptation |
|
| 15 |
+
| Li et al. | 2016 | ACL | Head entropy | Individual arcs | Chinese/English | Neural | 40-60% less arc annotation |
|
| 16 |
+
| Zhang et al. (parsing) | 2017 | IJCNLP | Various | Partial trees | English | Neural | Forest training > constrained decoding |
|
| 17 |
+
| Shi et al. | 2021 | NAACL | DPP diversity + uncertainty | Sentence (batch) | English | Biaffine | ~20-30% fewer sentences vs random |
|
| 18 |
+
| Zhang et al. | 2023 | EMNLP-F | Partial + Self-training | Sub-structures | Multiple | Neural | Best cost reduction across 4 tasks |
|
| 19 |
+
|
| 20 |
+
## Query Strategy Effectiveness Ranking (for Dependency Parsing)
|
| 21 |
+
|
| 22 |
+
| Rank | Strategy | Pros | Cons | Best When |
|
| 23 |
+
|------|----------|------|------|-----------|
|
| 24 |
+
| 1 | **DPP Batch + Uncertainty** (Shi 2021) | Diverse + informative batches | Complex implementation | Batch selection, sufficient compute |
|
| 25 |
+
| 2 | **Head Entropy + Partial** (Li 2016) | Minimizes per-sentence effort | Requires probabilistic parser | Arc-level correction of silver data |
|
| 26 |
+
| 3 | **Partial + Self-Training** (Zhang 2023) | Maximum cost reduction | Requires good initial model | Upgrading silver to gold |
|
| 27 |
+
| 4 | **Query-by-Committee** (Majidi 2013) | Model-agnostic | Needs multiple parsers | Multiple parsers available |
|
| 28 |
+
| 5 | **Uncertainty Sampling** (Hwa 2004) | Simple, robust | May select outliers | Quick baseline |
|
| 29 |
+
| 6 | **Information Density** (Settles 2008) | Avoids outliers | Requires density estimation | Diverse corpora |
|
| 30 |
+
|
| 31 |
+
## Active Learning for Treebank Construction: Case Studies
|
| 32 |
+
|
| 33 |
+
| Treebank | Language | Resource Level | Method | Sentences | Key Finding |
|
| 34 |
+
|----------|----------|---------------|--------|-----------|-------------|
|
| 35 |
+
| PTB (simulated) | English | High | DPP batch AL | ~40K | 20-30% reduction (Shi 2021) |
|
| 36 |
+
| Chinese (simulated) | Chinese | High | Head entropy partial | ~16K | 40-60% less arc annotation (Li 2016) |
|
| 37 |
+
| Irish Treebank | Irish | Low | AL bootstrapping | ~1K | AL helps but time savings unclear (Lynn 2015) |
|
| 38 |
+
| Pomak-Philotis | Pomak | Very Low | Active annotation | 6,351 | Circular AL effective for UD (2023) |
|
| 39 |
+
| Dutch UD | Dutch | Medium | Automatic + selective manual | Large | Maximize ROI on annotation (Bouma 2017) |
|
| 40 |
+
| **UDD-1 (proposed)** | **Vietnamese** | **Medium** | **Partial + self-training** | **20,000** | **Target: 50-70% cost reduction** |
|
| 41 |
+
|
| 42 |
+
## Applicability to UDD-1
|
| 43 |
+
|
| 44 |
+
### Most Applicable Methods (Ranked by Suitability)
|
| 45 |
+
|
| 46 |
+
| Rank | Method | Why Suitable for UDD-1 | Implementation Complexity |
|
| 47 |
+
|------|--------|----------------------|--------------------------|
|
| 48 |
+
| 1 | **Partial Annotation + Self-Training** (Zhang 2023) | UDD-1 already has silver parses; only need to correct uncertain arcs | Medium |
|
| 49 |
+
| 2 | **Head Entropy Selection** (Li 2016) | Can compute arc-level uncertainty on current UDD-1 parser | Medium |
|
| 50 |
+
| 3 | **DPP Batch Selection** (Shi 2021) | Diverse sentence selection across legal/news domains | High |
|
| 51 |
+
| 4 | **Error Detection** (Rehbein 2018) | Can identify likely errors in silver standard for targeted correction | Medium |
|
| 52 |
+
| 5 | **QBC with Multiple Parsers** (Majidi 2013) | Can use Underthesea + VnCoreNLP + PhoNLP as committee | Low |
|
| 53 |
+
| 6 | **Active Annotation Loop** (Pomak 2023) | Proven methodology for UD treebanks | Low |
|
| 54 |
+
|
| 55 |
+
### Recommended Hybrid Approach for UDD-1
|
| 56 |
+
|
| 57 |
+
```
|
| 58 |
+
Step 1: Train PhoBERT-based parser on silver UDD-1 data
|
| 59 |
+
Step 2: Compute head entropy for all tokens (uncertainty per arc)
|
| 60 |
+
Step 3: Use DPP to select diverse batch of high-uncertainty sentences
|
| 61 |
+
Step 4: Within selected sentences, present only uncertain arcs to annotator
|
| 62 |
+
Step 5: Use corrected arcs + model predictions as training data
|
| 63 |
+
Step 6: Retrain parser on corrected data
|
| 64 |
+
Step 7: Repeat until quality target met
|
| 65 |
+
|
| 66 |
+
Expected outcome: Gold-standard subset of ~2-5K sentences
|
| 67 |
+
with ~50-70% less annotation effort than full manual annotation
|
| 68 |
+
```
|
active_learning/references/research_active_learning_ud/guideline_development.md
ADDED
|
@@ -0,0 +1,358 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Active Learning Combined with Annotation Guideline Development for UD Treebanks
|
| 2 |
+
|
| 3 |
+
**Date**: 2026-02-10
|
| 4 |
+
**Extends**: [README.md](README.md) (Active Learning for UD Treebank Construction)
|
| 5 |
+
|
| 6 |
+
## Executive Summary
|
| 7 |
+
|
| 8 |
+
Annotation guideline development and active learning are traditionally treated as separate processes, but they share a fundamental property: **both are iterative**. Guidelines improve through cycles of annotation, disagreement analysis, and revision; active learning improves through cycles of selection, annotation, and model retraining. Combining them creates a **co-evolutionary loop** where AL selects the most informative (and often most ambiguous) examples, which in turn expose gaps in annotation guidelines that need resolution. This synergy is particularly valuable for building UD treebanks for new languages like Vietnamese, where language-specific guidelines must be developed alongside the annotation itself.
|
| 9 |
+
|
| 10 |
+
**Key insight**: No published work formally integrates AL with guideline co-development for UD treebanks. This represents a novel contribution opportunity for UDD-1.
|
| 11 |
+
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
## 1. How Annotation Guidelines Are Developed
|
| 15 |
+
|
| 16 |
+
### 1.1 The Standard Iterative Cycle
|
| 17 |
+
|
| 18 |
+
The established methodology for developing annotation guidelines is an iterative refinement cycle (as practiced in Penn Treebank-style projects) where:
|
| 19 |
+
|
| 20 |
+
1. **Draft initial guidelines** based on existing UD universal guidelines + language-specific adaptations
|
| 21 |
+
2. **Pilot annotation** by multiple annotators independently on the same data
|
| 22 |
+
3. **Measure IAA** (Inter-Annotator Agreement) using Cohen's kappa or similar metrics
|
| 23 |
+
4. **Analyze disagreements** to identify guideline gaps, ambiguities, and unclear definitions
|
| 24 |
+
5. **Refine guidelines** to address identified issues
|
| 25 |
+
6. **Repeat** until target IAA is achieved
|
| 26 |
+
|
| 27 |
+
**Key references**:
|
| 28 |
+
- **Marcus et al. (1993)** — *Building a Large Annotated Corpus of English: The Penn Treebank*: Established the gold standard for treebank guideline development. Found that "a much larger, much more fully specified stylebook than the initial stylebook is a prerequisite for high levels of inter-annotator agreement." Used ~10% overlapped annotation for ongoing consistency monitoring.
|
| 29 |
+
- **Wissler et al. (2012)** — *Iterative Refinement and Quality Checking of Annotation Guidelines* (LREC 2012): Provided empirical evidence that kappa-based IAA measurements improve over iterative annotation rounds with continuously refined guidelines.
|
| 30 |
+
- **Pustejovsky & Stubbs (2013)** — *Natural Language Annotation for Machine Learning*: Canonical textbook on the MAMA cycle (Model-Annotate-Model-Annotate).
|
| 31 |
+
|
| 32 |
+
### 1.2 UD-Specific Guideline Development
|
| 33 |
+
|
| 34 |
+
The UD framework has a specific governance model:
|
| 35 |
+
|
| 36 |
+
- **Universal guidelines**: Maintained by a small group of core members. Based on "a universal pool of structural and functional categories that languages select from" (de Marneffe et al., CL 2021).
|
| 37 |
+
- **Language-specific guidelines**: Maintained by each language team. Can add language-specific subtypes but must comply with universal constraints.
|
| 38 |
+
- **Design principles** (de Marneffe et al., CL 2021):
|
| 39 |
+
1. Suitable for rapid, consistent annotation by a human annotator
|
| 40 |
+
2. Easily comprehended and used by a non-linguist
|
| 41 |
+
3. Suitable for computer parsing with high accuracy
|
| 42 |
+
4. Support downstream language understanding tasks
|
| 43 |
+
|
| 44 |
+
**Key references**:
|
| 45 |
+
- **de Marneffe et al. (2021)** — *Universal Dependencies* (Computational Linguistics, 47(2):255-308): The definitive paper on UD design principles and the governance of universal vs. language-specific guidelines.
|
| 46 |
+
- **Nivre et al. (2016)** — *Universal Dependencies v1: A Multilingual Treebank Collection*: The foundational UD paper describing annotation scheme based on Stanford dependencies, Google universal POS tags, and Interset.
|
| 47 |
+
- **Nivre et al. (2020)** — *Universal Dependencies v2: An Evergrowing Multilingual Treebank Collection* (LREC): Updated UD framework with 200+ treebanks in 150+ languages.
|
| 48 |
+
|
| 49 |
+
### 1.3 Guidelines for New Languages
|
| 50 |
+
|
| 51 |
+
When creating UD treebanks for new languages, the process involves:
|
| 52 |
+
|
| 53 |
+
1. Start from universal UD guidelines
|
| 54 |
+
2. Identify language-specific phenomena not covered by universal guidelines
|
| 55 |
+
3. Develop language-specific annotation conventions through pilot studies
|
| 56 |
+
4. Validate through IAA measurement and expert review
|
| 57 |
+
5. Document in language-specific guideline pages on universaldependencies.org
|
| 58 |
+
|
| 59 |
+
**Key references for new-language treebanks**:
|
| 60 |
+
- **Sinhala Dependency Treebank** (2023, UDW) — New UD treebank for Sinhala including guideline development for an under-resourced language. URL: https://aclanthology.org/2023.udw-1.3/
|
| 61 |
+
- **Pomak UD Treebank** (2023, UDW) — Semi-automatic UD treebank creation for Pomak with active annotation. URL: https://aclanthology.org/2023.udw-1.4/
|
| 62 |
+
- **African_UD Project** (Princeton CDH) — Creating UD treebanks for 11 typologically diverse African languages.
|
| 63 |
+
- **Dependency Annotation of Ottoman Turkish** (2024, LAW) — Iterative annotation scheme for a low-resource historical language. URL: https://aclanthology.org/2024.law-1.18/
|
| 64 |
+
|
| 65 |
+
---
|
| 66 |
+
|
| 67 |
+
## 2. The Intersection: Active Learning + Guideline Development
|
| 68 |
+
|
| 69 |
+
### 2.1 Why AL and Guideline Development Should Be Combined
|
| 70 |
+
|
| 71 |
+
Traditional AL assumes **fixed annotation guidelines** — the task definition doesn't change during the AL loop. But for new UD treebanks, this assumption fails because:
|
| 72 |
+
|
| 73 |
+
1. **Ambiguous cases are the most informative**: AL selects examples where the parser is uncertain, which often correspond to genuinely ambiguous linguistic constructions that need guideline clarification.
|
| 74 |
+
2. **Guidelines evolve through exposure to hard cases**: Annotators encounter novel constructions that weren't anticipated in initial guidelines.
|
| 75 |
+
3. **Error patterns reveal systematic guideline gaps**: When parser errors cluster around specific constructions, it signals that the guideline for that construction needs refinement.
|
| 76 |
+
4. **Anchoring bias**: When annotators correct pre-annotated (silver-standard) data, they may be anchored to the parser's output. Guidelines must explicitly address how to handle pre-annotation.
|
| 77 |
+
|
| 78 |
+
### 2.2 The Co-Evolutionary Loop
|
| 79 |
+
|
| 80 |
+
We propose a **Guideline-Aware Active Learning (GAAL)** framework:
|
| 81 |
+
|
| 82 |
+
```
|
| 83 |
+
┌──────────────────────────────────────────────────────┐
|
| 84 |
+
│ │
|
| 85 |
+
│ ┌─────────┐ ┌──────────┐ ┌────────────┐ │
|
| 86 |
+
│ │ Parse │───>│ Select │───>│ Annotate │ │
|
| 87 |
+
│ │ (model) │ │ (AL) │ │ (human) │ │
|
| 88 |
+
│ └────▲─────┘ └──────────┘ └─────┬──────┘ │
|
| 89 |
+
│ │ │ │
|
| 90 |
+
│ │ ┌──────────┐ │ │
|
| 91 |
+
│ │ │ Analyze │<──────────┘ │
|
| 92 |
+
│ │ │ disagree-│ │
|
| 93 |
+
│ │ │ ments │ │
|
| 94 |
+
│ │ └────┬─────┘ │
|
| 95 |
+
│ │ │ │
|
| 96 |
+
│ ┌────┴─────┐ ┌────▼──────┐ │
|
| 97 |
+
│ │ Retrain │ │ Refine │ │
|
| 98 |
+
│ │ parser │ │ guidelines│ │
|
| 99 |
+
│ └──────────┘ └───────────┘ │
|
| 100 |
+
│ │
|
| 101 |
+
│ Guideline-Aware Active Learning │
|
| 102 |
+
└──────────────────────────────────────────────────────┘
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
**Cycle**:
|
| 106 |
+
1. **Parse**: Model generates silver-standard annotations
|
| 107 |
+
2. **Select**: AL selects most informative sentences/arcs
|
| 108 |
+
3. **Annotate**: Multiple annotators correct selected items
|
| 109 |
+
4. **Analyze disagreements**: Identify where annotators disagree → these are guideline gaps
|
| 110 |
+
5. **Refine guidelines**: Update language-specific guidelines to resolve ambiguities
|
| 111 |
+
6. **Retrain parser**: Use corrected annotations to improve the model
|
| 112 |
+
7. **Repeat**: Each cycle produces both better data AND better guidelines
|
| 113 |
+
|
| 114 |
+
### 2.3 Key Advantages of the Combined Approach
|
| 115 |
+
|
| 116 |
+
| Aspect | AL Only | Guidelines Only | AL + Guidelines Combined |
|
| 117 |
+
|--------|---------|----------------|--------------------------|
|
| 118 |
+
| Data quality | Improves via selective annotation | Improves via consistency | Both: consistent + well-selected |
|
| 119 |
+
| Annotation cost | Reduces by 50-70% | No reduction | Same cost reduction + better guidelines |
|
| 120 |
+
| Guideline coverage | No improvement | Improves through all data | Focuses on hardest cases first |
|
| 121 |
+
| Parser improvement | Yes, from corrected data | Indirect, through consistency | Both direct and indirect |
|
| 122 |
+
| Vietnamese-specific | No language insights | Deep language analysis | Analysis focused on most important phenomena |
|
| 123 |
+
|
| 124 |
+
---
|
| 125 |
+
|
| 126 |
+
## 3. Related Work: Semi-Automatic Annotation and Guideline Evolution
|
| 127 |
+
|
| 128 |
+
### 3.1 Semi-Automatic Annotation Workflows
|
| 129 |
+
|
| 130 |
+
- **Brants & Skut (1998)** — *Automation of Treebank Annotation* (CoNLL): Showed that automatic pre-annotation + human correction is 3-5x faster than manual annotation. The degree of automation increases as training data grows — a bootstrapping approach. URL: https://aclanthology.org/W98-1207/
|
| 131 |
+
|
| 132 |
+
- **van Cranenburgh (2018)** — *Active DOP* (COLING): Online learning annotation tool where the model immediately learns from each annotated sentence, providing progressively better pre-annotations. URL: https://aclanthology.org/C18-2009/
|
| 133 |
+
|
| 134 |
+
- **Arborator-Grew** (2020, LREC) — *When Collaborative Treebank Curation Meets Graph Grammars*: Collaborative annotation tool combining features of Arborator and Grew for grammar-based curation. URL: https://aclanthology.org/2020.lrec-1.651/
|
| 135 |
+
|
| 136 |
+
### 3.2 Annotation Bias in Pre-Annotated Data
|
| 137 |
+
|
| 138 |
+
- **Rehbein et al. (2016)** — *Bias and Agreement in Syntactic Annotations*: **Critical finding** — "protocols where human annotators edit pre-annotated machine outputs are vulnerable to anchoring, a cognitive bias by which annotators are drawn toward pre-existing values." This directly affects UDD-1's approach of correcting silver-standard data.
|
| 139 |
+
- **Implication for UDD-1**: Guidelines must explicitly instruct annotators to consider each annotation independently, not just accept parser output.
|
| 140 |
+
|
| 141 |
+
### 3.3 Error Detection for Guideline Improvement
|
| 142 |
+
|
| 143 |
+
- **Rehbein & Ruppenhofer (2018)** — *Sprucing up the Trees: Error Detection in Treebanks* (COLING): Ensemble parsing + Bayesian inference for detecting annotation errors. Errors often cluster around specific constructions, revealing guideline gaps. URL: https://aclanthology.org/C18-1010/
|
| 144 |
+
|
| 145 |
+
- **Two languages, one treebank** (2021, Springer): Describes "semi-automatically checking for errors against annotation guidelines and writing either regular expressions on CoNLL-U files, or more complex rules that make use of hierarchical relations." URL: https://link.springer.com/article/10.1007/s10579-021-09573-1
|
| 146 |
+
|
| 147 |
+
### 3.4 IAA as Guideline Quality Metric
|
| 148 |
+
|
| 149 |
+
Inter-annotator agreement serves dual purposes in the combined framework:
|
| 150 |
+
1. **Measures annotation quality**: High IAA = good annotations
|
| 151 |
+
2. **Measures guideline clarity**: Low IAA on specific constructions = guideline needs improvement
|
| 152 |
+
|
| 153 |
+
The UD English-EWT treebank achieved ~96% IAA on a limited double-annotated portion, with "automatic conversion with extensive spot-checking and manual adjudication of ambiguous cases" during UD v2 conversion.
|
| 154 |
+
|
| 155 |
+
### 3.5 LLM-Assisted Annotation and Guidelines
|
| 156 |
+
|
| 157 |
+
- **Parsing the Switch** (arXiv 2025) — LLM-based UD annotation for code-switched and low-resource languages. Shows that UD's complexity "poses challenges for annotators unfamiliar with formal linguistic parsing conventions" and emphasizes "equipping annotators with detailed UD guidelines and hands-on annotation training." URL: https://arxiv.org/html/2506.07274v1
|
| 158 |
+
|
| 159 |
+
- **Wang et al. (2024)** — *Active Learning for NLP with Large Language Models* (arXiv): Proposes consistency-based strategies for mixed human-LLM annotation. LLMs annotate "easy" cases; humans handle uncertain ones. URL: https://arxiv.org/abs/2401.07367
|
| 160 |
+
|
| 161 |
+
---
|
| 162 |
+
|
| 163 |
+
## 4. Specialized Guidelines for Domain/Text Types
|
| 164 |
+
|
| 165 |
+
### 4.1 Guidelines for User-Generated and Non-Standard Text
|
| 166 |
+
|
| 167 |
+
- **Treebanking User-Generated Content** (Springer, 2022): Comprehensive overview of UD-based annotation criteria for user-generated content. Proposes "unified approach to annotate issues that might arise from such texts." URL: https://link.springer.com/article/10.1007/s10579-022-09581-9
|
| 168 |
+
|
| 169 |
+
### 4.2 Guidelines for Spoken Language
|
| 170 |
+
|
| 171 |
+
- **Annotation guidelines of UD and SUD treebanks for spoken corpora** (TLT 2021): Addresses segmentation, disfluencies, paratactic constructions for spoken French, Naija, and Beja. URL: https://aclanthology.org/2021.tlt-1.4/
|
| 172 |
+
|
| 173 |
+
### 4.3 Guidelines for Legal Domain (Relevant to UDD-1)
|
| 174 |
+
|
| 175 |
+
No specific UD guidelines exist for legal domain text. This is a gap that UDD-1 could address, as legal Vietnamese has:
|
| 176 |
+
- Complex subordination patterns (legal clauses, conditions)
|
| 177 |
+
- Specialized terminology requiring specific deprel choices
|
| 178 |
+
- Formulaic structures that may be under-represented in existing Vietnamese treebanks
|
| 179 |
+
|
| 180 |
+
---
|
| 181 |
+
|
| 182 |
+
## 5. Vietnamese-Specific Annotation Guidelines
|
| 183 |
+
|
| 184 |
+
### 5.1 Existing Vietnamese Treebank Guidelines
|
| 185 |
+
|
| 186 |
+
- **BKTreebank** (Nguyen et al., LREC 2018): Vietnamese dependency treebank with documented annotation guidelines. URL: https://aclanthology.org/L18-1341/
|
| 187 |
+
|
| 188 |
+
- **VnDT** (Nguyen et al., 2014): Vietnamese Dependency Treebank with 10K+ sentences. URL: https://github.com/datquocnguyen/VnDT
|
| 189 |
+
|
| 190 |
+
- **Vietnamese UD (VTB)**: Vietnamese Treebank in UD with language-specific guidelines at universaldependencies.org.
|
| 191 |
+
|
| 192 |
+
- **Ensuring Annotation Consistency and Accuracy for Vietnamese Treebank** (Springer, 2017): Addresses quality assurance in Vietnamese treebank annotation. URL: https://link.springer.com/article/10.1007/s10579-017-9398-3
|
| 193 |
+
|
| 194 |
+
- **VLSP 2019 Shared Task on Vietnamese Dependency Parsing**: Evaluation context for Vietnamese dependency parsing. URL: https://vlsp.org.vn/vlsp2019/eval/udp
|
| 195 |
+
|
| 196 |
+
### 5.2 Key Vietnamese-Specific Annotation Challenges
|
| 197 |
+
|
| 198 |
+
Based on existing literature and UDD-1's current issues:
|
| 199 |
+
|
| 200 |
+
1. **Word segmentation boundary**: Where to segment multi-syllable words affects POS and deprel
|
| 201 |
+
2. **Copula `là`**: The only copula in Vietnamese; guidelines needed for its various functions
|
| 202 |
+
3. **Passive markers `được/bị`**: Complex auxiliary vs. main verb distinction
|
| 203 |
+
4. **Serial verb constructions**: Common in Vietnamese; UD treatment requires explicit guidelines
|
| 204 |
+
5. **Classifier constructions**: Numeral-classifier-noun patterns need specific deprel conventions
|
| 205 |
+
6. **Topic-comment structure**: Vietnamese allows topic fronting; affects subject/topic annotation
|
| 206 |
+
7. **Legal domain vocabulary**: Specialized terms (điều, khoản, mục) with specific syntactic roles
|
| 207 |
+
|
| 208 |
+
---
|
| 209 |
+
|
| 210 |
+
## 6. Proposed Integrated Framework for UDD-1
|
| 211 |
+
|
| 212 |
+
### 6.1 Phase 1: Initial Guideline Bootstrapping (Weeks 1-2)
|
| 213 |
+
|
| 214 |
+
1. **Compile base guidelines**: Adapt from VTB/BKTreebank + universal UD guidelines
|
| 215 |
+
2. **Identify known challenges**: Document the 7 Vietnamese-specific challenges above
|
| 216 |
+
3. **Create decision trees**: For common ambiguous constructions (aux vs. main verb, UPOS assignment for mismatched XPOS)
|
| 217 |
+
4. **Prepare pilot annotation set**: Use AL to select ~50 diverse sentences spanning different difficulty levels
|
| 218 |
+
|
| 219 |
+
### 6.2 Phase 2: Pilot Annotation + Guideline Refinement (Weeks 3-4)
|
| 220 |
+
|
| 221 |
+
1. **Double-annotate pilot set**: 2+ annotators independently annotate the 50 selected sentences
|
| 222 |
+
2. **Compute IAA**: Measure agreement at UPOS, HEAD, and DEPREL levels
|
| 223 |
+
3. **Disagreement analysis**: Categorize disagreements by:
|
| 224 |
+
- UPOS disagreements (e.g., AUX vs. VERB for `được`)
|
| 225 |
+
- HEAD attachment disagreements (e.g., PP attachment ambiguity)
|
| 226 |
+
- DEPREL label disagreements (e.g., `nsubj` vs. `dislocated` for topic-fronted NPs)
|
| 227 |
+
4. **Guideline revision round 1**: Add explicit rules for top-N disagreement categories
|
| 228 |
+
5. **Annotate 50 more sentences with revised guidelines**: Measure IAA improvement
|
| 229 |
+
|
| 230 |
+
### 6.3 Phase 3: Active Learning Loop with Guideline Co-Evolution (Weeks 5+)
|
| 231 |
+
|
| 232 |
+
```
|
| 233 |
+
For each AL iteration:
|
| 234 |
+
1. Train parser on current annotated data
|
| 235 |
+
2. Select batch of informative sentences (DPP + uncertainty)
|
| 236 |
+
3. Within batch, select uncertain arcs (head entropy)
|
| 237 |
+
4. Annotate selected arcs (2 annotators for guideline-ambiguous cases)
|
| 238 |
+
5. Flag disagreements → add to "guideline review queue"
|
| 239 |
+
6. Every N iterations:
|
| 240 |
+
a. Review accumulated disagreements
|
| 241 |
+
b. Identify systematic patterns → guideline gaps
|
| 242 |
+
c. Update guidelines
|
| 243 |
+
d. Re-annotate affected previous annotations if necessary
|
| 244 |
+
7. Retrain parser with corrected annotations
|
| 245 |
+
```
|
| 246 |
+
|
| 247 |
+
### 6.4 Guideline Maturity Levels
|
| 248 |
+
|
| 249 |
+
| Level | IAA (LAS) | Guideline Status | AL Focus |
|
| 250 |
+
|-------|-----------|-----------------|----------|
|
| 251 |
+
| 1 (Pilot) | <80% | Draft, many gaps | Diverse constructions to find gaps |
|
| 252 |
+
| 2 (Development) | 80-90% | Revised, major issues resolved | High-uncertainty arcs |
|
| 253 |
+
| 3 (Stable) | 90-95% | Stable, minor refinements | Remaining error patterns |
|
| 254 |
+
| 4 (Production) | >95% | Finalized, Vietnamese-specific complete | Systematic coverage of all constructions |
|
| 255 |
+
|
| 256 |
+
### 6.5 Expected Outcomes
|
| 257 |
+
|
| 258 |
+
1. **Vietnamese-specific UD annotation guidelines**: Comprehensive document covering all Vietnamese-specific decisions, with rationale and examples
|
| 259 |
+
2. **Gold-standard evaluation set**: ~500-2000 sentences double-annotated with high IAA
|
| 260 |
+
3. **Improved parser**: Retrained on corrected data, measuring improvement in LAS/UAS
|
| 261 |
+
4. **Guideline-error catalog**: Documented common parser errors mapped to the guideline clarifications that resolve them
|
| 262 |
+
|
| 263 |
+
---
|
| 264 |
+
|
| 265 |
+
## 7. Additional Papers on Guideline Development Methodology
|
| 266 |
+
|
| 267 |
+
### Iterative Refinement
|
| 268 |
+
|
| 269 |
+
- **Iterative Transformation of Annotation Guidelines for Constituency Parsing** (ACL 2013): Developed "a handbook of domain-customized syntactic parsing guidelines based on iterative annotation and adjudication between two institutions." URL: https://mirror.aclweb.org/acl2013/short/2300.html
|
| 270 |
+
|
| 271 |
+
- **Text Annotation Handbook** (arXiv:2310.11780, 2023): Comprehensive guide recommending "several annotation cycles with collective guideline improvement after each iteration" and a "two-tiered IAA validation process using qualitative analysis of disagreement to systematically refine guidelines." URL: https://arxiv.org/pdf/2310.11780
|
| 272 |
+
|
| 273 |
+
### Bootstrapping Approaches
|
| 274 |
+
|
| 275 |
+
- **Bootstrapping UD treebanks for Delexicalized Parsing** (2019, DepLing): "Simple models for treebank generation are cheaper than human annotated treebanks, especially in the lower ends of the learning curves." URL: https://aclanthology.org/W19-6102/
|
| 276 |
+
|
| 277 |
+
- **Starting a new treebank? Go SUD!** (2021, DepLing): Advocates Surface-Syntactic UD as a more intuitive starting point for new treebanks, convertible to UD. URL: https://aclanthology.org/2021.depling-1.4/
|
| 278 |
+
|
| 279 |
+
- **Bootstrapping a Swedish Treebank Using Cross-Corpus Harmonization and Annotation Projection**: Demonstrates cross-corpus bootstrapping techniques.
|
| 280 |
+
|
| 281 |
+
### Quality Assurance
|
| 282 |
+
|
| 283 |
+
- **Active Learning-Based Corpus Annotation — The PathoJen Experience** (PMC, 2013): "Active learning-based annotation saves costs without sacrificing quality by directing hard-to-decide instances to human annotators while easier instances are handled automatically." URL: https://pmc.ncbi.nlm.nih.gov/articles/PMC3540513/
|
| 284 |
+
|
| 285 |
+
- **Inter-annotator Agreement for Dependency Annotation** (ACL Workshop 2013): Specifically addresses IAA measurement for dependency parsing tasks. URL: https://aclanthology.org/W13-1723/
|
| 286 |
+
|
| 287 |
+
---
|
| 288 |
+
|
| 289 |
+
## 8. Research Gap and Novelty
|
| 290 |
+
|
| 291 |
+
**The primary gap**: No published work formally integrates active learning with annotation guideline co-development for UD treebanks. Existing work treats them as separate:
|
| 292 |
+
- AL papers assume fixed guidelines
|
| 293 |
+
- Guideline papers assume random or sequential data selection
|
| 294 |
+
|
| 295 |
+
**The UDD-1 opportunity**: By explicitly designing a framework where:
|
| 296 |
+
1. AL selects examples that expose guideline ambiguities
|
| 297 |
+
2. Guideline refinements are tracked and linked to specific disagreements
|
| 298 |
+
3. The parser's systematic errors drive both guideline updates and re-annotation
|
| 299 |
+
4. Vietnamese-specific guidelines emerge organically from the hardest cases
|
| 300 |
+
|
| 301 |
+
This would be a **novel methodological contribution** publishable at venues like UDW, LAW, or LREC.
|
| 302 |
+
|
| 303 |
+
---
|
| 304 |
+
|
| 305 |
+
## References
|
| 306 |
+
|
| 307 |
+
See [papers.md](papers.md) for the full paper database and [bibliography.bib](bibliography.bib) for BibTeX entries.
|
| 308 |
+
|
| 309 |
+
### Additional References for Guideline Development
|
| 310 |
+
|
| 311 |
+
```bibtex
|
| 312 |
+
@article{demarneffe2021universal,
|
| 313 |
+
title={Universal Dependencies},
|
| 314 |
+
author={de Marneffe, Marie-Catherine and Manning, Christopher D and Nivre, Joakim and Zeman, Daniel},
|
| 315 |
+
journal={Computational Linguistics},
|
| 316 |
+
volume={47},
|
| 317 |
+
number={2},
|
| 318 |
+
pages={255--308},
|
| 319 |
+
year={2021}
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
@inproceedings{wissler2012iterative,
|
| 323 |
+
title={Iterative Refinement and Quality Checking of Annotation Guidelines},
|
| 324 |
+
author={Wissler, Lars and others},
|
| 325 |
+
booktitle={Proceedings of LREC 2012},
|
| 326 |
+
year={2012}
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
@inproceedings{acl2013iterative,
|
| 330 |
+
title={Iterative Transformation of Annotation Guidelines for Constituency Parsing},
|
| 331 |
+
booktitle={ACL 2013 Short Papers},
|
| 332 |
+
year={2013}
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
@article{rehbein2016bias,
|
| 336 |
+
title={Bias and Agreement in Syntactic Annotations},
|
| 337 |
+
author={Rehbein, Ines and others},
|
| 338 |
+
year={2016}
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
@article{treebanking_ugc2022,
|
| 342 |
+
title={Treebanking User-Generated Content: A UD-based Overview},
|
| 343 |
+
journal={Language Resources and Evaluation},
|
| 344 |
+
year={2022}
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
@inproceedings{ud_spoken2021,
|
| 348 |
+
title={Annotation guidelines of UD and SUD treebanks for spoken corpora},
|
| 349 |
+
booktitle={TLT 2021},
|
| 350 |
+
year={2021}
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
@inproceedings{sinhala_ud2023,
|
| 354 |
+
title={Sinhala Dependency Treebank},
|
| 355 |
+
booktitle={UDW @ GURT 2023},
|
| 356 |
+
year={2023}
|
| 357 |
+
}
|
| 358 |
+
```
|
active_learning/references/research_active_learning_ud/papers.md
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Paper Database: Active Learning for UD Treebank Construction
|
| 2 |
+
|
| 3 |
+
## Core Papers: Active Learning for Dependency Parsing
|
| 4 |
+
|
| 5 |
+
### 1. Sample Selection for Statistical Parsing
|
| 6 |
+
- **Authors**: Rebecca Hwa
|
| 7 |
+
- **Venue**: Computational Linguistics, 30(3):253-276, 2004
|
| 8 |
+
- **URL**: https://direct.mit.edu/coli/article/30/3/253/1841/Sample-Selection-for-Statistical-Parsing
|
| 9 |
+
- **Citations**: ~400+
|
| 10 |
+
- **Summary**: Seminal paper on using sample selection (active learning) for parsing. Proposes uncertainty-based criteria for selecting helpful training examples. Shows that sample selection significantly reduces annotated training corpus size across two syntactic learning tasks and two models.
|
| 11 |
+
- **Key Contributions**: (1) Established uncertainty sampling as robust criterion for parsing AL; (2) Compared multiple selection criteria; (3) Showed generalizability across parsers
|
| 12 |
+
- **Relevance to UDD-1**: Foundational methodology; uncertainty sampling applicable to selecting sentences from UDD-1 for gold-standard annotation
|
| 13 |
+
|
| 14 |
+
### 2. Active Learning for Statistical Natural Language Parsing
|
| 15 |
+
- **Authors**: Min Tang, Xiaoqiang Luo, Salim Roukos
|
| 16 |
+
- **Venue**: ACL 2002, pp. 120-127
|
| 17 |
+
- **URL**: https://aclanthology.org/P02-1016/
|
| 18 |
+
- **Summary**: First application of active learning to statistical parsing. Experimented with sample selection algorithms including uncertainty-based and clustering-based selection.
|
| 19 |
+
- **Key Contributions**: Pioneered AL for parsing; compared uncertainty vs. clustering approaches
|
| 20 |
+
- **Relevance to UDD-1**: Historical foundation; clustering-based selection may help select diverse sentences across UDD-1's legal and news domains
|
| 21 |
+
|
| 22 |
+
### 3. Active Learning for Dependency Parsing with Partial Annotation
|
| 23 |
+
- **Authors**: Zhenghua Li, Min Zhang, Yue Zhang, Zhanyi Liu, Wenliang Chen, Hua Wu, Haifeng Wang
|
| 24 |
+
- **Venue**: ACL 2016, pp. 344-354
|
| 25 |
+
- **URL**: https://aclanthology.org/P16-1033/
|
| 26 |
+
- **Summary**: Most comprehensive study of partial annotation for dependency parsing AL. Uses head entropy to select the most informative individual arcs for annotation. Shows that partial annotation AL achieves comparable performance with significantly less effort.
|
| 27 |
+
- **Key Contributions**: (1) Head entropy as arc-level informativeness measure; (2) Partial annotation framework; (3) Comparable performance with 40-60% less annotation
|
| 28 |
+
- **Methodology**: Head entropy measures the entropy of the distribution over possible heads for each token, using the Inside-Outside algorithm on a probabilistic parser
|
| 29 |
+
- **Relevance to UDD-1**: **Directly applicable** — can select uncertain arcs in UDD-1's silver-standard trees for human correction
|
| 30 |
+
|
| 31 |
+
### 4. Diversity-Aware Batch Active Learning for Dependency Parsing
|
| 32 |
+
- **Authors**: Tianze Shi, Adrian Benton, Igor Malioutov, Ozan Irsoy
|
| 33 |
+
- **Venue**: NAACL 2021
|
| 34 |
+
- **URL**: https://aclanthology.org/2021.naacl-main.207/
|
| 35 |
+
- **Summary**: State-of-the-art AL for dependency parsing. Uses determinantal point processes (DPPs) to enforce diversity in batch selection. Shows that diversity-aware selection outperforms uncertainty-only selection, especially in early stages.
|
| 36 |
+
- **Key Contributions**: (1) DPP-based diversity in batch AL; (2) Sentence embeddings for diversity; (3) Superior to strong baselines
|
| 37 |
+
- **Results**: On English newswire (PTB), DPP batch selection reaches same LAS as random with ~20-30% fewer sentences
|
| 38 |
+
- **Relevance to UDD-1**: **Most relevant modern method** for selecting sentences from UDD-1 for annotation
|
| 39 |
+
|
| 40 |
+
### 5. Active Learning for Dependency Parsing Using Partially Annotated Sentences
|
| 41 |
+
- **Authors**: Seyed Abolghasem Mirroshandel, Alexis Nasr
|
| 42 |
+
- **Venue**: IWPT 2011
|
| 43 |
+
- **URL**: https://aclanthology.org/W11-2917/
|
| 44 |
+
- **Summary**: Early work combining partial annotation with AL for dependency parsing. Proposed annotating only uncertain parts of sentences.
|
| 45 |
+
- **Key Contributions**: Pioneered partial annotation for dependency parsing AL
|
| 46 |
+
- **Relevance to UDD-1**: Conceptual framework for correcting uncertain arcs in silver-standard trees
|
| 47 |
+
|
| 48 |
+
### 6. Active Learning for Dependency Parsing by A Committee of Parsers
|
| 49 |
+
- **Authors**: Saeed Majidi, Gregory Crane
|
| 50 |
+
- **Venue**: IWPT 2013, pp. 98-105
|
| 51 |
+
- **URL**: https://aclanthology.org/W13-5711/
|
| 52 |
+
- **Summary**: Applied query-by-committee to dependency parsing with multiple diverse parsers. Committee disagreement indicates informativeness.
|
| 53 |
+
- **Key Contributions**: QBC framework for dependency parsing; analysis of committee diversity effects
|
| 54 |
+
- **Relevance to UDD-1**: Could use multiple parser models (Underthesea, VnCoreNLP, PhoNLP) as committee members
|
| 55 |
+
|
| 56 |
+
### 7. Data-efficient Active Learning for Structured Prediction with Partial Annotation and Self-Training
|
| 57 |
+
- **Authors**: Zhisong Zhang, Emma Strubell, Eduard Hovy
|
| 58 |
+
- **Venue**: Findings of EMNLP 2023
|
| 59 |
+
- **URL**: https://aclanthology.org/2023.findings-emnlp.865/
|
| 60 |
+
- **Summary**: Combines partial annotation (selecting most informative sub-structures) with self-training (model predictions as pseudo-labels). Uses adaptive error estimator for selection ratio. Evaluated across four structured prediction tasks including dependency parsing.
|
| 61 |
+
- **Key Contributions**: (1) Partial annotation + self-training combination; (2) Adaptive selection ratio via error estimator; (3) State-of-the-art cost reduction
|
| 62 |
+
- **Relevance to UDD-1**: **Most applicable modern method** — UDD-1 already has automatic predictions that can serve as pseudo-labels; AL would select which to correct
|
| 63 |
+
|
| 64 |
+
### 8. Using Smaller Constituents Rather Than Sentences in Active Learning for Japanese Dependency Parsing
|
| 65 |
+
- **Authors**: Manabu Sassano, Sadao Kurohashi
|
| 66 |
+
- **Venue**: ACL 2010
|
| 67 |
+
- **URL**: https://aclanthology.org/P10-1037/
|
| 68 |
+
- **Summary**: Key insight that sub-sentential units (bunsetsus) are more efficient annotation units than full sentences. Avoids annotating "easy" parts of sentences.
|
| 69 |
+
- **Key Contributions**: Sub-sentential AL for dependency parsing; ~30% reduction in annotation
|
| 70 |
+
- **Relevance to UDD-1**: Vietnamese word groups could serve as sub-sentential units for annotation
|
| 71 |
+
|
| 72 |
+
### 9. Combining Active Learning and Partial Annotation for Domain Adaptation of a Japanese Dependency Parser
|
| 73 |
+
- **Authors**: Daniel Flannery, Shinsuke Mori
|
| 74 |
+
- **Venue**: IWPT 2015
|
| 75 |
+
- **URL**: https://aclanthology.org/W15-2202/
|
| 76 |
+
- **Summary**: Combined AL with partial annotation for domain adaptation. Particularly effective for adapting to new domains.
|
| 77 |
+
- **Relevance to UDD-1**: Directly relevant — UDD-1 spans legal and news domains; AL could help domain adaptation
|
| 78 |
+
|
| 79 |
+
### 10. Dependency Parsing with Partial Annotations: An Empirical Comparison
|
| 80 |
+
- **Authors**: Yue Zhang, Zhenghua Li, Jun Lang, Qingrong Xia, Min Zhang
|
| 81 |
+
- **Venue**: IJCNLP 2017
|
| 82 |
+
- **URL**: https://aclanthology.org/I17-1006/
|
| 83 |
+
- **Summary**: Compared forest-based training and constrained decoding for parsing with partial annotations. Tested across random, uncertain, and divergent settings on PTB.
|
| 84 |
+
- **Key Contributions**: Systematic comparison of partial annotation methods
|
| 85 |
+
- **Relevance to UDD-1**: Technical methods for training parsers on partially corrected trees
|
| 86 |
+
|
| 87 |
+
---
|
| 88 |
+
|
| 89 |
+
## Annotation Cost and Methodology
|
| 90 |
+
|
| 91 |
+
### 11. Active Learning and the Total Cost of Annotation
|
| 92 |
+
- **Authors**: Jason Baldridge, Miles Osborne
|
| 93 |
+
- **Venue**: EMNLP 2004
|
| 94 |
+
- **URL**: https://aclanthology.org/W04-3202/
|
| 95 |
+
- **Summary**: Critical paper showing that standard AL evaluation ignores variable annotation costs. Longer sentences cost more but are often selected by uncertainty methods.
|
| 96 |
+
- **Key Contributions**: Cost-aware AL evaluation; distinction between instance count and annotation time
|
| 97 |
+
- **Relevance to UDD-1**: Must account for sentence length when selecting from UDD-1 for annotation
|
| 98 |
+
|
| 99 |
+
### 12. An Analysis of Active Learning Strategies for Sequence Labeling Tasks
|
| 100 |
+
- **Authors**: Burr Settles, Mark Craven
|
| 101 |
+
- **Venue**: EMNLP 2008
|
| 102 |
+
- **URL**: https://aclanthology.org/D08-1112/
|
| 103 |
+
- **Summary**: Comprehensive analysis of AL strategies for structured NLP tasks. Compared uncertainty, QBC, and information density methods.
|
| 104 |
+
- **Key Contributions**: Information density weighting; comparison framework; showed combining uncertainty with representativeness is most robust
|
| 105 |
+
- **Relevance to UDD-1**: Framework for evaluating AL strategies; information density applicable to selecting representative sentences
|
| 106 |
+
|
| 107 |
+
### 13. Estimating Annotation Cost for Active Learning in a Multi-Annotator Environment
|
| 108 |
+
- **Authors**: Shilpa Arora, Eric Nyberg, Carolyn P. Rosé
|
| 109 |
+
- **Venue**: NAACL HLT 2009 Workshop on AL for NLP
|
| 110 |
+
- **URL**: https://aclanthology.org/W09-1903/
|
| 111 |
+
- **Summary**: Addresses annotation cost estimation with multiple annotators.
|
| 112 |
+
- **Relevance to UDD-1**: Relevant if using multiple annotators for gold-standard creation
|
| 113 |
+
|
| 114 |
+
---
|
| 115 |
+
|
| 116 |
+
## Treebank Construction with Active Learning
|
| 117 |
+
|
| 118 |
+
### 14. Methodological Issues Regarding the Semi-automatic UD Treebank Creation of Under-resourced Languages: The Case of Pomak
|
| 119 |
+
- **Authors**: (Pomak UD team)
|
| 120 |
+
- **Venue**: UDW @ GURT 2023, ACL Anthology
|
| 121 |
+
- **URL**: https://aclanthology.org/2023.udw-1.4/
|
| 122 |
+
- **Summary**: Applied active annotation to create a UD treebank for Pomak (6,351 sentences). Circular process: prediction → correction → retraining.
|
| 123 |
+
- **Key Contributions**: Practical AL pipeline for UD treebank creation; validated for under-resourced language
|
| 124 |
+
- **Relevance to UDD-1**: **Most directly comparable** — similar scenario (under-resourced language, UD treebank creation, semi-automatic annotation)
|
| 125 |
+
|
| 126 |
+
### 15. Exploring Active Learning Approaches in Treebank Development
|
| 127 |
+
- **Venue**: Springer, 2023-2024
|
| 128 |
+
- **URL**: https://link.springer.com/chapter/10.1007/978-3-032-06389-2_39
|
| 129 |
+
- **Summary**: Proposed practical active annotation schemes for treebank development. Applied to Pomak corpus (300 annotated sentences initial study). Experts strategically select sentences, circular annotation process.
|
| 130 |
+
- **Key Contributions**: Active annotation methodology; validation on under-resourced UD treebank
|
| 131 |
+
- **Relevance to UDD-1**: Methodology directly applicable to UDD-1
|
| 132 |
+
|
| 133 |
+
### 16. Increasing Return on Annotation Investment: The Automatic Construction of a Universal Dependency Treebank for Dutch
|
| 134 |
+
- **Authors**: Gosse Bouma, Gertjan van Noord
|
| 135 |
+
- **Venue**: UDW 2017 (NoDaLiDa Workshop)
|
| 136 |
+
- **URL**: https://aclanthology.org/W17-0403/
|
| 137 |
+
- **Summary**: Maximizing return on annotation investment for UD treebanks using automatic parsing + selective manual correction.
|
| 138 |
+
- **Relevance to UDD-1**: Similar approach to UDD-1's current pipeline (automatic parsing + post-processing)
|
| 139 |
+
|
| 140 |
+
### 17. Active DOP: A Constituency Treebank Annotation Tool with Online Learning
|
| 141 |
+
- **Authors**: Andreas van Cranenburgh
|
| 142 |
+
- **Venue**: COLING 2018
|
| 143 |
+
- **URL**: https://aclanthology.org/C18-2009/
|
| 144 |
+
- **Summary**: Practical annotation tool with online learning — model learns from each annotated sentence immediately. Language-independent.
|
| 145 |
+
- **Key Contributions**: Online learning annotation tool; supports AL experimentation
|
| 146 |
+
- **Relevance to UDD-1**: Tool design inspiration for building an annotation interface
|
| 147 |
+
|
| 148 |
+
### 18. Automation of Treebank Annotation
|
| 149 |
+
- **Authors**: Thorsten Brants, Wojciech Skut
|
| 150 |
+
- **Venue**: CoNLL 1998
|
| 151 |
+
- **URL**: https://aclanthology.org/W98-1207/
|
| 152 |
+
- **Summary**: Early foundational work on automating treebank annotation. Showed that pre-annotating with a parser and having humans correct is 3-5x faster than annotation from scratch.
|
| 153 |
+
- **Relevance to UDD-1**: Validates UDD-1's semi-automatic approach
|
| 154 |
+
|
| 155 |
+
### 19. Active Learning and the Irish Treebank
|
| 156 |
+
- **Authors**: Teresa Lynn, Jennifer Foster
|
| 157 |
+
- **Venue**: Semantic Scholar
|
| 158 |
+
- **URL**: https://www.semanticscholar.org/paper/Active-Learning-and-the-Irish-Treebank-Lynn-Foster/dd1e5cfdefa24ec833832e76900952a37d9a3fa1
|
| 159 |
+
- **Summary**: Compared AL bootstrapping to random selection for Irish treebank development. AL outperforms passive learning, but advantage diminishes when measuring actual annotation time.
|
| 160 |
+
- **Key Contributions**: Realistic evaluation of AL for low-resource treebank development
|
| 161 |
+
- **Relevance to UDD-1**: Cautionary finding about the gap between AL theory and practice
|
| 162 |
+
|
| 163 |
+
### 20. Sprucing up the Trees -- Error Detection in Treebanks
|
| 164 |
+
- **Authors**: Ines Rehbein, Josef Ruppenhofer
|
| 165 |
+
- **Venue**: COLING 2018
|
| 166 |
+
- **URL**: https://aclanthology.org/C18-1010/
|
| 167 |
+
- **Summary**: Detecting annotation errors in treebanks using ensemble parsing + Bayesian inference, guided by AL. Applied to both manual and automatic annotations.
|
| 168 |
+
- **Key Contributions**: Error detection method for treebank quality improvement
|
| 169 |
+
- **Relevance to UDD-1**: Could identify errors in UDD-1's silver-standard trees for targeted correction
|
| 170 |
+
|
| 171 |
+
---
|
| 172 |
+
|
| 173 |
+
## Surveys and General Active Learning for NLP
|
| 174 |
+
|
| 175 |
+
### 21. A Survey of Active Learning for Natural Language Processing
|
| 176 |
+
- **Authors**: Zhisong Zhang, Emma Strubell, Eduard Hovy
|
| 177 |
+
- **Venue**: EMNLP 2022
|
| 178 |
+
- **URL**: https://aclanthology.org/2022.emnlp-main.414/
|
| 179 |
+
- **Summary**: Comprehensive survey of AL for NLP. Covers query strategies, structured prediction AL, annotation costs, deep learning integration, and starting/stopping criteria.
|
| 180 |
+
- **Key Contributions**: Taxonomy of AL strategies for NLP; comprehensive bibliography
|
| 181 |
+
- **Relevance to UDD-1**: Essential reference for choosing AL strategies
|
| 182 |
+
|
| 183 |
+
### 22. A Literature Survey of Active Machine Learning in the Context of Natural Language Processing
|
| 184 |
+
- **Authors**: Fredrik Olsson
|
| 185 |
+
- **Venue**: SICS Technical Report T2009:06, 2009
|
| 186 |
+
- **URL**: https://www.semanticscholar.org/paper/A-literature-survey-of-active-machine-learning-in-Olsson/abebd207b1cf56ced502b0bb203d1f231b58d699
|
| 187 |
+
- **Summary**: Early comprehensive survey of AL for NLP applications. Covers query strategies, stopping criteria, and practical considerations.
|
| 188 |
+
- **Relevance to UDD-1**: Historical context and foundational strategies
|
| 189 |
+
|
| 190 |
+
### 23. From Selection to Generation: A Survey of LLM-based Active Learning
|
| 191 |
+
- **Authors**: Yu Xia et al.
|
| 192 |
+
- **Venue**: ACL 2025
|
| 193 |
+
- **URL**: https://aclanthology.org/2025.acl-long.708/
|
| 194 |
+
- **Summary**: Survey examining how LLMs enhance AL frameworks. LLMs used not only for selection but also for generating new data instances.
|
| 195 |
+
- **Key Contributions**: Taxonomy of LLM-based AL methods; emerging paradigm
|
| 196 |
+
- **Relevance to UDD-1**: Could use LLMs as annotators in AL loop for UDD-1
|
| 197 |
+
|
| 198 |
+
### 24. Active Learning for NLP with Large Language Models
|
| 199 |
+
- **Authors**: Xuesong Wang et al.
|
| 200 |
+
- **Venue**: arXiv 2401.07367, 2024
|
| 201 |
+
- **URL**: https://arxiv.org/abs/2401.07367
|
| 202 |
+
- **Summary**: Proposes using LLMs as annotators in AL settings with consistency-based selection for potentially incorrect labels.
|
| 203 |
+
- **Key Contributions**: Mixed annotation strategy (LLM + human); consistency-based selection
|
| 204 |
+
- **Relevance to UDD-1**: Could use LLMs to pre-annotate, then selectively correct with humans
|
| 205 |
+
|
| 206 |
+
---
|
| 207 |
+
|
| 208 |
+
## Related: Low-Resource Parsing and Self-Training
|
| 209 |
+
|
| 210 |
+
### 25. Deep Contextualized Self-training for Low Resource Dependency Parsing
|
| 211 |
+
- **Authors**: Guy Rotman, Roi Reichart
|
| 212 |
+
- **Venue**: TACL 2019, 7:695-713
|
| 213 |
+
- **URL**: https://aclanthology.org/Q19-1044/
|
| 214 |
+
- **Summary**: Self-training algorithm (DCST) using deep contextualized representations. Trains parser on its own output. Effective across languages in low-resource settings.
|
| 215 |
+
- **Key Contributions**: Self-training for low-resource parsing; integration with ELMo/BERT
|
| 216 |
+
- **Relevance to UDD-1**: Self-training component applicable to improving UDD-1's automatic parses
|
| 217 |
+
|
| 218 |
+
### 26. Zero-Shot Dependency Parsing with Worst-Case Aware Automated Curriculum Learning
|
| 219 |
+
- **Authors**: (Multiple authors)
|
| 220 |
+
- **Venue**: ACL 2022
|
| 221 |
+
- **URL**: https://arxiv.org/abs/2203.08555
|
| 222 |
+
- **Summary**: Curriculum learning for dependency parsing. Progressively harder examples improve zero-shot parsing performance.
|
| 223 |
+
- **Key Contributions**: Curriculum learning for parsing; worst-case optimization
|
| 224 |
+
- **Relevance to UDD-1**: Curriculum learning as alternative/complement to AL for parser training
|
| 225 |
+
|
| 226 |
+
---
|
| 227 |
+
|
| 228 |
+
## Related: Query Strategies and Modern Approaches
|
| 229 |
+
|
| 230 |
+
### 27. Revisiting Uncertainty-based Query Strategies for Active Learning with Transformers
|
| 231 |
+
- **Authors**: Christopher Schroder, Andreas Niekler, Martin Potthast
|
| 232 |
+
- **Venue**: Findings of ACL 2022
|
| 233 |
+
- **URL**: https://aclanthology.org/2022.findings-acl.172/
|
| 234 |
+
- **Summary**: Revisits uncertainty-based queries with transformer models. Alternative uncertainty methods outperform prediction entropy; SOTA query strategies induce prohibitive runtime.
|
| 235 |
+
- **Key Contributions**: Practical AL strategy evaluation with modern models
|
| 236 |
+
- **Relevance to UDD-1**: Guidance on which uncertainty methods to use with transformer-based parsers
|
| 237 |
+
|
| 238 |
+
### 28. Cold-start Active Learning through Self-supervised Language Modeling
|
| 239 |
+
- **Authors**: Michelle Yuan, Hsuan-Tien Lin, Jordan Boyd-Graber
|
| 240 |
+
- **Venue**: EMNLP 2020
|
| 241 |
+
- **URL**: https://aclanthology.org/2020.emnlp-main.637/
|
| 242 |
+
- **Summary**: Addresses cold-start problem by using pre-trained LM features (masked LM loss) for initial selection before any annotations.
|
| 243 |
+
- **Key Contributions**: Cold-start AL with pre-trained models
|
| 244 |
+
- **Relevance to UDD-1**: If starting annotation from scratch, could use PhoBERT's MLM loss for initial sentence selection
|
| 245 |
+
|
| 246 |
+
### 29. Multi-task Active Learning for Pre-trained Transformer-based Models
|
| 247 |
+
- **Authors**: (TACL 2022)
|
| 248 |
+
- **Venue**: TACL 2022
|
| 249 |
+
- **URL**: https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00515/113664/
|
| 250 |
+
- **Summary**: Multi-task AL for transformers across multiple NLP tasks simultaneously.
|
| 251 |
+
- **Key Contributions**: Multi-task AL framework for transformers
|
| 252 |
+
- **Relevance to UDD-1**: Could jointly select sentences for POS, deprel, and head correction
|
| 253 |
+
|
| 254 |
+
### 30. Active Learning for Sequence Tagging with Deep Pre-trained Models
|
| 255 |
+
- **Venue**: EACL 2021
|
| 256 |
+
- **URL**: https://aclanthology.org/2021.eacl-main.145/
|
| 257 |
+
- **Summary**: AL strategies for sequence tagging with pre-trained models.
|
| 258 |
+
- **Relevance to UDD-1**: Applicable to POS tagging component of UD annotation
|
| 259 |
+
|
| 260 |
+
---
|
| 261 |
+
|
| 262 |
+
## Vietnamese NLP Resources (No AL Applied)
|
| 263 |
+
|
| 264 |
+
### 31. BKTreebank: Building a Vietnamese Dependency Treebank
|
| 265 |
+
- **Authors**: Kiem-Hieu Nguyen et al.
|
| 266 |
+
- **Venue**: LREC 2018
|
| 267 |
+
- **URL**: https://aclanthology.org/L18-1341/
|
| 268 |
+
- **Summary**: Vietnamese dependency treebank (BKTreebank). Built manually without AL.
|
| 269 |
+
- **Relevance to UDD-1**: Existing Vietnamese treebank for comparison; potential training data for AL parser
|
| 270 |
+
|
| 271 |
+
### 32. VnCoreNLP: A Vietnamese Natural Language Processing Toolkit
|
| 272 |
+
- **Authors**: Dat Quoc Nguyen et al.
|
| 273 |
+
- **Venue**: NAACL 2018
|
| 274 |
+
- **URL**: https://aclanthology.org/N18-5012/
|
| 275 |
+
- **Summary**: Vietnamese NLP toolkit with word segmentation, POS tagging, NER, and dependency parsing.
|
| 276 |
+
- **Relevance to UDD-1**: Potential committee member parser for QBC-based AL
|
| 277 |
+
|
| 278 |
+
### 33. PhoNLP: A Joint Multi-task Learning Model for Vietnamese POS Tagging, NER, and Dependency Parsing
|
| 279 |
+
- **Authors**: (VinAI Research)
|
| 280 |
+
- **Venue**: NAACL 2021
|
| 281 |
+
- **URL**: https://research.vinai.io/phonlp/
|
| 282 |
+
- **Summary**: BERT-based multi-task model for Vietnamese NLP tasks.
|
| 283 |
+
- **Relevance to UDD-1**: Potential base model for AL pipeline; multi-task capabilities align with joint annotation
|
| 284 |
+
|
| 285 |
+
---
|
| 286 |
+
|
| 287 |
+
## Additional Related Papers
|
| 288 |
+
|
| 289 |
+
### 34. Active Annotation
|
| 290 |
+
- **Authors**: Andreas Vlachos
|
| 291 |
+
- **Venue**: ATEM Workshop, 2006
|
| 292 |
+
- **URL**: https://aclanthology.org/W06-2209/
|
| 293 |
+
- **Summary**: Semi-supervised learning combining unsupervised initial annotation with human correction of detected errors.
|
| 294 |
+
- **Relevance to UDD-1**: Similar to UDD-1's approach of automatic annotation + correction
|
| 295 |
+
|
| 296 |
+
### 35. Ensemble-based Active Learning for Parse Selection
|
| 297 |
+
- **Venue**: NAACL 2004
|
| 298 |
+
- **URL**: https://aclanthology.org/N04-1012/
|
| 299 |
+
- **Summary**: Ensemble methods for active learning in parse selection tasks.
|
| 300 |
+
- **Relevance to UDD-1**: Ensemble approach for parser disagreement detection
|
guidelines/00. Sentence Selection.md
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Sentence Selection Criteria
|
| 2 |
+
|
| 3 |
+
This document describes the criteria used to select sentences for inclusion in the UDD-1 treebank. Sentences are drawn from 5 domains (Legal, News, Wikipedia, Fiction, Non-fiction), each contributing 8,000 sentences to a total of 40,000.
|
| 4 |
+
|
| 5 |
+
## 1. Preprocessing
|
| 6 |
+
|
| 7 |
+
All source texts undergo the same preprocessing before sentence selection:
|
| 8 |
+
|
| 9 |
+
1. **Unicode normalization** via `underthesea.text_normalize()`
|
| 10 |
+
2. **Markdown removal**: headers (`#`), bold/italic (`**`, `*`), links (`[text](url)`), horizontal rules (`---`)
|
| 11 |
+
3. **Whitespace normalization**: collapse multiple newlines, strip leading/trailing whitespace per line
|
| 12 |
+
4. **Sentence segmentation** via `underthesea.sent_tokenize()`
|
| 13 |
+
|
| 14 |
+
## 2. Common Criteria (All Domains)
|
| 15 |
+
|
| 16 |
+
Every sentence must pass the following checks:
|
| 17 |
+
|
| 18 |
+
| Criterion | Condition |
|
| 19 |
+
|----------|-----------|
|
| 20 |
+
| Non-empty | Sentence must not be blank after stripping |
|
| 21 |
+
| Minimum length | >= 20 characters |
|
| 22 |
+
| Maximum length | <= 300 characters |
|
| 23 |
+
| Contains Vietnamese | Must contain at least one Vietnamese diacritical character (e.g., à, á, ả, ã, ạ, ă, â, đ, ê, ô, ơ, ư) |
|
| 24 |
+
| Not mostly uppercase | Uppercase characters must be <= 50% of total characters |
|
| 25 |
+
| No URLs | Must not contain `http`, `www.`, `.com`, `.vn` |
|
| 26 |
+
|
| 27 |
+
## 3. Domain-Specific Criteria
|
| 28 |
+
|
| 29 |
+
### 3.1 Legal (source: `undertheseanlp/UTS_VLC`)
|
| 30 |
+
|
| 31 |
+
Additional filters for legal text noise:
|
| 32 |
+
|
| 33 |
+
- **Header removal**: skip lines matching `QUỐC HỘI`, `CỘNG HÒA`, `Độc lập`, `Phần thứ`, `Chương [IVX]+`, `MỤC \d+`
|
| 34 |
+
- **Article/clause titles**: skip `Điều \d+`, `Khoản \d+`, `Mục \d+`
|
| 35 |
+
- **Metadata lines**: skip lines starting with `English:`, `Số hiệu:`, `Ngày hiệu lực:`, `---`, `|`
|
| 36 |
+
- **Trailing list markers**: remove trailing `1.` or `a)` patterns before validation
|
| 37 |
+
- **Incomplete lines**: skip lines ending with a bare number
|
| 38 |
+
|
| 39 |
+
### 3.2 News (source: `undertheseanlp/UVN-1`)
|
| 40 |
+
|
| 41 |
+
Additional filters for journalism artifacts:
|
| 42 |
+
|
| 43 |
+
- **Bylines**: skip `Theo ...`, `PV ...`, `Nguồn:`, `Ảnh:`, `Video:`, `Bài:`, `Tin ảnh:`
|
| 44 |
+
- **Photo captions**: skip sentences ending with `(Ảnh: ...)` or `(Nguồn: ...)`
|
| 45 |
+
- **Timestamps**: skip sentences starting with date (`dd/mm/yyyy`) or time (`hh:mm`) patterns
|
| 46 |
+
- **Tags/categories**: skip `Tags:`, `Chuyên mục:`, `Từ khóa:`
|
| 47 |
+
- **Excessive numbers**: digit characters must be <= 30% of total characters (filters data tables)
|
| 48 |
+
|
| 49 |
+
### 3.3 Wikipedia (source: `undertheseanlp/UVW-2026`)
|
| 50 |
+
|
| 51 |
+
Two-stage filtering:
|
| 52 |
+
|
| 53 |
+
**Stage 1 - Article-level**: only articles with `quality_score >= 5` are considered.
|
| 54 |
+
|
| 55 |
+
**Stage 2 - Sentence-level**:
|
| 56 |
+
- **Stub markers**: skip sentences containing `bài sơ khai`, `sơ khai về`, `cần được mở rộng`, `Thể loại:`
|
| 57 |
+
- **Section headers**: skip `Thể loại`, `Danh sách`, `Xem thêm`, `Tham khảo`, `Liên kết ngoài`, `Chú thích`
|
| 58 |
+
- **Infobox remnants**: skip sentences with more than 2 pipe characters (`|`) or multiple `key=value` patterns
|
| 59 |
+
- **Reference fragments**: skip sentences containing `[1]`, `[2]`, `[cần dẫn nguồn]`
|
| 60 |
+
- **Bullet lists**: skip lines starting with `*`, `-`, or `•`
|
| 61 |
+
- **Excessive numbers**: digit characters must be <= 30% of total characters
|
| 62 |
+
|
| 63 |
+
### 3.4 Fiction & Non-fiction (source: `undertheseanlp/UVB-v0.1`)
|
| 64 |
+
|
| 65 |
+
The most restrictive criteria, designed to select well-formed literary sentences:
|
| 66 |
+
|
| 67 |
+
**Book-level filtering**:
|
| 68 |
+
- Books are classified as fiction or non-fiction based on genre tags (e.g., `Novels`, `Romance`, `Fantasy` for fiction; `History`, `Biography`, `Psychology` for non-fiction)
|
| 69 |
+
- Books are ranked by quality score: `goodreads_rating * min(num_ratings / 100, 10)`, higher-rated books are prioritized
|
| 70 |
+
- Maximum 500 sentences per book to ensure source diversity
|
| 71 |
+
|
| 72 |
+
**Sentence-level filtering** (stricter than other domains):
|
| 73 |
+
|
| 74 |
+
| Criterion | Condition |
|
| 75 |
+
|----------|-----------|
|
| 76 |
+
| Minimum length | >= 30 characters (vs. 20 for others) |
|
| 77 |
+
| Maximum length | <= 250 characters (vs. 300 for others) |
|
| 78 |
+
| Minimum word count | >= 5 words |
|
| 79 |
+
| Maximum word count | <= 40 words |
|
| 80 |
+
| Starts with uppercase | First character must be uppercase |
|
| 81 |
+
| Ends with punctuation | Last character must be one of `.!?…"»` |
|
| 82 |
+
| Uppercase ratio | <= 30% (vs. 50% for others) |
|
| 83 |
+
| Digit ratio | <= 15% (vs. 30% for news/wiki) |
|
| 84 |
+
| Punctuation density | Punctuation count must be <= 1.5x word count |
|
| 85 |
+
| No mid-sentence ellipsis | `...` must not appear before the last 5 characters |
|
| 86 |
+
| Limited dialogue | At most 4 quotation marks (`"`, `“`, `”`) per sentence |
|
| 87 |
+
| No chapter headers | Skip `Chương`, `Phần`, `Mục`, `Điều`, numbered items |
|
| 88 |
+
| No URLs/emails | Skip sentences containing `http`, `www.`, `@`, `.com`, `.vn` |
|
| 89 |
+
|
| 90 |
+
## 4. Sampling Strategy
|
| 91 |
+
|
| 92 |
+
- Each domain contributes exactly **8,000 sentences** (books domain: 8,000 fiction + 8,000 non-fiction)
|
| 93 |
+
- Sentences are collected sequentially from documents until the target count is reached
|
| 94 |
+
- For Wikipedia: only high-quality articles are used (quality_score >= 5)
|
| 95 |
+
- For Books: books are processed in descending order of quality score
|
| 96 |
+
|
| 97 |
+
## 5. Train/Dev/Test Split
|
| 98 |
+
|
| 99 |
+
After collecting all 40,000 sentences, the dataset is split using stratified sampling:
|
| 100 |
+
|
| 101 |
+
| Split | Ratio | Sentences | ~Tokens |
|
| 102 |
+
|-------|-------|-----------|---------|
|
| 103 |
+
| Train | 91.4% | ~36,560 | ~548K |
|
| 104 |
+
| Dev | 4.3% | ~1,720 | ~26K |
|
| 105 |
+
| Test | 4.3% | ~1,720 | ~26K |
|
| 106 |
+
|
| 107 |
+
### Rationale for Split Ratio
|
| 108 |
+
|
| 109 |
+
The 91.4/4.3/4.3 ratio follows established practices in Universal Dependencies:
|
| 110 |
+
|
| 111 |
+
1. **UD official guidelines** (universaldependencies.org) specify that for treebanks with >110K words, dev and test sets should each contain "between 10K words and 10% of the data" (de Marneffe et al., 2021). UDD-1's dev and test sets each contain ~26K tokens, comfortably exceeding the 10K-word minimum while staying under 10%.
|
| 112 |
+
|
| 113 |
+
2. **CoNLL shared task standard.** The CoNLL 2017 and 2018 shared tasks on UD parsing established 10K words as the minimum threshold for a meaningful test set (Zeman et al., 2017; 2018). UDD-1's test set is more than double this minimum.
|
| 114 |
+
|
| 115 |
+
3. **Comparable to major UD treebanks in absolute dev/test size.** Large UD treebanks allocate progressively smaller percentages to dev/test while maintaining sufficient absolute sizes:
|
| 116 |
+
|
| 117 |
+
| Treebank | Total | Dev | Test | Dev% | Test% |
|
| 118 |
+
|----------|-------|-----|------|------|-------|
|
| 119 |
+
| Czech-PDT | 213,897 | 22,666 | 20,187 | 10.6% | 9.4% |
|
| 120 |
+
| Korean-Kaist | 27,363 | 2,066 | 2,287 | 7.6% | 8.4% |
|
| 121 |
+
| English-EWT | 16,622 | 2,001 | 2,077 | 12.0% | 12.5% |
|
| 122 |
+
| Hindi-HDTB | 16,649 | 1,659 | 1,684 | 10.0% | 10.1% |
|
| 123 |
+
| French-GSD | 16,342 | 1,476 | 416 | 9.0% | 2.5% |
|
| 124 |
+
| Japanese-GSD | 8,100 | 507 | 543 | 6.3% | 6.7% |
|
| 125 |
+
| Vietnamese-VTB | 3,323 | 1,123 | 800 | 33.8% | 24.1% |
|
| 126 |
+
|
| 127 |
+
UDD-1's ~1,720-sentence dev/test sets are in the same range as English-EWT (~2,000) and Hindi-HDTB (~1,670).
|
| 128 |
+
|
| 129 |
+
4. **Maximizing training data.** Since UDD-1 is a silver-standard treebank intended for parser training, maximizing the training portion is beneficial. The dev/test sets at ~1,720 sentences each are large enough for reliable evaluation, while the remaining 91.4% provides maximum training signal (Nivre et al., 2020).
|
| 130 |
+
|
| 131 |
+
### Split Method
|
| 132 |
+
|
| 133 |
+
- **Stratified by domain**: each domain is shuffled independently and split according to the ratios above, ensuring proportional representation across all splits
|
| 134 |
+
- **Reproducible**: random seed = 42
|
| 135 |
+
- Sentence IDs encode the domain: `vlc-` (legal), `uvn-` (news), `uvw-` (wikipedia), `uvb-f-` (fiction), `uvb-n-` (non-fiction)
|
| 136 |
+
|
| 137 |
+
## References
|
| 138 |
+
|
| 139 |
+
- de Marneffe, M.-C., Manning, C.D., Nivre, J., & Zeman, D. (2021). Universal Dependencies. *Computational Linguistics*, 47(2), 255-308. https://aclanthology.org/2021.cl-2.11/
|
| 140 |
+
- Nivre, J., et al. (2020). Universal Dependencies v2: An Evergrowing Multilingual Treebank Collection. In *Proceedings of LREC 2020*, pp. 4034-4043. https://aclanthology.org/2020.lrec-1.497/
|
| 141 |
+
- Zeman, D., et al. (2017). CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies. In *Proceedings of the CoNLL 2017 Shared Task*, pp. 1-19. https://aclanthology.org/K17-3001/
|
| 142 |
+
- Zeman, D., et al. (2018). CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies. In *Proceedings of the CoNLL 2018 Shared Task*, pp. 1-21. https://aclanthology.org/K18-2001/
|
references/2016.lrec.nguyen/paper.md
ADDED
|
@@ -0,0 +1,674 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: "Challenges and Solutions for Consistent Annotation of Vietnamese Treebank"
|
| 3 |
+
authors:
|
| 4 |
+
- Quy T. Nguyen
|
| 5 |
+
- Yusuke Miyao
|
| 6 |
+
- Ha T. T. Le
|
| 7 |
+
- Ngan L. T. Nguyen
|
| 8 |
+
year: 2016
|
| 9 |
+
venue: "LREC 2016"
|
| 10 |
+
acl_anthology: "L16-1243"
|
| 11 |
+
url: "https://aclanthology.org/L16-1243/"
|
| 12 |
+
pdf: "https://aclanthology.org/L16-1243.pdf"
|
| 13 |
+
journal_version:
|
| 14 |
+
title: "Ensuring annotation consistency and accuracy for Vietnamese treebank"
|
| 15 |
+
venue: "Language Resources and Evaluation"
|
| 16 |
+
year: 2018
|
| 17 |
+
doi: "10.1007/s10579-017-9398-3"
|
| 18 |
+
url: "https://link.springer.com/article/10.1007/s10579-017-9398-3"
|
| 19 |
+
---
|
| 20 |
+
|
| 21 |
+
# Challenges and Solutions for Consistent Annotation of Vietnamese Treebank
|
| 22 |
+
|
| 23 |
+
**Quy T. Nguyen** [1&2] **, Yusuke Miyao** [1&2] **, Ha T.T. Le** [3] **, Ngan L.T. Nguyen** [4]
|
| 24 |
+
|
| 25 |
+
1The Graduate University for Advanced Studies (SOKENDAI), Japan
|
| 26 |
+
2National Institute of Informatics, Japan
|
| 27 |
+
3University of Social Sciences and Humanities, Vietnam
|
| 28 |
+
4 University of Information Technology, Vietnam
|
| 29 |
+
quynt@nii.ac.jp, yusuke@nii.ac.jp, trucha.ussh@gmail.com, ngannlt@uit.edu.vn
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
**Abstract**
|
| 33 |
+
Treebanks are important resources for research in natural language processing, speech recognition, theoretical linguistics, etc. To
|
| 34 |
+
strengthen the automatic processing of the Vietnamese language, a Vietnamese treebank has been built. However, the quality of this
|
| 35 |
+
treebank is not satisfactory and is a possible source for the low performance of Vietnamese language processing. We have been building
|
| 36 |
+
a new treebank for Vietnamese with about 40,000 sentences annotated with three layers: word segmentation, part-of-speech tagging,
|
| 37 |
+
and bracketing. In this paper, we describe several challenges of Vietnamese language and how we solve them in developing annotation
|
| 38 |
+
guidelines. We also present our methods to improve the quality of the annotation guidelines and ensure annotation accuracy and
|
| 39 |
+
consistency. Experiment results show that inter-annotator agreement ratios and accuracy are higher than 90% which is satisfactory.
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
**Keywords:** Vietnamese Treebank, Consistent Annotation, Challenges and Solutions
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
**1.** **Introduction**
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
Treebanks--corpora annotated with syntactic structures, are
|
| 50 |
+
important resources for researchers in natural language processing (NLP). Treebanks provide important syntactic information in order to improve the quality of NLP tools. To
|
| 51 |
+
strengthen the automatic processing of the Vietnamese language, Nguyen et al. (2009) have built a Vietnamese treebank, named VLSP treebank, containing 10,000 sentences.
|
| 52 |
+
However, the quality of the VLSP treebank, including the
|
| 53 |
+
quality of the annotation scheme, the annotation guidelines,
|
| 54 |
+
and the annotation process, is not satisfactory and is a possible source for the low performance of Vietnamese language
|
| 55 |
+
processing (Nguyen et al., 2012; Nguyen et al., 2013).
|
| 56 |
+
We have been building a new Vietnamese treebank with
|
| 57 |
+
3,000 texts (about 40,000 sentences) covering 14 topics
|
| 58 |
+
collected from a Vietnamese online newspaper, Thanhnien
|
| 59 |
+
news [1] . Our treebank is annotated with three layers: word
|
| 60 |
+
segmentation (WS), part-of-speech (POS) tagging, and
|
| 61 |
+
bracketing as showed in Figure 1 [2] . We have found that ensuring the annotation consistency and accuracy is one of the
|
| 62 |
+
most important considerations in the annotation of a treebank. This requires clear and complete annotation guidelines. The guidelines contain the annotation scheme, consistent principles to annotate linguistic phenomena, and sufficient examples. These documents are not only used to
|
| 63 |
+
train annotators but also valuable sources serving the uses
|
| 64 |
+
of the treebank.
|
| 65 |
+
We prepared three set of guidelines for the Vietnamese treebank: WS guidelines, POS tagging guidelines, and bracketing guidelines. In this paper, Section 2 describes the general
|
| 66 |
+
characteristics of the Vietnamese language in comparison
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
1http://thanhnien.vn
|
| 70 |
+
2Underscore "_" is used to link syllables of Vietnamese multisyllable words. Translation for the Vietnamese word is given as
|
| 71 |
+
a subscript. If the Vietnamese word does not have a translatable
|
| 72 |
+
meaning, the subscript is blank. Translation for a Vietnamese sentence is given in curly brackets below the original text.
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
**Original sentence:**
|
| 77 |
+
|
| 78 |
+
_Nam ke ve tai nan hom qua._
|
| 79 |
+
_{Nam tells about the yesterday's accident.}_
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
**1.** **Word segmentation:**
|
| 83 |
+
|
| 84 |
+
_Nam ke(to tell) ve(about) tai_nan(accident) hom_qua(yesterday) ._
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
**2.** **POS tagging:**
|
| 88 |
+
|
| 89 |
+
_Nam/Nr ke/Vv ve/Cs tai_nan/Nn hom_qua/Nt ./PU_
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
**3.** **Bracketing:**
|
| 93 |
+
_(S_
|
| 94 |
+
|
| 95 |
+
_(NP-SBJ (Nr-H Nam))_
|
| 96 |
+
_(VP (Vv-H ke)_
|
| 97 |
+
|
| 98 |
+
_(PP-DOB (Cs-H ve)_
|
| 99 |
+
|
| 100 |
+
_(NP (Nn-H tai_nan)_
|
| 101 |
+
|
| 102 |
+
_(NP-TMP (Nt-H hom_qua)))))_
|
| 103 |
+
_(PU .))_
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
Figure 1: An example to illustrate the process of annotating a Vietnamese sentence.
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
with other languages (e.g., English and Chinese) to indicate that building a high-quality Vietnamese treebank is a
|
| 110 |
+
challenging problem. We also present our methodology to
|
| 111 |
+
tackle the challenges in this section. We then discuss difficulties in WS, POS tagging, and bracketing, and how we
|
| 112 |
+
solve them in developing the annotation guideline in Section 3, 4, and 5 respectively. Finally, in Section 6, we describe our annotation process, how we revise the guidelines
|
| 113 |
+
during the annotation process, and methods to ensure the
|
| 114 |
+
annotation consistency and accuracy.
|
| 115 |
+
This study is not only beneficial for the development of
|
| 116 |
+
computational processing technologies for Vietnamese, a
|
| 117 |
+
language spoken by over 90 million people, but also for
|
| 118 |
+
similar languages such as Thai, Laos, and so on. This study
|
| 119 |
+
also promotes the computational linguistic studies on how
|
| 120 |
+
to transfer methods developed for a popular language, like
|
| 121 |
+
English, to a language that has not yet been intensively studied.
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
**2.** **Characteristics of Vietnamese language**
|
| 125 |
+
**and methodology for guideline**
|
| 126 |
+
**preparation**
|
| 127 |
+
|
| 128 |
+
Unlike Western languages, in which blank spaces denote
|
| 129 |
+
word delimiters, in Vietnamese, blank spaces play the roles
|
| 130 |
+
of not only word delimiters but also syllable delimiters
|
| 131 |
+
(Diep, 2005; SCSSV, 1983), which causes difficulties in defining words. In addition, unlike English and Japanese, Vietnamese is not an inflectional language for which morphological forms can provide useful clues for word segmentation and POS tagging. While similar problems also occur with Chinese (Xia et al., 2000), annotating Vietnamese
|
| 132 |
+
words may be more difficult, because the modern Vietnamese writing system is based on Latin characters, which
|
| 133 |
+
represent the pronunciation but not the meaning of words,
|
| 134 |
+
resulting in many homonyms.
|
| 135 |
+
Difficulties in Vietnamese occur in not only determining
|
| 136 |
+
words as mentioned above but also bracketing phrases. One
|
| 137 |
+
of the reasons is that there are many expressions having
|
| 138 |
+
the same POS sequence but different phrase types in Vietnamese. Other difficulties are caused by the fact that word
|
| 139 |
+
order in Vietnamese is very flexible.
|
| 140 |
+
Moreover, there is little consensus in the community about
|
| 141 |
+
how to define words, phrases and grammatical structures.
|
| 142 |
+
Though people agree that Vietnamese is a subject-verb-object (SVO) language, Figure 2a shows a sentence in Vietnamese in which the head word of the predicate is not a verb.
|
| 143 |
+
For sentences that do not have the main verb, we can use
|
| 144 |
+
the conjunction _thi_ to link the subject and the predicate as
|
| 145 |
+
shown in Figure 2b. However, when the conjunction _thi_ is
|
| 146 |
+
used, linguists disagree about how to bracket this sentence.
|
| 147 |
+
Diep (2005) considered this sentence as a single sentence
|
| 148 |
+
(Figure 2b), where the conjunction _thi_ is used to link the
|
| 149 |
+
subject and the predicate. SCSSV (1983), in contrast, considered this sentence as a subordinate compound sentence
|
| 150 |
+
(Figure 2c) because they said that the conjunction _thi_ is used
|
| 151 |
+
to link two clauses of a subordinate compound sentence.
|
| 152 |
+
We prepared the guidelines for the Vietnamese treebank including three sets: word segmentation guidelines, POS tagging guidelines, and bracketing guidelines. The problems
|
| 153 |
+
were tackled on the basis of the following approaches:
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
- We refer to Vietnamese grammar books (SCSSV,
|
| 157 |
+
1983; Diep, 2005) and discuss with our collaborators,
|
| 158 |
+
who are Vietnamese linguistics experts, to solve the
|
| 159 |
+
ambiguities and difficulties.
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
- We study the guidelines of Chinese Penn Treebank
|
| 163 |
+
(Xia, 2000b; Xia, 2000a; Xue et al., 2000), English
|
| 164 |
+
Penn Treebank (Santorini, 1990; Bies et al., 1995), and
|
| 165 |
+
VLSP treebank (Nguyen et al., 2010b; Nguyen et al.,
|
| 166 |
+
2010a; Nguyen et al., 2010c) and adapt them to our
|
| 167 |
+
guidelines if possible.
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
- During the annotation process, annotators [3] are requested to discuss with us about the constructions that
|
| 171 |
+
they cannot annotate or feel ambiguous. These constructions are important clues to revise the guidelines.
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
- We conduct nine rounds of measurement of interannotator agreement and accuracy, for which two annotators annotate the same data. The inconsistencies
|
| 175 |
+
and annotation errors found in each round are important clues to improve annotation guidelines and to train
|
| 176 |
+
annotators again.
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
Details of applying these approaches during the process of
|
| 180 |
+
building the Vietnamese treebank are explained in the following sections.
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
**3.** **Word segmentation guidelines**
|
| 184 |
+
|
| 185 |
+
**3.1.** **Challenges of word segmentation**
|
| 186 |
+
|
| 187 |
+
Words are the most basic units of a treebank (Sciullo and
|
| 188 |
+
Williams, 1987), and defining words is the first step in
|
| 189 |
+
the annotation process (Xia, 2000b; Xia, 2000a; Sornlertlamvanich et al., 1999). For languages like English, defining words is almost trivial, because the blank spaces denote word delimiters. However, it is a difficult problem in
|
| 190 |
+
Vietnamese even for a native speaker. Although most linguists agree that the Vietnamese language has two types
|
| 191 |
+
of words, single-syllable words (single words) and multisyllable words (compound words), distinguishing between
|
| 192 |
+
single and multi-syllable words involves much ambiguity.
|
| 193 |
+
The ambiguities of Vietnamese WS occur for the following
|
| 194 |
+
reasons. First, in Vietnamese, blank spaces play the roles
|
| 195 |
+
of not only word delimiters but also syllable delimiters.
|
| 196 |
+
Second, there are no morphological marks to act as important clues to identify words. Third, the Vietnamese writing system is based on Latin characters, which represent
|
| 197 |
+
the pronunciation but not the meaning of words. Expressions that have the same surface form but different word
|
| 198 |
+
segmentation appear frequently in Vietnamese. Rows 1 and
|
| 199 |
+
2 in Table 1, for instance, show two different segmentation
|
| 200 |
+
|
| 201 |
+
3Our treebank is annotated by two annotators who are graduate
|
| 202 |
+
linguistics students.
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
| No. | Expression (A B) | Meaning | WS |
|
| 206 |
+
|-----|-------------------|---------|-----|
|
| 207 |
+
| 1 | quan ao | clothes | a word |
|
| 208 |
+
| 2 | quan ao | trousers and shirt | 2 words |
|
| 209 |
+
| 3 | an noi | to speak | a word |
|
| 210 |
+
| 4 | tim kiem | to find | a word |
|
| 211 |
+
| 5 | noi dong | copper pot | 2 words |
|
| 212 |
+
| 6 | noi bang dong | copper pot | 3 words |
|
| 213 |
+
| 7 | den dua | black | a word |
|
| 214 |
+
| 8 | ca heo | dolphin | a word |
|
| 215 |
+
| 9 | ca lia thia | betta fish | 2 words |
|
| 216 |
+
| 10 | nghien cuu vien | researcher | 2 words |
|
| 217 |
+
| 11 | nha nghien cuu | researcher | 2 words |
|
| 218 |
+
|
| 219 |
+
Table 1: Examples to illustrate the principles of word segmentation.
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
types of the expression _quan ao_ . Fourth, there is little consistency in segmenting the expressions. For example, some
|
| 223 |
+
linguists consider the expression _ca ro {anabas}_
|
| 224 |
+
as a compound word but _benh soi {measles}_
|
| 225 |
+
as two words (Hoang, 1998; Diep, 2005). However, these
|
| 226 |
+
expressions have a similar construction: the combination of
|
| 227 |
+
a categorization noun [4] and a specific noun.
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
**3.2.** **Policy for annotation of word segmentation**
|
| 231 |
+
|
| 232 |
+
As mentioned above, our purpose for word segmentation
|
| 233 |
+
is to build a treebank for Vietnamese. Therefore, we consider a word as the smallest syntactic unit having a complete meaning and preventing syntactic rules from analyzing word structure (Sciullo and Williams, 1987). On the basis of this word definition, we propose the following rules
|
| 234 |
+
to solve the difficulties in Vietnamese word segmentation:
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
- If A and B [5] have different meanings and the meaning
|
| 238 |
+
of the combination form (A_B) is different from the
|
| 239 |
+
split form (A B), we select the form that has a meaning more appropriate for the context. Examples 1 and
|
| 240 |
+
2 in Table 1 show an expression having two different
|
| 241 |
+
meanings because of different word segmentation.
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
- If A and B have different meanings and A_B has the
|
| 245 |
+
same meaning as A or B, the combination form is selected. The example is given in row 3 of Table 1.
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
- If A and B have the same meaning, the combination
|
| 249 |
+
form is selected (example 4 in Table 1).
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
- If another syllable can be inserted between A and B,
|
| 253 |
+
we select the split form (examples 5 and 6 in Table 1).
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
- If A is a word and B is not (or vice versa), we select the
|
| 257 |
+
combination form. Example 7 in Table 1 shows that if
|
| 258 |
+
_dua_ is considered as a single word, its meaning is undefined. Therefore, it is considered as part of a multisyllable word.
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
- For the expression of a categorization noun (A) and
|
| 262 |
+
a specific noun (B), if B indicates something different
|
| 263 |
+
from what the expression indicates, A_B is considered
|
| 264 |
+
as a compound word. In contrast, if B has a similar
|
| 265 |
+
meaning to A B, A and B are considered as two words
|
| 266 |
+
(examples 8 and 9 in Table 1).
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
- An expression of one or more Sino-Vietnamese syllables and an original Vietnamese word, in which the
|
| 270 |
+
Sino-Vietnamese syllables are the elements used to
|
| 271 |
+
create the new words, is not considered as a word (example 10 in Table 1).
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
- Special classifier nouns are considered as single words
|
| 275 |
+
(example 11 in Table 1).
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
It should be noted that these rules do not necessarily conform to the rules used by linguists. For example, Diep
|
| 279 |
+
(2005) considers the Sino-Vietnamese syllable _vien_ in
|
| 280 |
+
example 10 in Table 1 as a component of the compound
|
| 281 |
+
word and considers the special classifier noun _nha_ as a
|
| 282 |
+
single word. We, on the other hand, consider both _vien_
|
| 283 |
+
and _nha_ as single words because we found that they
|
| 284 |
+
both have the same grammatical function that is forming
|
| 285 |
+
new words. However, in our guidelines, the word types for
|
| 286 |
+
which there is little consensus between linguists for segmenting them are annotated with additional information so
|
| 287 |
+
that such words can be automatically converted according
|
| 288 |
+
to the need.
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
**4.** **Part-of-speech tagging guidelines**
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
**4.1.** **Challenges of POS tagging**
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
Tagging POS for Vietnamese words is not a trivial problem
|
| 298 |
+
because they are not marked with morphological features,
|
| 299 |
+
such as tense, number, gender, etc. While the same problem also appears with Chinese, Vietnamese may be more
|
| 300 |
+
difficult, because the Vietnamese writing system is based
|
| 301 |
+
on Latin characters, which represent the pronunciation, but
|
| 302 |
+
not the meaning of words.
|
| 303 |
+
Words that have the same surface form and pronunciation
|
| 304 |
+
but different meanings and grammar functions occur frequently in the text. For example, we can understand the
|
| 305 |
+
word _moi_ in accordance with two meanings shown in rows
|
| 306 |
+
1 and 2 of Table 2. If we consider _moi_ as an adjective modifying the preceding word, the noun _nghien_cuu(research)_,
|
| 307 |
+
it means _new_ ; The word _moi_ means _recently_ or _just_ if we
|
| 308 |
+
consider it as an adjunct modifying the following word, the
|
| 309 |
+
verb _thuc_hien(to conduct)_ .
|
| 310 |
+
Determining POS of the words having the same surface
|
| 311 |
+
form may be more ambiguous because a verb or an adjective can appear in the position of a noun as in the case of
|
| 312 |
+
_bao cao_ in rows 3 and 4 of Table 2. Solely referring to the
|
| 313 |
+
sentence, we do not have any clue to determine if _bao cao_
|
| 314 |
+
belongs to the verb class or noun class. _Bao cao_ means _defend_ if it is considered as a verb (row 3) and _thesis_ if it is
|
| 315 |
+
considered as a noun (row 4).
|
| 316 |
+
Ambiguity of the POS tagging is also caused by the omission of words which happens frequently in Vietnamese. For
|
| 317 |
+
example, if a verb or an adjective plays the same roles as
|
| 318 |
+
a noun, it is actually preceded by a special classifier noun
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
| No. | POS tag | Meaning of tag | No. | POS tag | Meaning of tag |
|
| 322 |
+
|-----|---------|----------------|-----|---------|----------------|
|
| 323 |
+
| 1 | SV | Sino-Vietnamese syllable | 17 | NA | Noun-adjective |
|
| 324 |
+
| 2 | Nc | Classifier noun | 18 | Vcp | Comparative verb |
|
| 325 |
+
| 3 | Ncs | Special classifier noun | 19 | Vv | Other verb |
|
| 326 |
+
| 4 | Nu | Unit noun | 20 | An | Ordinal number |
|
| 327 |
+
| 5 | Nun | Administrative unit noun | 21 | Aa | Other adjective |
|
| 328 |
+
| 6 | Nw | Quantifier indicating the whole | 22 | Pd | Demonstrative pronoun |
|
| 329 |
+
| 7 | Num | Number | 23 | Pp | Other pronoun |
|
| 330 |
+
| 8 | Nq | Other quantifier | 24 | R | Adjunct |
|
| 331 |
+
| 9 | Nr | Proper noun | 25 | Cs | Preposition or conjunction introducing a clause |
|
| 332 |
+
| 10 | Nt | Noun of time | 26 | Cp | Other conjunction |
|
| 333 |
+
| 11 | Nn | Other noun | 27 | ON | Onomatopoeia |
|
| 334 |
+
| 12 | Ve | Existing verb | 28 | ID | Idioms |
|
| 335 |
+
| 13 | Vc | Copula "la" verb | 29 | E | Exclamation word |
|
| 336 |
+
| 14 | D | Directional verb | 30 | M | Modifier word |
|
| 337 |
+
| 15 | VA | Verb-adjective | 31 | FW | Foreign word |
|
| 338 |
+
| 16 | VN | Verb-noun | 32 | X | Unidentified word |
|
| 339 |
+
| | | | 33 | PU | Punctuation |
|
| 340 |
+
|
| 341 |
+
Table 3: POS tag set designed for our treebank.
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
(as the case of _bao cao_ in row 5 [6] of Table 2). Otherwise,
|
| 345 |
+
a noun is preceded by a classifier noun [7] (the noun _bao cao_
|
| 346 |
+
in row 6 of Table 2 follows the classifier noun _cuon_ ). However, such useful nouns are usually omitted in Vietnamese
|
| 347 |
+
sentences which causes ambiguity of tagging words.
|
| 348 |
+
Some linguists (SCSSV, 1983; Diep, 2005) have claimed
|
| 349 |
+
that POS can be recognized by referring to the adjuncts
|
| 350 |
+
modifying the words. For example, adjuncts indicating degree and tenses modify adjectives and verbs, respectively.
|
| 351 |
+
However, this method does not necessarily work sufficiently with real texts. In practice, many verbs and adjectives in Vietnamese can be modified by the same adjunct.
|
| 352 |
+
For example, the adjunct indicating tense, _se(will)_ shown in
|
| 353 |
+
Table 2 can modify both the adjective _dep(beautiful)_ (row 7)
|
| 354 |
+
and the verb _di(to go)_ (row 8).
|
| 355 |
+
Because of the above characteristics of Vietnamese, it is
|
| 356 |
+
difficult not only to define the POS tag set but also to tag
|
| 357 |
+
each word in context. In addition, there is still little consensus between linguists as to methodology for classifying
|
| 358 |
+
words in Vietnamese. For instance, both Diep (2005) and
|
| 359 |
+
SCSSV (1983) classified the words based on their meanings, their combination ability, and their syntactic functions. However, Diep (2005) considered the words expressing the whole, such as _ca(all)_, _tat_ca(all)_, _toan_bo(all)_, etc.
|
| 360 |
+
as pronouns, while SCSSV (1983), in contrast, considered
|
| 361 |
+
them as nouns, and Hoang (1998) considered _ca_ as a pronoun and _tat_ca_ as a noun in all contexts.
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
**4.2.** **Building part-of-speech tag set**
|
| 365 |
+
|
| 366 |
+
In previous work, Nguyen et al. (2009) classified the words
|
| 367 |
+
on the basis of their combination ability and syntactic function. They created a POS tag set for Vietnamese including a total of 17 tags (except the tags for unknown words
|
| 368 |
+
and the punctuation). However, this tag set cannot cover
|
| 369 |
+
all the combination abilities as well as the syntactic functions of the Vietnamese words. For example, they used the
|
| 370 |
+
tag _P_ to annotate all pronouns. However, the pronouns used
|
| 371 |
+
to express space or time (demonstrative pronouns) such as
|
| 372 |
+
_nay(this)_ and _do(that)_ can be modifiers of the head nouns in
|
| 373 |
+
noun phrases. Personal pronouns, in contrast, always play
|
| 374 |
+
the roles of the head words of noun phrases.
|
| 375 |
+
Therefore, in this work, we created a new POS tag set
|
| 376 |
+
for Vietnamese. Our criteria to classify the words are also
|
| 377 |
+
based on the combination abilities and the syntactic functions of the words, like those of the VLSP treebank. However, we referred to the linguistics literature, carefully analyzed the roles of words and discussed with our linguistics
|
| 378 |
+
colleagues to create a new POS tag set for Vietnamese with
|
| 379 |
+
33 tags which are shown in Table 3. Using our POS tags,
|
| 380 |
+
we can recognize the role of a word in a phrase or sentence.
|
| 381 |
+
For example, the demonstrative pronouns modifying head
|
| 382 |
+
words of noun phrases are annotated with the _Pd_ label, and
|
| 383 |
+
personal pronouns that are head words of noun phrases are
|
| 384 |
+
annotated with the _Pp_ label.
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
**4.3.** **Policy for annotation of part-of-speech**
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
In our POS tagging guidelines, the words are tagged on the
|
| 391 |
+
basis of the following criteria:
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
- Combination ability of the word. For example,
|
| 395 |
+
_kho_khan_ can be understood as _difficulty_ or _difficult_ .
|
| 396 |
+
However, if it is a noun, it cannot combine with the
|
| 397 |
+
adjunct _rat(very)_ . If it is an adjective, it cannot combine
|
| 398 |
+
with the quantifier _nhung(-s/-es)_ .
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
- Syntactic function of the word. For example, if the
|
| 402 |
+
quantifier indicating the whole modifies a noun, it will
|
| 403 |
+
be annotated with an Nw tag. The quantifier indicating
|
| 404 |
+
the whole will be annotated with a Pp tag if it is head
|
| 405 |
+
word of a noun phrase.
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
- Meaning of the word in the sentence. For example, the
|
| 409 |
+
combination ability of the verb _di(to go)_ and the adjective _dep(beautiful)_ mentioned above is the same, they are
|
| 410 |
+
modified by the adjunct _se_ . They also have the same
|
| 411 |
+
syntactic function which is head word of predicates.
|
| 412 |
+
However, their meanings are different: the adjective
|
| 413 |
+
expresses the quality, and the verb expresses the action.
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
In addition, for each tag, the guidelines describe ambiguous
|
| 417 |
+
cases and ways to distinguish among them. There are words
|
| 418 |
+
that give us no clues to determine their POS if we only refer to single sentences as in the case of _bao_cao_ mentioned
|
| 419 |
+
above. In these cases, the contexts of the words can be determined by referring to the surrounding text. Therefore, our
|
| 420 |
+
annotation tool is designed to allow annotators to view the
|
| 421 |
+
text to which the sentence belongs. For the words that give
|
| 422 |
+
us no clues to determine their POS accurately, we decided
|
| 423 |
+
to tag them on the basis of their combination ability, their
|
| 424 |
+
syntactic function, or their meaning in the immediatelypreceding phrase. For example, we tagged _moi_ mentioned
|
| 425 |
+
in Table 2 as an adjective based on its syntactic function
|
| 426 |
+
in the phrase _mot(a) nghien_cuu(research) moi(new) {a new research}_ .
|
| 427 |
+
In Vietnamese, for several types of words there is still little consensus on how to determine their POS tags. For example, emotional
|
| 428 |
+
verbs can be considered as adjectives, while some people
|
| 429 |
+
said that they have two POSs. For these cases, we tagged
|
| 430 |
+
them with double-POS tags so that they can be automatically changed to others.
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
**5.** **Bracketing guidelines**
|
| 434 |
+
|
| 435 |
+
**5.1.** **Representation scheme**
|
| 436 |
+
|
| 437 |
+
Our scheme is built on the basis of the VLSP treebank
|
| 438 |
+
(Nguyen et al., 2009). We use the following four types of labels: constituency labels indicating syntactic categories of
|
| 439 |
+
the phrases, functional labels indicating syntactic functions
|
| 440 |
+
and meanings (if any) of the phrases, null elements to mark
|
| 441 |
+
ellipses, and reference indices to mark syntactic movement.
|
| 442 |
+
We also use the label H to tag the head words of the phrases.
|
| 443 |
+
In addition, we refer to the scheme of English Penn Treebank, the scheme of Chinese Penn Treebank, and linguistics literature to complete the annotation scheme for Vietnamese. For example, Figure 3 shows a Vietnamese sentence that has only a verb phrase. This type of sentence was
|
| 444 |
+
not distinguished from the sentences that have the standard
|
| 445 |
+
structure [8] in the VLSP treebank. In our treebank, the sentences that do not have the standard structure will be bracketed with the label _SPL_ so that we can distinguish them
|
| 446 |
+
from the sentences that include a subject and a predicate,
|
| 447 |
+
which are bracketed with the label _S_ .
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
**5.2.** **Policy for annotation of bracket**
|
| 451 |
+
|
| 452 |
+
In this section, we will discuss two typical confusing cases
|
| 453 |
+
of Vietnamese bracketing. The first case is to differentiate
|
| 454 |
+
between the expressions that have the same POS sequence.
|
| 455 |
+
We classify these expressions into four types shown in Table 4.
|
| 456 |
+
These ambiguities occur for the following two reasons.
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
1. In Vietnamese phrases, the lexical words modifying
|
| 460 |
+
the head words commonly follow the head words.
|
| 461 |
+
However, there are also the adjectives that can come
|
| 462 |
+
before or follow the nouns and the verbs in the noun
|
| 463 |
+
phrases and the verb phrases. This causes the ambiguities for recognizing whether a phrase in which an
|
| 464 |
+
adjective comes before a verb is an adjective phrase
|
| 465 |
+
or a verb phrase, and the phrase in which an adjective
|
| 466 |
+
comes before a noun is an adjective phrase or a noun
|
| 467 |
+
phrase, such as the phrases shown in rows 1 and 2 of
|
| 468 |
+
Table 4.
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
2. The words are not marked with tense, number, case,
|
| 472 |
+
etc. and they are expressed through the adjunct. However, the adjunct is dropped frequently in the text. This
|
| 473 |
+
causes the ambiguities of distinguishing between the
|
| 474 |
+
clauses and the phrases. Row 3 of Table 4 shows two
|
| 475 |
+
ambiguities of distinguishing between sentences and
|
| 476 |
+
phrases.
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
To solve the above ambiguities, we propose the following
|
| 480 |
+
principles:
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
- For a noun phrase and an adjective phrase that have
|
| 484 |
+
the same structure, if the phrase modifies a verb about
|
| 485 |
+
quantity, it is bracketed with an NP (example 1 in Table 5). Conversely, if the phrase modifies a noun about
|
| 486 |
+
quality or is the predicate of the sentence, the phrase
|
| 487 |
+
is bracketed with an ADJP (example 2 in Table 5).
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
- For a verb phrase and an adjective phrase that have the
|
| 491 |
+
same structure, if the words can be inverted without
|
| 492 |
+
changing the meaning, the phrase is annotated with a
|
| 493 |
+
VP label (example 3 in Table 5). Otherwise, it will
|
| 494 |
+
be bracketed with a VP label if the verb precedes the
|
| 495 |
+
adjective and bracketed with an ADJP label if the verb
|
| 496 |
+
follows the adjective (example 4 in Table 5).
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
- For a clause and a noun phrase in which the noun
|
| 500 |
+
comes before the adjective (as mentioned in example
|
| 501 |
+
3 in Table 4), if we can insert the adjunct indicating
|
| 502 |
+
tense as a pre-modifier of the adjective, the expression
|
| 503 |
+
should be bracketed with an S label (example 5 in Table 5). In contrast, the expression will be bracketed as
|
| 504 |
+
a noun phrase (example 6 in Table 5).
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
- For a clause and a noun phrase in which the noun
|
| 508 |
+
comes before the verb (as mentioned in example 4 in
|
| 509 |
+
Table 4), if the noun is not the subject of the action
|
| 510 |
+
stated by the verb, the expression is bracketed with an
|
| 511 |
+
NP label (example 7 in Table 5).
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
The second confusing case is annotation of the ambiguous sentences that can be bracketed with various structures.
|
| 515 |
+
These ambiguities occur for the following reasons:
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
1. One phrase can be interpreted by different valid structures. Figure 4 is an example for this. In this example, we can understand _hom_qua(yesterday)_ as an adverb phrase modifying the verb _ke(to tell)_ (Figure 4b) or
|
| 519 |
+
a phrase modifying the noun _tai_nan(accident)_ (Figure
|
| 520 |
+
4c).
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
2. Ellipses occur frequently. For example, Diep (2005)
|
| 524 |
+
considered the sentence in Figure 5 as a single sentence, where the expression before the comma is a
|
| 525 |
+
subordinate component of the sentence that expresses
|
| 526 |
+
the manner (Figure 5b). However, this sentence can be
|
| 527 |
+
understood as a subordinate compound sentence (SCSSV, 1983) in which the subject of the first clause is
|
| 528 |
+
dropped because it is the same as the subject of the
|
| 529 |
+
second clause (Figure 5c).
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
3. Many words in Vietnamese were annotated with a
|
| 533 |
+
double-POS tag, which caused ambiguities in selecting the constituent label to bracket them.
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
To disambiguate these cases, we refer to the context to find
|
| 537 |
+
their actual meaning and structure. The cases in which there
|
| 538 |
+
is no clue for disambiguation are bracketed as follows: (1) If
|
| 539 |
+
one phrase can be interpreted by different valid structures,
|
| 540 |
+
the phrase will be bracketed with all valid structures; (2)
|
| 541 |
+
For ambiguities caused by ellipses, we annotate each type
|
| 542 |
+
of ellipsis in such a structure that maintains meaning of the
|
| 543 |
+
sentence. For example, we bracket the sentence in Figure 5
|
| 544 |
+
as a single sentence (Figure 5b); (3) For ambiguities caused
|
| 545 |
+
by the double-POS words, we also bracket each sentence
|
| 546 |
+
with a unique structure. However, the sentences can be converted into other structures on the basis of the POS tags.
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
**6.** **Annotation process and quality control**
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
Although we tried to write the guidelines as completely as
|
| 553 |
+
possible before the annotation process began, revising the
|
| 554 |
+
guidelines during the annotation process is unavoidable because real text is far more complicated than the examples
|
| 555 |
+
mentioned in the literature. Therefore, in this section, we
|
| 556 |
+
will discuss our method to improve the quality of annotation guidelines and to ensure correct and consistent annotation.
|
| 557 |
+
After finishing the drafts of annotation guidelines, we
|
| 558 |
+
trained two annotators and asked the annotators to annotate
|
| 559 |
+
600 texts (about 8,000 sentences) (preliminary annotation).
|
| 560 |
+
In this annotation stage, the annotators were asked to discuss about the constructions which they found difficult to
|
| 561 |
+
annotate because of ambiguities or other reasons. Based on
|
| 562 |
+
these discussions, we revised the guidelines for the instructions that cannot be applied to new data and the constructions that are not covered by the guidelines. After revising
|
| 563 |
+
the guidelines, we retrained annotators with the second version of the guidelines. Then, we carried out nine measurement rounds to calculate inter-annotator agreement scores
|
| 564 |
+
and accuracies. Each round includes the following steps:
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
- We randomly select three texts (about 40 syntactic
|
| 568 |
+
trees) from the results of the preliminary annotation;
|
| 569 |
+
|
| 570 |
+
|
| 571 |
+
- Each annotator re-annotates the texts independently;
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
- We compare the annotation results of each annotator
|
| 575 |
+
to the benchmark data annotated by us and those of
|
| 576 |
+
the other annotator;
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
- We discuss with annotators about the annotation errors and the inconsistencies, and revise the annotation
|
| 580 |
+
guidelines (if necessary).
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
Figure 6 shows the inter-annotator agreement scores and the
|
| 584 |
+
accuracies of three annotation layers. The left figure shows
|
| 585 |
+
the agreement between two annotators; the right one shows
|
| 586 |
+
the accuracy of each annotator (denoted by A1 and A2)
|
| 587 |
+
compared to the benchmark data. This figure shows that
|
| 588 |
+
from the sixth round, the agreement ratios and accuracies
|
| 589 |
+
were higher than 90%, which indicates that the annotation
|
| 590 |
+
is reliable.
|
| 591 |
+
After we finished the ninth measurement round, our annotators edited 600 texts. Then, the annotation results of each
|
| 592 |
+
annotator was checked and edited by the other annotator.
|
| 593 |
+
Finally, to clean up the corpus, we ran tools to detect annotation errors. These errors were manually edited by our annotators before our corpus was released.
|
| 594 |
+
Our observations on the inconsistent annotations and errors
|
| 595 |
+
revealed that most of the inconsistencies were caused by
|
| 596 |
+
the ambiguous expressions. There are three main reasons
|
| 597 |
+
for the ambiguous expressions: (1) there is no inflection in
|
| 598 |
+
Vietnamese; (2) word order is very flexible; (3) a sentence
|
| 599 |
+
can have many meanings. Figure 7 shows an example that
|
| 600 |
+
we can understand a sentence by two different meanings.
|
| 601 |
+
Although our annotation guidelines contain many examples
|
| 602 |
+
of ambiguous expressions as well as their correct annotations, real texts are complicated. Ambiguous expressions
|
| 603 |
+
appear in various forms, and it is difficult to recognize all structures that can be annotated. Therefore, to achieve a high
|
| 604 |
+
agreement ratio, the annotators need to be trained carefully
|
| 605 |
+
and to practice the annotation more on the basis of real texts
|
| 606 |
+
so that they become familiar with annotation and analyzing
|
| 607 |
+
the texts following the guidelines; the guidelines also need
|
| 608 |
+
to be updated for new constructions throughout the annotation process.
|
| 609 |
+
|
| 610 |
+
|
| 611 |
+
**7.** **Conclusion**
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
We have solved the challenges in building a Vietnamese
|
| 615 |
+
treebank, namely, developing WS guidelines, POS tagging
|
| 616 |
+
guidelines, and bracketing guidelines, as well as ensuring
|
| 617 |
+
the annotation consistency and accuracy. Our guidelines
|
| 618 |
+
were developed based on not only the linguistics literature but also the analysis of the linguistic phenomena on
|
| 619 |
+
real texts. Moreover, we discussed with linguistic experts to
|
| 620 |
+
solve the difficulties. So far, we have annotated 600 texts.
|
| 621 |
+
In future, we will annotate the rest of Vietnamese treebank,
|
| 622 |
+
which includes 2,400 texts and revise the guidelines for new
|
| 623 |
+
structures (if any). We plan to complete and publicize the
|
| 624 |
+
annotated corpus and the annotation guidelines at the end
|
| 625 |
+
of 2016.
|
| 626 |
+
|
| 627 |
+
|
| 628 |
+
**8.** **Bibliographical References**
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
Bies, A., Ferguson, M., Katz, K., MacIntyre, R., Tredinnick, V., Kim, G., Marcinkiewicz, M. A., and Schasberger, B. (1995). Bracketing guidelines for treebank ii
|
| 632 |
+
style penn treebank project. _University of Pennsylvania_,
|
| 633 |
+
97:100.
|
| 634 |
+
Diep, B. Q. (2005). _Vietnamese grammar_ . Vietnam Education Publisher.
|
| 635 |
+
Hoang, P. (1998). _Vietnamese Dictionary_ . Scientific &
|
| 636 |
+
Technical Publishing.
|
| 637 |
+
Nguyen, T. P., Vu, L. X., Nguyen, H. T. M., Nguyen, H. V.,
|
| 638 |
+
and Le, P. H. (2009). Building a large syntacticallyannotated corpus of vietnamese. In _Proceedings of the_
|
| 639 |
+
_Third Linguistic Annotation Workshop_, pages 182-185.
|
| 640 |
+
Association for Computational Linguistics.
|
| 641 |
+
Nguyen, T. P., Vu, L. X., and Nguyen, H. T. M., (2010a).
|
| 642 |
+
_Vietnamese part-of-speech tagging guidelines_ . Ministry
|
| 643 |
+
of Education and Training (Vietnam), Vietnam.
|
| 644 |
+
Nguyen, T. P., Vu, L. X., and Nguyen, H. T. M., (2010b).
|
| 645 |
+
_Vietnamese word segmentation guidelines_ . Ministry of
|
| 646 |
+
Education and Training (Vietnam), Vietnam.
|
| 647 |
+
Nguyen, T. P., Vu, L. X., Nguyen, H. T. M., Dao, T. M.,
|
| 648 |
+
Dao, N. T. M., and Le, N. K., (2010c). _Vietnamese bracketing guidelines_ . Ministry of Education and Training
|
| 649 |
+
(Vietnam), Vietnam.
|
| 650 |
+
Nguyen, Q. T., Nguyen, N. L., and Miyao, Y. (2012). Comparing different criteria for vietnamese word segmentation. In _Proceedings of 3rd Workshop on South and
|
| 651 |
+
Southeast Asian Natural Language Processing (SANLP)_,
|
| 652 |
+
pages 53-68. Citeseer.
|
| 653 |
+
Nguyen, Q. T., Nguyen, N. L., and Miyao, Y. (2013). Utilizing state-of-the-art parsers to diagnose problems in
|
| 654 |
+
treebank annotation for a less resourced language. In
|
| 655 |
+
_Proceedings of the 7th Linguistic Annotation Workshop
|
| 656 |
+
& Interoperability with Discourse_, pages 19-27. Association for Computational Linguistics.
|
| 657 |
+
Santorini, B. (1990). Part-of-speech tagging guidelines for
|
| 658 |
+
the penn treebank project (3rd revision).
|
| 659 |
+
Sciullo, A.-M. D. and Williams, E. (1987). _On the definition of word_, volume 14. Springer.
|
| 660 |
+
SCSSV. (1983). _Vietnamese grammar_ . Social Sciences
|
| 661 |
+
Publishers.
|
| 662 |
+
Sornlertlamvanich, V., Takahashi, N., and Isahara, H.
|
| 663 |
+
(1999). Building a thai part-of-speech tagged corpus (orchid). _Journal of the Acoustical Society of Japan (E)_,
|
| 664 |
+
20(3):189-198.
|
| 665 |
+
Xia, F., Palmer, M., Xue, N., Okurowski, M. E., Kovarik,
|
| 666 |
+
J., Chiou, F.-D., Huang, S., Kroch, T., and Marcus, M. P.
|
| 667 |
+
(2000). Developing guidelines and ensuring consistency
|
| 668 |
+
for chinese text annotation. In _LREC_ .
|
| 669 |
+
Xia, F. (2000a). The part-of-speech tagging guidelines for
|
| 670 |
+
the penn chinese treebank (3.0).
|
| 671 |
+
Xia, F. (2000b). The segmentation guidelines for the penn
|
| 672 |
+
chinese treebank (3.0).
|
| 673 |
+
Xue, N., Xia, F., Huang, S., and Kroch, A. (2000). The
|
| 674 |
+
bracketing guidelines for the penn chinese treebank (3.0).
|
references/2016.lrec.nguyen/paper.pdf
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
references/2018.lre.nguyen/paper.md
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: "Ensuring annotation consistency and accuracy for Vietnamese treebank"
|
| 3 |
+
authors:
|
| 4 |
+
- Quy T. Nguyen
|
| 5 |
+
- Yusuke Miyao
|
| 6 |
+
- Ha T. T. Le
|
| 7 |
+
- Nhung T. H. Nguyen
|
| 8 |
+
year: 2018
|
| 9 |
+
venue: "Language Resources and Evaluation"
|
| 10 |
+
publisher: "Springer Netherlands"
|
| 11 |
+
volume: 52
|
| 12 |
+
issue: 1
|
| 13 |
+
pages: "269-315"
|
| 14 |
+
doi: "10.1007/s10579-017-9398-3"
|
| 15 |
+
url: "https://link.springer.com/article/10.1007/s10579-017-9398-3"
|
| 16 |
+
online_date: "2017-07-22"
|
| 17 |
+
cover_date: "2018-03-01"
|
| 18 |
+
issn: "1574-0218"
|
| 19 |
+
article_type: "Original Paper"
|
| 20 |
+
access: "paywalled"
|
| 21 |
+
conference_version:
|
| 22 |
+
title: "Challenges and Solutions for Consistent Annotation of Vietnamese Treebank"
|
| 23 |
+
venue: "LREC 2016"
|
| 24 |
+
year: 2016
|
| 25 |
+
acl_anthology: "L16-1243"
|
| 26 |
+
url: "https://aclanthology.org/L16-1243/"
|
| 27 |
+
local_path: "references/2016.lrec.nguyen/"
|
| 28 |
+
author_affiliations:
|
| 29 |
+
- name: "Quy T. Nguyen"
|
| 30 |
+
email: "quynt@nii.ac.jp"
|
| 31 |
+
affiliations:
|
| 32 |
+
- "SOKENDAI (The Graduate University for Advanced Studies), Kanagawa, Japan"
|
| 33 |
+
- "National Institute of Informatics, Tokyo, Japan"
|
| 34 |
+
- name: "Yusuke Miyao"
|
| 35 |
+
email: "yusuke@nii.ac.jp"
|
| 36 |
+
affiliations:
|
| 37 |
+
- "SOKENDAI (The Graduate University for Advanced Studies), Kanagawa, Japan"
|
| 38 |
+
- "National Institute of Informatics, Tokyo, Japan"
|
| 39 |
+
- name: "Ha T. T. Le"
|
| 40 |
+
email: "trucha.ussh@gmail.com"
|
| 41 |
+
affiliations:
|
| 42 |
+
- "University of Social Sciences and Humanities, Ho Chi Minh City, Vietnam"
|
| 43 |
+
- name: "Nhung T. H. Nguyen"
|
| 44 |
+
email: "nthnhung@fit.hcmus.edu.vn"
|
| 45 |
+
affiliations:
|
| 46 |
+
- "University of Science, Ho Chi Minh City, Vietnam"
|
| 47 |
+
subjects:
|
| 48 |
+
- "Computational Linguistics"
|
| 49 |
+
- "Computer Science, general"
|
| 50 |
+
- "Linguistics, general"
|
| 51 |
+
- "Language and Literature"
|
| 52 |
+
dblp: "journals/lre/NguyenMLN18"
|
| 53 |
+
semantic_scholar_id: "13ae589e1ee3dea5600f75cde4105fdc4d206bb0"
|
| 54 |
+
---
|
| 55 |
+
|
| 56 |
+
# Ensuring annotation consistency and accuracy for Vietnamese treebank
|
| 57 |
+
|
| 58 |
+
**Quy T. Nguyen, Yusuke Miyao, Ha T. T. Le, Nhung T. H. Nguyen**
|
| 59 |
+
|
| 60 |
+
*Language Resources and Evaluation*, 52(1), 269-315, 2018.
|
| 61 |
+
|
| 62 |
+
## Abstract
|
| 63 |
+
|
| 64 |
+
Treebanks are important resources for researchers in natural language processing. They provide training and testing materials so that different algorithms can be compared. However, it is not a trivial task to construct high-quality treebanks. We have not yet had a proper treebank for such a low-resource language as Vietnamese, which has probably lowered the performance of Vietnamese language processing. We have been building a consistent and accurate Vietnamese treebank to alleviate such situations. Our treebank is annotated with three layers: word segmentation, part-of-speech tagging, and bracketing. We developed detailed annotation guidelines for each layer by presenting Vietnamese linguistic issues as well as methods of addressing them. Here, we also describe approaches to controlling annotation quality while ensuring a reasonable annotation speed. We specifically designed an appropriate annotation process and an effective process to train annotators. In addition, we implemented several support tools to improve annotation speed and to control the consistency of the treebank. The results from experiments revealed that both inter-annotator agreement and accuracy were higher than 90%, which indicated that the treebank is reliable.
|
| 65 |
+
|
| 66 |
+
## Notes
|
| 67 |
+
|
| 68 |
+
This paper is behind a paywall (Springer). The full text PDF is not available via open access.
|
| 69 |
+
|
| 70 |
+
The LREC 2016 conference version of this paper is available at `references/2016.lrec.nguyen/` and can be freely accessed at https://aclanthology.org/L16-1243/. The journal version is an extended version of the conference paper, published in *Language Resources and Evaluation* (Springer) in 2018.
|
| 71 |
+
|
| 72 |
+
The associated treebank (NIIVTB - Vietnamese Constituent Treebank, 20,588 sentences) is available at https://github.com/mynlp/niivtb under CC BY-NC-SA 4.0 license.
|
references/research_vietnamese_dep_parsing/README.md
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Literature Review: Building Vietnamese Dependency Parsing Datasets
|
| 2 |
+
|
| 3 |
+
**Date**: 2026-02-08
|
| 4 |
+
**Research Questions**:
|
| 5 |
+
- RQ1: What Vietnamese dependency treebanks exist and what are their characteristics?
|
| 6 |
+
- RQ2: What methods are used for automatic/semi-automatic treebank construction?
|
| 7 |
+
- RQ3: What is the state-of-the-art in Vietnamese dependency parsing?
|
| 8 |
+
- RQ4: How are silver-standard UD treebanks created and validated?
|
| 9 |
+
|
| 10 |
+
## Executive Summary
|
| 11 |
+
|
| 12 |
+
Vietnamese remains underserved in the Universal Dependencies ecosystem, with only one official treebank (UD_Vietnamese-VTB, 3,323 sentences). Several non-UD treebanks exist (VnDT: 10,200 sentences; BKTreebank: 6,900 sentences) but all are restricted to the news domain. State-of-the-art Vietnamese dependency parsing achieves ~79% LAS (PhoNLP with PhoBERT), limited by small training data and domain coverage. Machine-generated (silver-standard) treebanks are an established practice in the UD community for scaling annotation efforts. Recent work on domain generalization (DGDT, 2024) shows 3-5% LAS degradation across domains, motivating multi-domain treebank construction.
|
| 13 |
+
|
| 14 |
+
## Methodology
|
| 15 |
+
|
| 16 |
+
- **Search sources**: ACL Anthology, arXiv, Semantic Scholar, Google Scholar, IEEE Xplore, Springer
|
| 17 |
+
- **Search terms**: "Vietnamese dependency parsing", "Vietnamese treebank", "Universal Dependencies Vietnamese", "VLSP dependency parsing", "silver standard treebank", "automatic annotation dependency", "low-resource treebank construction", "Vietnamese NLP"
|
| 18 |
+
- **Timeframe**: 2009-2026
|
| 19 |
+
- **Inclusion criteria**: Peer-reviewed papers at NLP/CL venues, relevant preprints, official UD resources
|
| 20 |
+
|
| 21 |
+
## PRISMA Flow
|
| 22 |
+
|
| 23 |
+
- Records identified: ~120
|
| 24 |
+
- Duplicates removed: ~40
|
| 25 |
+
- Records screened: ~80
|
| 26 |
+
- Records excluded: ~37
|
| 27 |
+
- Full-text assessed: 43
|
| 28 |
+
- **Studies included: 43**
|
| 29 |
+
|
| 30 |
+
## Findings
|
| 31 |
+
|
| 32 |
+
### RQ1: What Vietnamese dependency treebanks exist?
|
| 33 |
+
|
| 34 |
+
Five Vietnamese dependency treebanks have been constructed:
|
| 35 |
+
|
| 36 |
+
| Treebank | Sentences | Tokens | Domain | Format | Annotation | Year |
|
| 37 |
+
|----------|----------:|-------:|--------|--------|------------|------|
|
| 38 |
+
| UD_Vietnamese-VTB | 3,323 | 58,069 | News | CoNLL-U | Auto-converted from constituency | 2017 |
|
| 39 |
+
| VnDT v1.1 | 10,200 | -- | News | CoNLL | Auto-converted from constituency | 2014/2018 |
|
| 40 |
+
| BKTreebank | 6,900 | -- | News | CoNLL | Manual (3 annotators, BRAT) | 2018 |
|
| 41 |
+
| VLSP 2020 dataset | 8,152 | -- | Mixed | CoNLL-U | -- | 2020 |
|
| 42 |
+
| DGDT | -- | -- | Multi-domain | CoNLL-U | -- | 2025 |
|
| 43 |
+
|
| 44 |
+
Key observations:
|
| 45 |
+
- All major treebanks are restricted to the **news domain**
|
| 46 |
+
- Only UD_Vietnamese-VTB follows UD annotation conventions
|
| 47 |
+
- VnDT is the primary benchmark for Vietnamese dependency parsing
|
| 48 |
+
- Recent DGDT work (2025) addresses domain generalization
|
| 49 |
+
|
| 50 |
+
### RQ2: What methods are used for treebank construction?
|
| 51 |
+
|
| 52 |
+
Four main approaches documented in the literature:
|
| 53 |
+
|
| 54 |
+
1. **Constituency-to-dependency conversion**: VnDT and UD_Vietnamese-VTB were created by converting VLSP constituency treebanks. Le-Hong et al. (2013) and recent work (2022) propose improved head-percolation rules achieving ~13% UAS improvement.
|
| 55 |
+
|
| 56 |
+
2. **Manual annotation**: BKTreebank used 3 annotators with BRAT tool, achieving inter-annotator agreement above 90% (Le-Hong et al., 2015; Nguyen et al., 2016).
|
| 57 |
+
|
| 58 |
+
3. **Cross-lingual projection**: Chinese-Vietnamese bilingual alignment used to project annotations from resource-rich Chinese (2016).
|
| 59 |
+
|
| 60 |
+
4. **Silver-standard / machine-generated**: UD-English-CHILDES demonstrates 1M-sentence silver treebank creation using Stanza. BERT-based sentence recommendation (FDSE 2023) helps prioritize sentences likely to be parsed with high LAS.
|
| 61 |
+
|
| 62 |
+
Semi-automatic construction (bootstrapping) is established best practice: automatic parser output followed by manual correction, increasing automation as annotated data grows.
|
| 63 |
+
|
| 64 |
+
### RQ3: What is the state-of-the-art in Vietnamese dependency parsing?
|
| 65 |
+
|
| 66 |
+
**VnDT v1.1 benchmark (primary)**:
|
| 67 |
+
|
| 68 |
+
| Model | LAS | UAS | Year |
|
| 69 |
+
|-------|----:|----:|------|
|
| 70 |
+
| PhoNLP (PhoBERT-base, multi-task) | 79.11 | 85.47 | 2021 |
|
| 71 |
+
| HPSG + PhoBERT large | 78.42 | 85.73 | 2024 |
|
| 72 |
+
| PhoBERT-base (single-task Biaffine) | 78.77 | 85.22 | 2020 |
|
| 73 |
+
| Biaffine (Dozat & Manning) | 74.99 | 81.19 | 2017 |
|
| 74 |
+
| VnCoreNLP | 73.39 | 79.02 | 2018 |
|
| 75 |
+
|
| 76 |
+
**VLSP 2020 UD format**:
|
| 77 |
+
|
| 78 |
+
| Model | LAS | UAS |
|
| 79 |
+
|-------|----:|----:|
|
| 80 |
+
| PhoBERT+ELMO/Biaffine | 76.27 | 84.65 |
|
| 81 |
+
| Graph Neural Networks | 73.19 | 81.71 |
|
| 82 |
+
|
| 83 |
+
**UD_Vietnamese-VTB**:
|
| 84 |
+
|
| 85 |
+
| Model | LAS | UAS |
|
| 86 |
+
|-------|----:|----:|
|
| 87 |
+
| Trankit v0.3.1 | 64.76 | 70.96 |
|
| 88 |
+
| Stanza v1.1.1 | 53.63 | 48.16 |
|
| 89 |
+
|
| 90 |
+
Key trends: (1) PhoBERT-based models dominate; (2) Multi-task learning (PhoNLP) outperforms single-task; (3) Performance on UD_Vietnamese-VTB is much lower than VnDT, suggesting annotation quality issues.
|
| 91 |
+
|
| 92 |
+
### RQ4: How are silver-standard UD treebanks created and validated?
|
| 93 |
+
|
| 94 |
+
The UD community uses a multi-level validation system (Levels 1-5):
|
| 95 |
+
- Level 1: CoNLL-U format compliance
|
| 96 |
+
- Level 2: Token-level annotation constraints
|
| 97 |
+
- Level 3: Tree structure validation (single root, projectivity)
|
| 98 |
+
- Level 4: UPOS-deprel compatibility
|
| 99 |
+
- Level 5: Language-specific constraints
|
| 100 |
+
|
| 101 |
+
Silver-standard treebanks are validated using the official UD validator (`udtools`). Quality assessment methods include: entropy-based error detection (Le-Hong et al., 2015), automatic error detection via equivalence classes (2014), and BERT-based sentence recommendation for prioritizing high-confidence parses (2023).
|
| 102 |
+
|
| 103 |
+
## Research Gaps
|
| 104 |
+
|
| 105 |
+
1. **Domain coverage**: All Vietnamese UD treebanks cover only news. Legal, medical, scientific, and literary domains are unrepresented.
|
| 106 |
+
2. **Scale**: The official UD_Vietnamese-VTB has only 3,323 sentences--among the smallest in UD.
|
| 107 |
+
3. **Annotation quality**: Known issues with automatic conversion from constituency to UD format (noted in CoNLL 2018 shared task).
|
| 108 |
+
4. **Silver treebank methodology for Vietnamese**: No established pipeline for generating and validating silver-standard Vietnamese UD data at scale.
|
| 109 |
+
5. **Morphological features**: Vietnamese treebanks typically lack FEATS annotation despite having features like Polarity, Mood, Voice.
|
| 110 |
+
|
| 111 |
+
## Recommendations
|
| 112 |
+
|
| 113 |
+
1. **Build multi-domain Vietnamese UD treebanks** starting with underrepresented domains (legal, scientific).
|
| 114 |
+
2. **Use machine-generated annotations with rule-based post-processing** to enforce UD constraints, following the UD-English-CHILDES silver standard methodology.
|
| 115 |
+
3. **Conduct human evaluation** through expert sampling to quantify silver treebank quality.
|
| 116 |
+
4. **Contribute to the official UD project** to increase Vietnamese representation beyond a single treebank.
|
| 117 |
+
|
| 118 |
+
## References
|
| 119 |
+
|
| 120 |
+
See [papers.md](papers.md) for the full paper database and [bibliography.bib](bibliography.bib) for BibTeX entries.
|
references/research_vietnamese_dep_parsing/bibliography.bib
ADDED
|
@@ -0,0 +1,436 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
% Bibliography: Vietnamese Dependency Parsing Datasets
|
| 2 |
+
% Generated: 2026-02-08
|
| 3 |
+
% 43 entries organized by category
|
| 4 |
+
|
| 5 |
+
% ============================================
|
| 6 |
+
% A. Vietnamese Dependency Treebank Resources
|
| 7 |
+
% ============================================
|
| 8 |
+
|
| 9 |
+
@inproceedings{nguyen2014vndt,
|
| 10 |
+
title={From Treebank Conversion to Automatic Dependency Parsing for {V}ietnamese},
|
| 11 |
+
author={Nguyen, Dat Quoc and Nguyen, Dai Quoc and Pham, Son Bao and Nguyen, Phuong-Thai and Nguyen, Minh Le},
|
| 12 |
+
booktitle={Proceedings of the 19th International Conference on Applications of Natural Language to Information Systems (NLDB)},
|
| 13 |
+
pages={196--207},
|
| 14 |
+
year={2014},
|
| 15 |
+
publisher={Springer},
|
| 16 |
+
doi={10.1007/978-3-319-07983-7_26}
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
@inproceedings{nguyen2018bktreebank,
|
| 20 |
+
title={{BKTreebank}: Building a {V}ietnamese Dependency Treebank},
|
| 21 |
+
author={Nguyen, Kiem-Hieu},
|
| 22 |
+
booktitle={Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
|
| 23 |
+
pages={2320--2326},
|
| 24 |
+
year={2018},
|
| 25 |
+
address={Miyazaki, Japan},
|
| 26 |
+
publisher={European Language Resources Association},
|
| 27 |
+
url={https://aclanthology.org/L18-1341/}
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
@misc{ud_vietnamese_vtb,
|
| 31 |
+
title={{UD}_{V}ietnamese-{VTB}},
|
| 32 |
+
author={{VLSP Project contributors}},
|
| 33 |
+
year={2017},
|
| 34 |
+
howpublished={Universal Dependencies project},
|
| 35 |
+
url={https://universaldependencies.org/treebanks/vi_vtb/index.html}
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
@inproceedings{lehong2013building,
|
| 39 |
+
title={Building a Treebank for {V}ietnamese Dependency Parsing},
|
| 40 |
+
author={Le-Hong, Phuong and Nguyen, Thi Minh Huyen and Roussanaly, Azim and Ho, Tuong Vinh},
|
| 41 |
+
booktitle={Proceedings of the IEEE International Conference on Knowledge and Systems Engineering (KSE)},
|
| 42 |
+
pages={74--79},
|
| 43 |
+
year={2013},
|
| 44 |
+
publisher={IEEE},
|
| 45 |
+
doi={10.1109/KSE.2013.6719884}
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
@inproceedings{huynh2024dgdt,
|
| 49 |
+
title={Domain Generalization in {V}ietnamese Dependency Parsing},
|
| 50 |
+
author={Huynh, D. and Le, V.H. and Truong, C.A. and Huynh, C.M. and Nguyen, Y.T. and Nguyen, Q.T.},
|
| 51 |
+
booktitle={Proceedings of SOICT 2024},
|
| 52 |
+
series={CCIS},
|
| 53 |
+
volume={2350},
|
| 54 |
+
year={2024},
|
| 55 |
+
publisher={Springer},
|
| 56 |
+
doi={10.1007/978-981-96-4282-3_14}
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
% ============================================
|
| 60 |
+
% B. Vietnamese Treebank Quality and Construction
|
| 61 |
+
% ============================================
|
| 62 |
+
|
| 63 |
+
@article{lehong2015entropy,
|
| 64 |
+
title={{V}ietnamese Treebank Construction and Entropy-based Error Detection},
|
| 65 |
+
author={Le-Hong, Phuong and Nguyen, Thi Minh Huyen and others},
|
| 66 |
+
journal={Language Resources and Evaluation},
|
| 67 |
+
volume={49},
|
| 68 |
+
pages={487--518},
|
| 69 |
+
year={2015},
|
| 70 |
+
publisher={Springer},
|
| 71 |
+
doi={10.1007/s10579-015-9308-5}
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
@inproceedings{nguyen2016challenges,
|
| 75 |
+
title={Challenges and Solutions for Consistent Annotation of {V}ietnamese Treebank},
|
| 76 |
+
author={Nguyen, Quy and Miyao, Yusuke and Le, Ha and Nguyen, Ngan},
|
| 77 |
+
booktitle={Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
|
| 78 |
+
pages={1449--1454},
|
| 79 |
+
year={2016},
|
| 80 |
+
address={Portoro\v{z}, Slovenia},
|
| 81 |
+
url={https://aclanthology.org/L16-1243/}
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
@inproceedings{automatic2014detection,
|
| 85 |
+
title={Automatic Detection of Problematic Rules in {V}ietnamese Treebank},
|
| 86 |
+
author={{VNU researchers}},
|
| 87 |
+
booktitle={Proceedings of the IEEE Conference},
|
| 88 |
+
year={2014},
|
| 89 |
+
publisher={IEEE},
|
| 90 |
+
doi={10.1109/7049867}
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
@inproceedings{converting2022constituency,
|
| 94 |
+
title={Converting a Constituency Treebank to Dependency Treebank for {V}ietnamese},
|
| 95 |
+
author={{Vietnamese researchers}},
|
| 96 |
+
booktitle={Proceedings of the IEEE Conference},
|
| 97 |
+
year={2022},
|
| 98 |
+
publisher={IEEE},
|
| 99 |
+
doi={10.1109/10013806}
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
@inproceedings{bert2023sentence,
|
| 103 |
+
title={{BERT}-Based Sentence Recommendation for Building {V}ietnamese {UD} Treebank},
|
| 104 |
+
author={{Vietnamese researchers}},
|
| 105 |
+
booktitle={Proceedings of FDSE 2023},
|
| 106 |
+
year={2023},
|
| 107 |
+
publisher={Springer},
|
| 108 |
+
doi={10.1007/978-981-99-8296-7_28}
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
@inproceedings{chinese2016vietnamese,
|
| 112 |
+
title={Building {V}ietnamese Dependency Treebank Based on {C}hinese-{V}ietnamese Bilingual Word Alignment},
|
| 113 |
+
author={{Chinese-Vietnamese NLP researchers}},
|
| 114 |
+
booktitle={Proceedings of the IEEE Conference},
|
| 115 |
+
year={2016},
|
| 116 |
+
publisher={IEEE},
|
| 117 |
+
doi={10.1109/7603371}
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
% ============================================
|
| 121 |
+
% C. VLSP Shared Tasks
|
| 122 |
+
% ============================================
|
| 123 |
+
|
| 124 |
+
@inproceedings{ha2020vlsp,
|
| 125 |
+
title={{VLSP} 2020 Shared Task: Universal Dependency Parsing for {V}ietnamese},
|
| 126 |
+
author={Ha, My Linh and Nguyen, Thi Minh Huyen and Vu, Xuan Luong and Nguyen, Thi Luong and Phan, Thi Hue and Le, Van Cuong},
|
| 127 |
+
booktitle={Proceedings of the 7th International Workshop on Vietnamese Language and Speech Processing (VLSP 2020)},
|
| 128 |
+
year={2020},
|
| 129 |
+
url={https://aclanthology.org/2020.vlsp-1.15/}
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
@inproceedings{doan2020joint,
|
| 133 |
+
title={A Joint Deep Contextualized Word Representation for Deep Biaffine Dependency Parsing},
|
| 134 |
+
author={Doan, Xuan-Dung},
|
| 135 |
+
booktitle={Proceedings of VLSP 2020},
|
| 136 |
+
year={2020},
|
| 137 |
+
url={https://aclanthology.org/2020.vlsp-1.10/}
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
@inproceedings{nguyen2020gnn,
|
| 141 |
+
title={Applying Graph Neural Networks for {V}ietnamese Dependency Parsing},
|
| 142 |
+
author={Nguyen, Duc Thien and Nguyen, Thi Thu Trang and Truong, Dang Quang},
|
| 143 |
+
booktitle={Proceedings of VLSP 2020},
|
| 144 |
+
year={2020},
|
| 145 |
+
url={https://aclanthology.org/2020.vlsp-1.11/}
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
@inproceedings{nguyen2020bilstm,
|
| 149 |
+
title={Implementing Bi-{LSTM}-based Deep Biaffine Neural Dependency Parser for {V}ietnamese},
|
| 150 |
+
author={Nguyen, Lien},
|
| 151 |
+
booktitle={Proceedings of VLSP 2020},
|
| 152 |
+
year={2020},
|
| 153 |
+
url={https://aclanthology.org/2020.vlsp-1.12/}
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
% ============================================
|
| 157 |
+
% D. Vietnamese Dependency Parsing Methods
|
| 158 |
+
% ============================================
|
| 159 |
+
|
| 160 |
+
@inproceedings{nguyen2016empirical,
|
| 161 |
+
title={An Empirical Study for {V}ietnamese Dependency Parsing},
|
| 162 |
+
author={Nguyen, Dat Quoc and Dras, Mark and Johnson, Mark},
|
| 163 |
+
booktitle={Proceedings of the Australasian Language Technology Association Workshop (ALTA 2016)},
|
| 164 |
+
pages={143--149},
|
| 165 |
+
year={2016},
|
| 166 |
+
address={Melbourne, Australia},
|
| 167 |
+
url={https://aclanthology.org/U16-1017/}
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
@article{nguyen2019error,
|
| 171 |
+
title={Error Analysis for {V}ietnamese Dependency Parsing},
|
| 172 |
+
author={Nguyen, Kiet Van and Nguyen, Ngan Luu-Thuy},
|
| 173 |
+
journal={arXiv preprint arXiv:1911.03724},
|
| 174 |
+
year={2019},
|
| 175 |
+
url={https://arxiv.org/abs/1911.03724}
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
@article{nguyen2019supertag,
|
| 179 |
+
title={{V}ietnamese Transition-based Dependency Parsing with Supertag Features},
|
| 180 |
+
author={Nguyen, Kiet Van and Nguyen, Ngan Luu-Thuy},
|
| 181 |
+
journal={arXiv preprint arXiv:1911.03726},
|
| 182 |
+
year={2019},
|
| 183 |
+
url={https://arxiv.org/abs/1911.03726}
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
@article{nguyen2019easyfirst,
|
| 187 |
+
title={{LSTM} Easy-first Dependency Parsing with Pre-trained Word Embeddings in {V}ietnamese},
|
| 188 |
+
author={Nguyen, Binh Duc and others},
|
| 189 |
+
journal={arXiv preprint arXiv:1910.13732},
|
| 190 |
+
year={2019},
|
| 191 |
+
url={https://arxiv.org/abs/1910.13732}
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
@article{nguyen2018bilstm,
|
| 195 |
+
title={Using {BiLSTM} in Dependency Parsing for {V}ietnamese},
|
| 196 |
+
author={Nguyen, Thi My and others},
|
| 197 |
+
journal={Computaci\'{o}n y Sistemas},
|
| 198 |
+
volume={22},
|
| 199 |
+
number={3},
|
| 200 |
+
pages={853--862},
|
| 201 |
+
year={2018},
|
| 202 |
+
url={https://www.scielo.org.mx/scielo.php?script=sci_arttext&pid=S1405-55462018000300853}
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
@inproceedings{nguyen2019joint,
|
| 206 |
+
title={A Neural Joint Model for {V}ietnamese Word Segmentation, {POS} Tagging and Dependency Parsing},
|
| 207 |
+
author={Nguyen, Dat Quoc},
|
| 208 |
+
booktitle={Proceedings of the Australasian Language Technology Association Workshop (ALTA 2019)},
|
| 209 |
+
pages={28--34},
|
| 210 |
+
year={2019},
|
| 211 |
+
address={Sydney, Australia},
|
| 212 |
+
url={https://aclanthology.org/U19-1004/}
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
@article{nguyen2024hpsg,
|
| 216 |
+
title={An Attempt to Develop a Neural Parser based on Simplified {HPSG} on {V}ietnamese},
|
| 217 |
+
author={Nguyen, Duc-Vu and Phan, Thang Chau and Nguyen, Quoc-Nam and Nguyen, Kiet Van and Nguyen, Ngan Luu-Thuy},
|
| 218 |
+
journal={arXiv preprint arXiv:2411.17270},
|
| 219 |
+
year={2024},
|
| 220 |
+
url={https://arxiv.org/abs/2411.17270}
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
@inproceedings{tran2021crosslingual,
|
| 224 |
+
title={Adapting Cross-lingual Model to Improve {V}ietnamese Dependency Parsing},
|
| 225 |
+
author={Tran, Anh Duc Do and Dinh, Dien and Luong, An-Vinh and Do, Thao},
|
| 226 |
+
booktitle={Proceedings of ICABDE 2021},
|
| 227 |
+
year={2021},
|
| 228 |
+
publisher={Springer},
|
| 229 |
+
url={https://researchportal.bath.ac.uk/en/publications/adapting-cross-lingual-model-to-improve-vietnamese-dependency-par}
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
% ============================================
|
| 233 |
+
% E. Vietnamese NLP Toolkits
|
| 234 |
+
% ============================================
|
| 235 |
+
|
| 236 |
+
@inproceedings{vu2018vncorenlp,
|
| 237 |
+
title={{VnCoreNLP}: A {V}ietnamese Natural Language Processing Toolkit},
|
| 238 |
+
author={Vu, Thanh and Nguyen, Dat Quoc and Nguyen, Dai Quoc and Dras, Mark and Johnson, Mark},
|
| 239 |
+
booktitle={Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations},
|
| 240 |
+
pages={56--60},
|
| 241 |
+
year={2018},
|
| 242 |
+
url={https://aclanthology.org/N18-5012/}
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
@article{nguyen2021phonlp,
|
| 246 |
+
title={{PhoNLP}: A Joint Multi-task Learning Model for {V}ietnamese Part-of-speech Tagging, Named Entity Recognition and Dependency Parsing},
|
| 247 |
+
author={Nguyen, Linh The and Nguyen, Dat Quoc},
|
| 248 |
+
journal={arXiv preprint arXiv:2101.01476},
|
| 249 |
+
year={2021},
|
| 250 |
+
url={https://arxiv.org/abs/2101.01476}
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
@inproceedings{nguyen2020phobert,
|
| 254 |
+
title={{PhoBERT}: Pre-trained Language Models for {V}ietnamese},
|
| 255 |
+
author={Nguyen, Dat Quoc and Nguyen, Anh Tuan},
|
| 256 |
+
booktitle={Findings of the Association for Computational Linguistics: EMNLP 2020},
|
| 257 |
+
pages={1037--1042},
|
| 258 |
+
year={2020},
|
| 259 |
+
url={https://aclanthology.org/2020.findings-emnlp.92/}
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
@inproceedings{qi2020stanza,
|
| 263 |
+
title={{S}tanza: A {P}ython Natural Language Processing Toolkit for Many Human Languages},
|
| 264 |
+
author={Qi, Peng and Zhang, Yuhao and Zhang, Yuhui and Bolton, Jason and Manning, Christopher D.},
|
| 265 |
+
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations},
|
| 266 |
+
pages={101--108},
|
| 267 |
+
year={2020},
|
| 268 |
+
url={https://aclanthology.org/2020.acl-demos.14/}
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
@inproceedings{nguyen2021trankit,
|
| 272 |
+
title={{T}rankit: A Light-Weight Transformer-based Toolkit for Multilingual Natural Language Processing},
|
| 273 |
+
author={Nguyen, Minh Van and Lai, Viet Dac and Pouran Ben Veyseh, Amir and Nguyen, Thien Huu},
|
| 274 |
+
booktitle={Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations},
|
| 275 |
+
pages={80--90},
|
| 276 |
+
year={2021},
|
| 277 |
+
url={https://aclanthology.org/2021.eacl-demos.10/}
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
% ============================================
|
| 281 |
+
% F. Vietnamese Semantic Parsing
|
| 282 |
+
% ============================================
|
| 283 |
+
|
| 284 |
+
@article{do2021semantic,
|
| 285 |
+
title={Sentential Semantic Dependency Parsing for {V}ietnamese},
|
| 286 |
+
author={Do, T.T. and Nguyen, D.T.},
|
| 287 |
+
journal={SN Computer Science},
|
| 288 |
+
volume={2},
|
| 289 |
+
pages={335},
|
| 290 |
+
year={2021},
|
| 291 |
+
publisher={Springer},
|
| 292 |
+
doi={10.1007/s42979-021-00715-4}
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
@inproceedings{vlo2021,
|
| 296 |
+
title={{VLO} V1.1 -- A {V}ietnamese Lexicon Ontology for Universal Dependency Parsing},
|
| 297 |
+
author={{Vietnamese researchers}},
|
| 298 |
+
booktitle={Proceedings of the IEEE Conference},
|
| 299 |
+
year={2021},
|
| 300 |
+
publisher={IEEE},
|
| 301 |
+
url={https://ieeexplore.ieee.org/document/9353080/}
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
% ============================================
|
| 305 |
+
% G. UD Framework and CoNLL Shared Tasks
|
| 306 |
+
% ============================================
|
| 307 |
+
|
| 308 |
+
@inproceedings{nivre2020udv2,
|
| 309 |
+
title={Universal Dependencies v2: An Evergrowing Multilingual Treebank Collection},
|
| 310 |
+
author={Nivre, Joakim and others},
|
| 311 |
+
booktitle={Proceedings of the Twelfth International Conference on Language Resources and Evaluation (LREC 2020)},
|
| 312 |
+
pages={4034--4043},
|
| 313 |
+
year={2020},
|
| 314 |
+
address={Marseille, France},
|
| 315 |
+
url={https://aclanthology.org/2020.lrec-1.497/}
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
@inproceedings{nivre2016udv1,
|
| 319 |
+
title={Universal Dependencies v1: A Multilingual Treebank Collection},
|
| 320 |
+
author={Nivre, Joakim and others},
|
| 321 |
+
booktitle={Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
|
| 322 |
+
pages={1659--1666},
|
| 323 |
+
year={2016},
|
| 324 |
+
url={https://aclanthology.org/L16-1262/}
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
@article{demarneffe2021ud,
|
| 328 |
+
title={Universal Dependencies},
|
| 329 |
+
author={de Marneffe, Marie-Catherine and Manning, Christopher D. and Nivre, Joakim and Zeman, Daniel},
|
| 330 |
+
journal={Computational Linguistics},
|
| 331 |
+
volume={47},
|
| 332 |
+
number={2},
|
| 333 |
+
pages={255--308},
|
| 334 |
+
year={2021},
|
| 335 |
+
url={https://direct.mit.edu/coli/article/47/2/255/98516/Universal-Dependencies}
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
@inproceedings{zeman2017conll,
|
| 339 |
+
title={{CoNLL} 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies},
|
| 340 |
+
author={Zeman, Daniel and others},
|
| 341 |
+
booktitle={Proceedings of the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies},
|
| 342 |
+
pages={1--19},
|
| 343 |
+
year={2017},
|
| 344 |
+
address={Vancouver, Canada},
|
| 345 |
+
url={https://aclanthology.org/K17-3001/}
|
| 346 |
+
}
|
| 347 |
+
|
| 348 |
+
@inproceedings{zeman2018conll,
|
| 349 |
+
title={{CoNLL} 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies},
|
| 350 |
+
author={Zeman, Daniel and others},
|
| 351 |
+
booktitle={Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies},
|
| 352 |
+
pages={1--21},
|
| 353 |
+
year={2018},
|
| 354 |
+
address={Brussels, Belgium},
|
| 355 |
+
url={https://aclanthology.org/K18-2001/}
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
@article{dozat2017biaffine,
|
| 359 |
+
title={Deep Biaffine Attention for Neural Dependency Parsing},
|
| 360 |
+
author={Dozat, Timothy and Manning, Christopher D.},
|
| 361 |
+
journal={arXiv preprint arXiv:1611.01734},
|
| 362 |
+
year={2017},
|
| 363 |
+
note={Published at ICLR 2017},
|
| 364 |
+
url={https://arxiv.org/abs/1611.01734}
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
% ============================================
|
| 368 |
+
% H. Silver-Standard and Semi-Automatic Treebank Construction
|
| 369 |
+
% ============================================
|
| 370 |
+
|
| 371 |
+
@inproceedings{udchildes2025,
|
| 372 |
+
title={{UD}-{E}nglish-{CHILDES}: Gold and Silver Universal Dependencies Trees},
|
| 373 |
+
author={{Multiple authors}},
|
| 374 |
+
booktitle={Proceedings of UDW 2025},
|
| 375 |
+
year={2025},
|
| 376 |
+
url={https://arxiv.org/abs/2504.20304}
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
@inproceedings{nguyen2009building,
|
| 380 |
+
title={Building a Large Syntactically-Annotated Corpus of {V}ietnamese},
|
| 381 |
+
author={Nguyen, Phuong-Thai and Vu, Xuan-Luong and Nguyen, Thi-Minh-Huyen and Nguyen, Van-Hiep and Le, Hong-Phuong},
|
| 382 |
+
booktitle={Proceedings of the Third Linguistic Annotation Workshop (LAW III)},
|
| 383 |
+
pages={182--185},
|
| 384 |
+
year={2009},
|
| 385 |
+
url={https://aclanthology.org/W09-3028/}
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
% ============================================
|
| 389 |
+
% I. Domain Adaptation and Cross-lingual Transfer
|
| 390 |
+
% ============================================
|
| 391 |
+
|
| 392 |
+
@inproceedings{wagner2020treebank,
|
| 393 |
+
title={Treebank Embedding Vectors for Out-of-Domain Dependency Parsing},
|
| 394 |
+
author={Wagner, Joachim and Barry, James and Foster, Jennifer},
|
| 395 |
+
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
|
| 396 |
+
pages={8812--8818},
|
| 397 |
+
year={2020},
|
| 398 |
+
url={https://aclanthology.org/2020.acl-main.778/}
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
@inproceedings{uzbek2025ud,
|
| 402 |
+
title={Universal Dependencies Treebank for {U}zbek},
|
| 403 |
+
author={{Multiple authors}},
|
| 404 |
+
booktitle={Proceedings of the ResourceFul Workshop, ACL 2025},
|
| 405 |
+
year={2025},
|
| 406 |
+
url={https://aclanthology.org/2025.resourceful-1.1/}
|
| 407 |
+
}
|
| 408 |
+
|
| 409 |
+
@article{crosslingual2023subnetworks,
|
| 410 |
+
title={Cross-Lingual Transfer with Language-Specific Subnetworks for Low-Resource Dependency Parsing},
|
| 411 |
+
author={Choenni, Rochelle and Garrette, Dan and Shutova, Ekaterina},
|
| 412 |
+
journal={Computational Linguistics},
|
| 413 |
+
volume={49},
|
| 414 |
+
number={3},
|
| 415 |
+
pages={613--654},
|
| 416 |
+
year={2023},
|
| 417 |
+
url={https://direct.mit.edu/coli/article/49/3/613/116157}
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
@inproceedings{conneau2020xlmr,
|
| 421 |
+
title={Unsupervised Cross-lingual Representation Learning at Scale},
|
| 422 |
+
author={Conneau, Alexis and others},
|
| 423 |
+
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
|
| 424 |
+
pages={8440--8451},
|
| 425 |
+
year={2020},
|
| 426 |
+
url={https://arxiv.org/abs/1911.02116}
|
| 427 |
+
}
|
| 428 |
+
|
| 429 |
+
@inproceedings{brants1998automation,
|
| 430 |
+
title={Automation of Treebank Annotation},
|
| 431 |
+
author={Brants, Thorsten and Skut, Wojciech},
|
| 432 |
+
booktitle={Proceedings of the Joint Conference on New Methods in Language Processing and Computational Natural Language Learning (NeMLaP/CoNLL)},
|
| 433 |
+
pages={49--57},
|
| 434 |
+
year={1998},
|
| 435 |
+
url={https://aclanthology.org/W98-1207.pdf}
|
| 436 |
+
}
|
references/research_vietnamese_dep_parsing/comparison.md
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Comparison Tables: Vietnamese Dependency Parsing
|
| 2 |
+
|
| 3 |
+
## Table 1: Vietnamese Dependency Treebank Comparison
|
| 4 |
+
|
| 5 |
+
| Feature | UD_Vietnamese-VTB | VnDT v1.1 | BKTreebank | VLSP 2020 | DGDT | UDD-1 |
|
| 6 |
+
|---------|:-----------------:|:----------:|:----------:|:---------:|:----:|:-----:|
|
| 7 |
+
| **Sentences** | 3,323 | 10,200 | 6,900 | 8,152 | -- | 10,000 |
|
| 8 |
+
| **Tokens** | 58,069 | -- | -- | -- | -- | 230,709 |
|
| 9 |
+
| **Domain** | News | News | News | Mixed | Multi-domain | Legal |
|
| 10 |
+
| **Format** | CoNLL-U | CoNLL | CoNLL | CoNLL-U | CoNLL-U | CoNLL-U |
|
| 11 |
+
| **UD compliant** | Yes | No | No | Yes | Yes | Yes |
|
| 12 |
+
| **Annotation method** | Auto-converted | Auto-converted | Manual | -- | -- | Silver (neural + rules) |
|
| 13 |
+
| **Source treebank** | VLSP constituency | VLSP constituency | From scratch | -- | -- | Underthesea parser |
|
| 14 |
+
| **UPOS tags** | 17 | Custom | Custom | 17 | 17 | 15 |
|
| 15 |
+
| **DEPREL types** | -- | -- | -- | -- | -- | 77 |
|
| 16 |
+
| **Train/Dev/Test** | 1,400/800/1,123 | 8,200/1,020/1,020 | -- | 8,152/--/1,123 | -- | 9,165/432/403 |
|
| 17 |
+
| **Publicly available** | Yes (GitHub) | Yes (GitHub) | Limited | VLSP only | -- | Yes (HuggingFace) |
|
| 18 |
+
| **Year** | 2017 | 2014/2018 | 2018 | 2020 | 2025 | 2026 |
|
| 19 |
+
|
| 20 |
+
## Table 2: Vietnamese Dependency Parsing SOTA (VnDT v1.1)
|
| 21 |
+
|
| 22 |
+
| Rank | Model | Architecture | Pre-trained LM | UAS | LAS | Year | Reference |
|
| 23 |
+
|:----:|-------|-------------|----------------|----:|----:|------|-----------|
|
| 24 |
+
| 1 | PhoNLP | Biaffine (multi-task) | PhoBERT-base | 85.47 | 79.11 | 2021 | [25] |
|
| 25 |
+
| 2 | PhoBERT single-task | Biaffine | PhoBERT-base | 85.22 | 78.77 | 2020 | [26] |
|
| 26 |
+
| 3 | HPSG parser | HPSG + Biaffine | PhoBERT-large | 85.73 | 78.42 | 2024 | [22] |
|
| 27 |
+
| 4 | Biaffine | Deep biaffine attention | word2vec | 81.19 | 74.99 | 2017 | [36] |
|
| 28 |
+
| 5 | VnCoreNLP | Transition-based | word2vec | 79.02 | 73.39 | 2018 | [24] |
|
| 29 |
+
| 6 | Easy-first | LSTM easy-first | char embeddings | 80.91 | 72.98 | 2018 | [19] |
|
| 30 |
+
|
| 31 |
+
## Table 3: Vietnamese Dependency Parsing on UD_Vietnamese-VTB
|
| 32 |
+
|
| 33 |
+
| Rank | Model | Architecture | UAS | LAS | Year | Reference |
|
| 34 |
+
|:----:|-------|-------------|----:|----:|------|-----------|
|
| 35 |
+
| 1 | Trankit v0.3.1 | XLM-R + adapters | 70.96 | 64.76 | 2021 | [28] |
|
| 36 |
+
| 2 | Stanza v1.1.1 | BiLSTM + biaffine | 48.16 | 53.63 | 2020 | [27] |
|
| 37 |
+
|
| 38 |
+
## Table 4: VLSP 2020 Shared Task Results (UD format)
|
| 39 |
+
|
| 40 |
+
| Rank | Model | Architecture | UAS | LAS | Reference |
|
| 41 |
+
|:----:|-------|-------------|----:|----:|-----------|
|
| 42 |
+
| 1 | PhoBERT+ELMO/Biaffine | Biaffine + contextualized | 84.65 | 76.27 | [13] |
|
| 43 |
+
| 2 | GNN-based parser | Graph neural networks | 81.71 | 73.19 | [14] |
|
| 44 |
+
| 3 | BiLSTM Biaffine | BiLSTM + biaffine | -- | -- | [15] |
|
| 45 |
+
|
| 46 |
+
## Table 5: Treebank Construction Methods Comparison
|
| 47 |
+
|
| 48 |
+
| Method | Examples | Pros | Cons | Applicable to UDD |
|
| 49 |
+
|--------|----------|------|------|--------------------|
|
| 50 |
+
| **Constituency-to-dependency** | VnDT, UD_Vietnamese-VTB | Leverages existing constituency resources | Conversion errors, lossy mapping | No (no constituency source) |
|
| 51 |
+
| **Manual annotation** | BKTreebank | High quality, consistent | Expensive, slow (~3K sent/team/year) | Partially (for gold evaluation) |
|
| 52 |
+
| **Cross-lingual projection** | Chinese-Vietnamese alignment | No Vietnamese-specific parser needed | Alignment errors, typological mismatch | No (no parallel corpus) |
|
| 53 |
+
| **Silver-standard (neural)** | UD-English-CHILDES, UDD-1 | Scalable, fast | Parser errors propagate | Yes (primary method) |
|
| 54 |
+
| **Bootstrapping** | NEGRA, Penn Treebank | Improves over iterations | Requires initial parser | Yes (iterative refinement) |
|
| 55 |
+
|
| 56 |
+
## Table 6: Vietnamese Pre-trained Language Models for Parsing
|
| 57 |
+
|
| 58 |
+
| Model | Architecture | Training Data | Parameters | Vietnamese Parsing Impact | Reference |
|
| 59 |
+
|-------|-------------|---------------|-----------|---------------------------|-----------|
|
| 60 |
+
| PhoBERT-base | RoBERTa | 20GB Vietnamese text | 135M | +3-5% LAS over non-pretrained | [26] |
|
| 61 |
+
| PhoBERT-large | RoBERTa | 20GB Vietnamese text | 370M | +0.5% UAS over base | [26] |
|
| 62 |
+
| XLM-RoBERTa-large | RoBERTa | 2.5TB multilingual | 550M | Used in Trankit (64.76 LAS on VTB) | [42] |
|
| 63 |
+
| multilingual BERT | Transformer | Multilingual Wikipedia | 110M | Baseline for cross-lingual transfer | -- |
|
| 64 |
+
|
| 65 |
+
## Table 7: Domain Coverage in Vietnamese NLP Resources
|
| 66 |
+
|
| 67 |
+
| Domain | Dependency Treebank | Constituency Treebank | Raw Corpus | Notes |
|
| 68 |
+
|--------|:-------------------:|:---------------------:|:----------:|-------|
|
| 69 |
+
| News | VnDT, BKTreebank, VTB | VietTreebank | Large | Most studied domain |
|
| 70 |
+
| Legal | **UDD-1** | None | UTS_VLC | First UD treebank for legal |
|
| 71 |
+
| Literature/Books | None | None | UVB-v0.1 | Planned for UDD expansion |
|
| 72 |
+
| Scientific | None | None | Limited | Underrepresented |
|
| 73 |
+
| Medical | None | None | Limited | Underrepresented |
|
| 74 |
+
| Social media | None | None | Available | Informal language challenges |
|
| 75 |
+
| Conversational | None | None | Limited | Spoken language features |
|
references/research_vietnamese_dep_parsing/papers.md
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Paper Database: Vietnamese Dependency Parsing Datasets
|
| 2 |
+
|
| 3 |
+
## A. Vietnamese Dependency Treebank Resources
|
| 4 |
+
|
| 5 |
+
### 1. From Treebank Conversion to Automatic Dependency Parsing for Vietnamese
|
| 6 |
+
- **Authors**: Dat Quoc Nguyen, Dai Quoc Nguyen, Son Bao Pham, Phuong-Thai Nguyen, Minh Le Nguyen
|
| 7 |
+
- **Venue**: NLDB 2014, Springer
|
| 8 |
+
- **URL**: https://link.springer.com/chapter/10.1007/978-3-319-07983-7_26
|
| 9 |
+
- **GitHub**: https://github.com/datquocnguyen/VnDT
|
| 10 |
+
- **Relevance**: Presents VnDT (~10,200 sentences), the first large Vietnamese dependency treebank created by conversion from VietTreebank constituency treebank. Primary benchmark for Vietnamese dependency parsing.
|
| 11 |
+
|
| 12 |
+
### 2. BKTreebank: Building a Vietnamese Dependency Treebank
|
| 13 |
+
- **Authors**: Kiem-Hieu Nguyen
|
| 14 |
+
- **Venue**: LREC 2018, Miyazaki, Japan
|
| 15 |
+
- **URL**: https://aclanthology.org/L18-1341/
|
| 16 |
+
- **Relevance**: 6,900 manually annotated sentences from news domain. 3 annotators, BRAT tool. Independent Vietnamese dependency treebank with custom POS tagset and dependency relations.
|
| 17 |
+
|
| 18 |
+
### 3. UD_Vietnamese-VTB
|
| 19 |
+
- **Authors**: VLSP Project contributors
|
| 20 |
+
- **Venue**: Universal Dependencies project (ongoing)
|
| 21 |
+
- **URL**: https://universaldependencies.org/treebanks/vi_vtb/index.html
|
| 22 |
+
- **GitHub**: https://github.com/UniversalDependencies/UD_Vietnamese-VTB
|
| 23 |
+
- **Relevance**: Only official Vietnamese UD treebank. 3,323 sentences, 58,069 tokens from news. Auto-converted from VLSP constituency treebank.
|
| 24 |
+
|
| 25 |
+
### 4. Building a Treebank for Vietnamese Dependency Parsing
|
| 26 |
+
- **Authors**: Phuong Le-Hong, Thi Minh Huyen Nguyen, Azim Roussanaly, Ho Tuong Vinh
|
| 27 |
+
- **Venue**: IEEE KSE 2013
|
| 28 |
+
- **URL**: https://ieeexplore.ieee.org/document/6719884
|
| 29 |
+
- **Relevance**: Early work on constituency-to-dependency conversion for Vietnamese. Defines head-percolation rules and Vietnamese dependency label set.
|
| 30 |
+
|
| 31 |
+
### 5. Domain Generalization in Vietnamese Dependency Parsing (DGDT)
|
| 32 |
+
- **Authors**: D. Huynh, V.H. Le, C.A. Truong, C.M. Huynh, Y.T. Nguyen, Q.T. Nguyen
|
| 33 |
+
- **Venue**: SOICT 2024 (Springer CCIS vol 2350)
|
| 34 |
+
- **URL**: https://link.springer.com/chapter/10.1007/978-981-96-4282-3_14
|
| 35 |
+
- **Relevance**: Introduces DGDT benchmark for cross-domain Vietnamese dependency parsing. Shows 3.27% UAS / 5.09% LAS degradation across domains.
|
| 36 |
+
|
| 37 |
+
---
|
| 38 |
+
|
| 39 |
+
## B. Vietnamese Treebank Quality and Construction
|
| 40 |
+
|
| 41 |
+
### 6. Vietnamese Treebank Construction and Entropy-based Error Detection
|
| 42 |
+
- **Authors**: Phuong Le-Hong, Thi Minh Huyen Nguyen, et al.
|
| 43 |
+
- **Venue**: Language Resources and Evaluation, 2015, Springer
|
| 44 |
+
- **URL**: https://link.springer.com/article/10.1007/s10579-015-9308-5
|
| 45 |
+
- **Relevance**: Large Vietnamese treebank (~40,000 sentences), 3 annotation layers. Entropy-based error detection. ~90% inter-annotator agreement.
|
| 46 |
+
|
| 47 |
+
### 7. Challenges and Solutions for Consistent Annotation of Vietnamese Treebank
|
| 48 |
+
- **Authors**: Quy Nguyen, Yusuke Miyao, Ha Le, Ngan Nguyen
|
| 49 |
+
- **Venue**: LREC 2016, Portorož, Slovenia
|
| 50 |
+
- **URL**: https://aclanthology.org/L16-1243/
|
| 51 |
+
- **Relevance**: Inter-annotator agreement >90%. Vietnamese-specific challenges in word segmentation, POS tagging, and syntactic bracketing.
|
| 52 |
+
|
| 53 |
+
### 8. Automatic Detection of Problematic Rules in Vietnamese Treebank
|
| 54 |
+
- **Authors**: (VNU researchers)
|
| 55 |
+
- **Venue**: IEEE Conference, 2014
|
| 56 |
+
- **URL**: https://ieeexplore.ieee.org/document/7049867/
|
| 57 |
+
- **Relevance**: Automated treebank error detection using equivalence classes and vertical markovization.
|
| 58 |
+
|
| 59 |
+
### 9. Converting a Constituency Treebank to Dependency Treebank for Vietnamese
|
| 60 |
+
- **Authors**: (Vietnamese researchers)
|
| 61 |
+
- **Venue**: IEEE Conference, 2022
|
| 62 |
+
- **URL**: https://ieeexplore.ieee.org/document/10013806/
|
| 63 |
+
- **Relevance**: New head-percolation rules achieving ~13% UAS and ~21% LAS improvement over previous conversion methods.
|
| 64 |
+
|
| 65 |
+
### 10. BERT-Based Sentence Recommendation for Building Vietnamese UD Treebank
|
| 66 |
+
- **Authors**: (Vietnamese researchers)
|
| 67 |
+
- **Venue**: FDSE 2023, Springer
|
| 68 |
+
- **URL**: https://link.springer.com/chapter/10.1007/978-981-99-8296-7_28
|
| 69 |
+
- **Relevance**: BERT-based approach to recommend sentences likely to be parsed with high LAS for efficient treebank construction.
|
| 70 |
+
|
| 71 |
+
### 11. Building Vietnamese Dependency Treebank Based on Chinese-Vietnamese Bilingual Word Alignment
|
| 72 |
+
- **Authors**: (Chinese-Vietnamese NLP researchers)
|
| 73 |
+
- **Venue**: IEEE Conference, 2016
|
| 74 |
+
- **URL**: https://ieeexplore.ieee.org/document/7603371/
|
| 75 |
+
- **Relevance**: Cross-lingual annotation projection from Chinese to Vietnamese via bilingual word alignment.
|
| 76 |
+
|
| 77 |
+
---
|
| 78 |
+
|
| 79 |
+
## C. VLSP Shared Tasks
|
| 80 |
+
|
| 81 |
+
### 12. VLSP 2020 Shared Task: Universal Dependency Parsing for Vietnamese
|
| 82 |
+
- **Authors**: Ha My Linh, Nguyen Thi Minh Huyen, Vu Xuan Luong, Nguyen Thi Luong, Phan Thi Hue, Le Van Cuong
|
| 83 |
+
- **Venue**: VLSP 2020 (7th Int'l Workshop on Vietnamese Language and Speech Processing)
|
| 84 |
+
- **URL**: https://aclanthology.org/2020.vlsp-1.15/
|
| 85 |
+
- **Relevance**: Official shared task overview. UD v2 corpus (8,152 train + 1,123 test sentences). Best system: 76.27 LAS (PhoBERT+ELMO/Biaffine).
|
| 86 |
+
|
| 87 |
+
### 13. A Joint Deep Contextualized Word Representation for Deep Biaffine Dependency Parsing
|
| 88 |
+
- **Authors**: Xuan-Dung Doan
|
| 89 |
+
- **Venue**: VLSP 2020
|
| 90 |
+
- **URL**: https://aclanthology.org/2020.vlsp-1.10/
|
| 91 |
+
- **Relevance**: VLSP 2020 participant using deep contextualized representations with biaffine parsing.
|
| 92 |
+
|
| 93 |
+
### 14. Applying Graph Neural Networks for Vietnamese Dependency Parsing
|
| 94 |
+
- **Authors**: Nguyen Duc Thien, Nguyen Thi Thu Trang, Truong Dang Quang
|
| 95 |
+
- **Venue**: VLSP 2020
|
| 96 |
+
- **URL**: https://aclanthology.org/2020.vlsp-1.11/
|
| 97 |
+
- **Relevance**: GNN approach for Vietnamese dependency parsing. 73.19 LAS.
|
| 98 |
+
|
| 99 |
+
### 15. Implementing Bi-LSTM-based Deep Biaffine Neural Dependency Parser for Vietnamese
|
| 100 |
+
- **Authors**: Lien Nguyen
|
| 101 |
+
- **Venue**: VLSP 2020
|
| 102 |
+
- **URL**: https://aclanthology.org/2020.vlsp-1.12/
|
| 103 |
+
- **Relevance**: BiLSTM biaffine architecture for Vietnamese UD parsing.
|
| 104 |
+
|
| 105 |
+
---
|
| 106 |
+
|
| 107 |
+
## D. Vietnamese Dependency Parsing Methods
|
| 108 |
+
|
| 109 |
+
### 16. An Empirical Study for Vietnamese Dependency Parsing
|
| 110 |
+
- **Authors**: Dat Quoc Nguyen, Mark Dras, Mark Johnson
|
| 111 |
+
- **Venue**: ALTA 2016, Melbourne
|
| 112 |
+
- **URL**: https://aclanthology.org/U16-1017/
|
| 113 |
+
- **Relevance**: Empirical comparison of graph-based vs. transition-based parsing on VnDT.
|
| 114 |
+
|
| 115 |
+
### 17. Error Analysis for Vietnamese Dependency Parsing
|
| 116 |
+
- **Authors**: Kiet Van Nguyen, Ngan Luu-Thuy Nguyen
|
| 117 |
+
- **Venue**: arXiv:1911.03724
|
| 118 |
+
- **URL**: https://arxiv.org/abs/1911.03724
|
| 119 |
+
- **Relevance**: Detailed error analysis of MSTParser and MaltParser on Vietnamese.
|
| 120 |
+
|
| 121 |
+
### 18. Vietnamese Transition-based Dependency Parsing with Supertag Features
|
| 122 |
+
- **Authors**: Kiet Van Nguyen, Ngan Luu-Thuy Nguyen
|
| 123 |
+
- **Venue**: arXiv:1911.03726 / IEEE KSE 2016
|
| 124 |
+
- **URL**: https://arxiv.org/abs/1911.03726
|
| 125 |
+
- **Relevance**: 18.92% LAS improvement with gold supertags, 3.57% with automatic supertags on VnDT.
|
| 126 |
+
|
| 127 |
+
### 19. LSTM Easy-first Dependency Parsing with Pre-trained Word Embeddings in Vietnamese
|
| 128 |
+
- **Authors**: Binh Duc Nguyen et al.
|
| 129 |
+
- **Venue**: IEEE KSE 2018 / arXiv:1910.13732
|
| 130 |
+
- **URL**: https://arxiv.org/abs/1910.13732
|
| 131 |
+
- **Relevance**: Character-level embeddings and easy-first parsing for Vietnamese. 80.91% UAS, 72.98% LAS on VnDT.
|
| 132 |
+
|
| 133 |
+
### 20. Using BiLSTM in Dependency Parsing for Vietnamese
|
| 134 |
+
- **Authors**: Nguyen Thi My et al.
|
| 135 |
+
- **Venue**: Computación y Sistemas, 2018
|
| 136 |
+
- **URL**: https://www.scielo.org.mx/scielo.php?script=sci_arttext&pid=S1405-55462018000300853
|
| 137 |
+
- **Relevance**: BiLSTM for both transition-based and graph-based Vietnamese dependency parsing.
|
| 138 |
+
|
| 139 |
+
### 21. A Neural Joint Model for Vietnamese Word Segmentation, POS Tagging and Dependency Parsing
|
| 140 |
+
- **Authors**: Dat Quoc Nguyen
|
| 141 |
+
- **Venue**: ALTA 2019, Sydney
|
| 142 |
+
- **URL**: https://aclanthology.org/U19-1004/
|
| 143 |
+
- **Relevance**: First multi-task learning model for joint Vietnamese word segmentation, POS tagging, and dependency parsing.
|
| 144 |
+
|
| 145 |
+
### 22. An Attempt to Develop a Neural Parser based on Simplified HPSG on Vietnamese
|
| 146 |
+
- **Authors**: Duc-Vu Nguyen, Thang Chau Phan, Quoc-Nam Nguyen, Kiet Van Nguyen, Ngan Luu-Thuy Nguyen
|
| 147 |
+
- **Venue**: arXiv:2411.17270 (2024)
|
| 148 |
+
- **URL**: https://arxiv.org/abs/2411.17270
|
| 149 |
+
- **Relevance**: HPSG-based neural parser for Vietnamese. New SOTA 82% F-score for constituency parsing. Identifies ~15% non-compliant trees in VietTreebank/VnDT.
|
| 150 |
+
|
| 151 |
+
### 23. Adapting Cross-lingual Model to Improve Vietnamese Dependency Parsing
|
| 152 |
+
- **Authors**: Anh Duc Do Tran, Dien Dinh, An-Vinh Luong, Thao Do
|
| 153 |
+
- **Venue**: ICABDE 2021, Springer
|
| 154 |
+
- **URL**: https://researchportal.bath.ac.uk/en/publications/adapting-cross-lingual-model-to-improve-vietnamese-dependency-par
|
| 155 |
+
- **Relevance**: Cross-lingual transfer from English using biaffine attention with sentence filtering.
|
| 156 |
+
|
| 157 |
+
---
|
| 158 |
+
|
| 159 |
+
## E. Vietnamese NLP Toolkits
|
| 160 |
+
|
| 161 |
+
### 24. VnCoreNLP: A Vietnamese Natural Language Processing Toolkit
|
| 162 |
+
- **Authors**: Thanh Vu, Dat Quoc Nguyen, Dai Quoc Nguyen, Mark Dras, Mark Johnson
|
| 163 |
+
- **Venue**: NAACL 2018 Demonstrations
|
| 164 |
+
- **URL**: https://aclanthology.org/N18-5012/
|
| 165 |
+
- **Relevance**: Java-based NLP pipeline. Word segmentation, POS tagging, NER, dependency parsing. 73.39% LAS on VnDT.
|
| 166 |
+
|
| 167 |
+
### 25. PhoNLP: A Joint Multi-task Learning Model for Vietnamese
|
| 168 |
+
- **Authors**: Linh The Nguyen, Dat Quoc Nguyen
|
| 169 |
+
- **Venue**: NAACL 2021 Demonstrations
|
| 170 |
+
- **URL**: https://arxiv.org/abs/2101.01476
|
| 171 |
+
- **Relevance**: PhoBERT-based multi-task model. SOTA on VnDT: 79.11% LAS, 85.47% UAS.
|
| 172 |
+
|
| 173 |
+
### 26. PhoBERT: Pre-trained Language Models for Vietnamese
|
| 174 |
+
- **Authors**: Dat Quoc Nguyen, Anh Tuan Nguyen
|
| 175 |
+
- **Venue**: Findings of EMNLP 2020
|
| 176 |
+
- **URL**: https://aclanthology.org/2020.findings-emnlp.92/
|
| 177 |
+
- **Relevance**: First large-scale Vietnamese pre-trained LM. Foundation for most top Vietnamese parsing systems.
|
| 178 |
+
|
| 179 |
+
### 27. Stanza: A Python NLP Toolkit for Many Human Languages
|
| 180 |
+
- **Authors**: Peng Qi, Yuhao Zhang, Yuhui Zhang, Jason Bolton, Christopher D. Manning
|
| 181 |
+
- **Venue**: ACL 2020 Demonstrations
|
| 182 |
+
- **URL**: https://aclanthology.org/2020.acl-demos.14/
|
| 183 |
+
- **Relevance**: Multilingual toolkit with Vietnamese support. LAS 53.63 on UD_Vietnamese-VTB. Baseline for silver treebank generation.
|
| 184 |
+
|
| 185 |
+
### 28. Trankit: A Light-Weight Transformer-based Toolkit for Multilingual NLP
|
| 186 |
+
- **Authors**: Minh Van Nguyen, Viet Dac Lai, Amir Pouran Ben Veyseh, Thien Huu Nguyen
|
| 187 |
+
- **Venue**: EACL 2021 Demonstrations (Outstanding Demo Paper)
|
| 188 |
+
- **URL**: https://aclanthology.org/2021.eacl-demos.10/
|
| 189 |
+
- **Relevance**: XLM-RoBERTa-large with adapters. LAS 64.76 on UD_Vietnamese-VTB. Supports 56 languages.
|
| 190 |
+
|
| 191 |
+
---
|
| 192 |
+
|
| 193 |
+
## F. Vietnamese Semantic Parsing
|
| 194 |
+
|
| 195 |
+
### 29. Sentential Semantic Dependency Parsing for Vietnamese
|
| 196 |
+
- **Authors**: Do T.T., Nguyen D.T.
|
| 197 |
+
- **Venue**: SN Computer Science, vol 2, 2021
|
| 198 |
+
- **URL**: https://link.springer.com/article/10.1007/s42979-021-00715-4
|
| 199 |
+
- **Relevance**: Extends syntactic to semantic dependency graphs using Vietnamese Lexicon Ontology.
|
| 200 |
+
|
| 201 |
+
### 30. VLO V1.1 - A Vietnamese Lexicon Ontology for Universal Dependency Parsing
|
| 202 |
+
- **Authors**: (Vietnamese researchers)
|
| 203 |
+
- **Venue**: IEEE Conference, 2021
|
| 204 |
+
- **URL**: https://ieeexplore.ieee.org/document/9353080/
|
| 205 |
+
- **Relevance**: Lexical semantic constraints for Vietnamese dependency parsing (isolating language challenges).
|
| 206 |
+
|
| 207 |
+
---
|
| 208 |
+
|
| 209 |
+
## G. UD Framework and CoNLL Shared Tasks
|
| 210 |
+
|
| 211 |
+
### 31. Universal Dependencies v2: An Evergrowing Multilingual Treebank Collection
|
| 212 |
+
- **Authors**: Joakim Nivre et al.
|
| 213 |
+
- **Venue**: LREC 2020, Marseille
|
| 214 |
+
- **URL**: https://aclanthology.org/2020.lrec-1.497/
|
| 215 |
+
- **Relevance**: UD v2 framework and guidelines. 200+ treebanks, 150+ languages.
|
| 216 |
+
|
| 217 |
+
### 32. Universal Dependencies v1: A Multilingual Treebank Collection
|
| 218 |
+
- **Authors**: Joakim Nivre et al.
|
| 219 |
+
- **Venue**: LREC 2016
|
| 220 |
+
- **URL**: https://aclanthology.org/L16-1262/
|
| 221 |
+
- **Relevance**: Original UD v1 framework.
|
| 222 |
+
|
| 223 |
+
### 33. Universal Dependencies
|
| 224 |
+
- **Authors**: Marie-Catherine de Marneffe, Christopher D. Manning, Joakim Nivre, Daniel Zeman
|
| 225 |
+
- **Venue**: Computational Linguistics, 47(2):255-308, 2021
|
| 226 |
+
- **URL**: https://direct.mit.edu/coli/article/47/2/255/98516/Universal-Dependencies
|
| 227 |
+
- **Relevance**: Definitive overview of UD's theoretical foundation, 17 UPOS categories, dependency relation taxonomy.
|
| 228 |
+
|
| 229 |
+
### 34. CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to UD
|
| 230 |
+
- **Authors**: Daniel Zeman et al.
|
| 231 |
+
- **Venue**: CoNLL 2017, Vancouver
|
| 232 |
+
- **URL**: https://aclanthology.org/K17-3001/
|
| 233 |
+
- **Relevance**: First major multilingual evaluation including Vietnamese UD parsing.
|
| 234 |
+
|
| 235 |
+
### 35. CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to UD
|
| 236 |
+
- **Authors**: Daniel Zeman et al.
|
| 237 |
+
- **Venue**: CoNLL 2018, Brussels
|
| 238 |
+
- **URL**: https://aclanthology.org/K18-2001/
|
| 239 |
+
- **Relevance**: Second edition. Noted Vietnamese UD treebank conversion errors from v1 to v2.
|
| 240 |
+
|
| 241 |
+
### 36. Deep Biaffine Attention for Neural Dependency Parsing
|
| 242 |
+
- **Authors**: Timothy Dozat, Christopher D. Manning
|
| 243 |
+
- **Venue**: ICLR 2017
|
| 244 |
+
- **URL**: https://arxiv.org/abs/1611.01734
|
| 245 |
+
- **Relevance**: Foundation architecture for most modern Vietnamese dependency parsers including Underthesea.
|
| 246 |
+
|
| 247 |
+
---
|
| 248 |
+
|
| 249 |
+
## H. Silver-Standard and Semi-Automatic Treebank Construction
|
| 250 |
+
|
| 251 |
+
### 37. UD-English-CHILDES: Gold and Silver Universal Dependencies Trees
|
| 252 |
+
- **Authors**: (Multiple)
|
| 253 |
+
- **Venue**: UDW 2025
|
| 254 |
+
- **URL**: https://arxiv.org/abs/2504.20304
|
| 255 |
+
- **Relevance**: Methodological reference: 1M-sentence silver treebank using Stanza alongside 48K gold treebank.
|
| 256 |
+
|
| 257 |
+
### 38. Building a Large Syntactically-Annotated Corpus of Vietnamese
|
| 258 |
+
- **Authors**: Phuong-Thai Nguyen, Xuan-Luong Vu, Thi-Minh-Huyen Nguyen, Van-Hiep Nguyen, Hong-Phuong Le
|
| 259 |
+
- **Venue**: LAW III (3rd Linguistic Annotation Workshop), 2009
|
| 260 |
+
- **URL**: https://aclanthology.org/W09-3035/
|
| 261 |
+
- **Relevance**: Original VietTreebank constituency treebank. Source for VnDT and UD_Vietnamese-VTB.
|
| 262 |
+
|
| 263 |
+
---
|
| 264 |
+
|
| 265 |
+
## I. Domain Adaptation and Cross-lingual Transfer
|
| 266 |
+
|
| 267 |
+
### 39. Treebank Embedding Vectors for Out-of-Domain Dependency Parsing
|
| 268 |
+
- **Authors**: Joachim Wagner, James Barry, Jennifer Foster
|
| 269 |
+
- **Venue**: ACL 2020
|
| 270 |
+
- **URL**: https://aclanthology.org/2020.acl-main.778/
|
| 271 |
+
- **Relevance**: Treebank embeddings for multi-domain parsing adaptation.
|
| 272 |
+
|
| 273 |
+
### 40. Universal Dependencies Treebank for Uzbek
|
| 274 |
+
- **Authors**: (Multiple)
|
| 275 |
+
- **Venue**: ResourceFul Workshop, ACL 2025
|
| 276 |
+
- **URL**: https://aclanthology.org/2025.resourceful-1.1/
|
| 277 |
+
- **Relevance**: Methodology for building UD treebank for low-resource language (500 sentences, news+fiction).
|
| 278 |
+
|
| 279 |
+
### 41. Cross-Lingual Transfer with Language-Specific Subnetworks for Low-Resource Dependency Parsing
|
| 280 |
+
- **Authors**: (Google Research, MIT)
|
| 281 |
+
- **Venue**: Computational Linguistics, 49(3), 2023
|
| 282 |
+
- **URL**: https://direct.mit.edu/coli/article/49/3/613/116157
|
| 283 |
+
- **Relevance**: Language-specific subnetworks for cross-lingual transfer to low-resource targets.
|
| 284 |
+
|
| 285 |
+
### 42. Unsupervised Cross-lingual Representation Learning at Scale (XLM-RoBERTa)
|
| 286 |
+
- **Authors**: Alexis Conneau et al.
|
| 287 |
+
- **Venue**: ACL 2020
|
| 288 |
+
- **URL**: https://arxiv.org/abs/1911.02116
|
| 289 |
+
- **Relevance**: XLM-RoBERTa multilingual model used in Vietnamese cross-lingual parsing experiments.
|
| 290 |
+
|
| 291 |
+
### 43. Automation of Treebank Annotation
|
| 292 |
+
- **Authors**: Thorsten Brants, Wojciech Skut
|
| 293 |
+
- **Venue**: NeMLaP 1998
|
| 294 |
+
- **URL**: https://aclanthology.org/W98-1207.pdf
|
| 295 |
+
- **Relevance**: Foundational work on bootstrapping strategy for semi-automatic treebank construction.
|
references/research_vietnamese_dep_parsing/sota.md
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# State-of-the-Art: Vietnamese Dependency Parsing
|
| 2 |
+
|
| 3 |
+
**Last updated**: 2026-02-08
|
| 4 |
+
|
| 5 |
+
## Summary
|
| 6 |
+
|
| 7 |
+
Vietnamese dependency parsing has progressed from transition-based and graph-based parsers (2014-2017) to neural architectures with pre-trained language models (2020-present). The current SOTA is **PhoNLP** achieving **79.11% LAS / 85.47% UAS** on VnDT v1.1 using PhoBERT-based multi-task learning. Performance on UD_Vietnamese-VTB remains significantly lower (best: 64.76% LAS with Trankit), attributed to annotation quality issues in the automatic constituency-to-dependency conversion.
|
| 8 |
+
|
| 9 |
+
## Key Benchmarks
|
| 10 |
+
|
| 11 |
+
### VnDT v1.1 (Primary Benchmark)
|
| 12 |
+
- **Dataset**: 10,200 sentences, news domain, CoNLL format
|
| 13 |
+
- **Split**: 8,200 train / 1,020 dev / 1,020 test (note: these figures sum to 10,240, not the 10,200 stated above — verify against the VnDT v1.1 release)
|
| 14 |
+
- **Best LAS**: 79.11% (PhoNLP, 2021)
|
| 15 |
+
- **Best UAS**: 85.73% (HPSG + PhoBERT-large, 2024)
|
| 16 |
+
|
| 17 |
+
### UD_Vietnamese-VTB
|
| 18 |
+
- **Dataset**: 3,323 sentences, news domain, CoNLL-U format
|
| 19 |
+
- **Split**: 1,400 train / 800 dev / 1,123 test
|
| 20 |
+
- **Best LAS**: 64.76% (Trankit, 2021)
|
| 21 |
+
- **Best UAS**: 70.96% (Trankit, 2021)
|
| 22 |
+
|
| 23 |
+
### VLSP 2020
|
| 24 |
+
- **Dataset**: 8,152 train + 1,123 test, UD format
|
| 25 |
+
- **Best LAS**: 76.27% (PhoBERT+ELMO/Biaffine)
|
| 26 |
+
- **Best UAS**: 84.65% (PhoBERT+ELMO/Biaffine)
|
| 27 |
+
|
| 28 |
+
## Evolution of Approaches
|
| 29 |
+
|
| 30 |
+
### Phase 1: Classical Parsing (2014-2016)
|
| 31 |
+
- **Transition-based**: MaltParser, adapted for Vietnamese with supertag features [18]
|
| 32 |
+
- **Graph-based**: MSTParser on VnDT [16, 17]
|
| 33 |
+
- **Best LAS**: ~68% on VnDT
|
| 34 |
+
- Key contribution: VnDT treebank creation [1], empirical comparison of methods [16]
|
| 35 |
+
|
| 36 |
+
### Phase 2: Neural Parsing (2017-2019)
|
| 37 |
+
- **Deep biaffine attention** (Dozat & Manning, 2017): 74.99% LAS on VnDT [36]
|
| 38 |
+
- **LSTM easy-first**: 72.98% LAS with character embeddings [19]
|
| 39 |
+
- **BiLSTM models**: Both transition and graph-based variants [20]
|
| 40 |
+
- **VnCoreNLP**: 73.39% LAS, integrated toolkit [24]
|
| 41 |
+
- **Joint models**: First multi-task (segmentation + POS + parsing) [21]
|
| 42 |
+
|
| 43 |
+
### Phase 3: Pre-trained LM Era (2020-present)
|
| 44 |
+
- **PhoBERT** (2020): Vietnamese-specific pre-trained model, +3-5% LAS improvement [26]
|
| 45 |
+
- **PhoNLP** (2021): Multi-task learning with PhoBERT, SOTA 79.11% LAS [25]
|
| 46 |
+
- **Trankit** (2021): XLM-R-based multilingual, 64.76% LAS on VTB [28]
|
| 47 |
+
- **HPSG parser** (2024): Constituency-dependency hybrid, 78.42% LAS [22]
|
| 48 |
+
- **DGDT** (2024): Domain generalization benchmark, 3-5% cross-domain degradation [5]
|
| 49 |
+
|
| 50 |
+
## Open Challenges
|
| 51 |
+
|
| 52 |
+
1. **Ceiling effect on VnDT**: LAS has plateaued around 79% despite larger models, suggesting data quality/size limits
|
| 53 |
+
2. **VTB quality gap**: 15-point LAS gap between VnDT and VTB benchmarks for similar-capability models
|
| 54 |
+
3. **Domain generalization**: 3-5% LAS degradation on out-of-domain data (DGDT benchmark)
|
| 55 |
+
4. **Morphological features**: Vietnamese treebanks lack FEATS annotation
|
| 56 |
+
5. **Scale**: Largest Vietnamese UD treebank has only 3,323 sentences (vs. 25,000+ for English, Czech, etc.)
|
| 57 |
+
|
| 58 |
+
## Relevance to UDD-1
|
| 59 |
+
|
| 60 |
+
UDD-1 addresses several gaps:
|
| 61 |
+
- **Scale**: 10,000 sentences (3x larger than UD_Vietnamese-VTB)
|
| 62 |
+
- **Domain**: Legal domain (first Vietnamese UD treebank outside news)
|
| 63 |
+
- **Methodology**: Silver-standard with rule-based post-processing (following UD-English-CHILDES approach)
|
| 64 |
+
- **Availability**: Public on HuggingFace (open access)
|
| 65 |
+
|
| 66 |
+
The parsing architecture used in UDD-1 (Underthesea's biaffine parser with BERT) is closely related to the models achieving 75-79% LAS on VnDT, suggesting similar quality potential with appropriate post-processing.
|
| 67 |
+
|
| 68 |
+
## Reference Numbers
|
| 69 |
+
|
| 70 |
+
Numbers in brackets [N] refer to paper entries in [papers.md](papers.md).
|
references/research_vietnamese_ud_annotation/README.md
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Research: Annotation Guidelines for Vietnamese Universal Dependency Treebanks
|
| 2 |
+
|
| 3 |
+
**Topic**: Annotation guidelines used in Vietnamese UD treebanks
|
| 4 |
+
**Date**: 2026-02-08
|
| 5 |
+
**Related files**: [papers.md](papers.md) | [comparison.md](comparison.md) | [guidelines_summary.md](guidelines_summary.md) | [bibliography.bib](bibliography.bib)
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## Key Findings
|
| 10 |
+
|
| 11 |
+
### 1. Vietnamese UD Treebank Ecosystem
|
| 12 |
+
|
| 13 |
+
There are five major Vietnamese treebanks, of which only one (UD_Vietnamese-VTB) follows UD conventions:
|
| 14 |
+
|
| 15 |
+
| Treebank | Type | Size | Year | Annotation |
|
| 16 |
+
|----------|------|------|------|------------|
|
| 17 |
+
| VietTreebank/VLSP | Constituency | ~10,374 sent | 2009 | Word seg + POS + bracketing |
|
| 18 |
+
| NIIVTB | Constituency | 20,588 sent | 2016-2018 | 33 POS, 18 phrase types, 22 functional tags |
|
| 19 |
+
| VnDT | Dependency | 10,200 sent | 2014 | 33 Stanford-adapted dep labels |
|
| 20 |
+
| UD_Vietnamese-VTB | Dependency (UD) | 3,323 sent | 2016 | 17 UPOS, 50 relation subtypes |
|
| 21 |
+
| BKTreebank | Dependency | ~6,900 sent | 2018 | Custom POS + dependency, from scratch |
|
| 22 |
+
|
| 23 |
+
### 2. Vietnamese-Specific UD Annotation Decisions
|
| 24 |
+
|
| 25 |
+
The following conventions are established in UD_Vietnamese-VTB:
|
| 26 |
+
|
| 27 |
+
- **Tokenization**: Spaces appear between syllables, not word boundaries. Multi-syllable words are allowed as single tokens (4,152 word types with spaces in VTB).
|
| 28 |
+
- **Copula**: Only `là` is the copula, tagged AUX with `cop` relation.
|
| 29 |
+
- **Passive**: `được` (favorable) and `bị` (unfavorable) serve as passive auxiliaries via `aux:pass`.
|
| 30 |
+
- **Classifiers**: `clf` relation connects classifiers (NOUN) to numerals (NUM) or determiners (DET).
|
| 31 |
+
- **Serial verbs**: `compound:svc` for serial verb constructions (511 instances, 89% VERB-VERB).
|
| 32 |
+
- **Reduplication**: `compound:redup` and `flat:redup` (rare in VTB).
|
| 33 |
+
- **Determiners**: Preverbal quantifiers (`những, các, mỗi`) use `det`; postnominal demonstratives (`ấy, này, đó`) use `det:pmod`.
|
| 34 |
+
- **No morphological features**: Vietnamese words show no inflection. Only `NumType=Card` is used for numerals.
|
| 35 |
+
|
| 36 |
+
### 3. XPOS to UPOS Mapping
|
| 37 |
+
|
| 38 |
+
The Vietnamese XPOS tagset (18 tags) maps to UPOS as follows:
|
| 39 |
+
|
| 40 |
+
| XPOS | UPOS | Description |
|
| 41 |
+
|------|------|-------------|
|
| 42 |
+
| N | NOUN | Common noun |
|
| 43 |
+
| Np | PROPN | Proper noun |
|
| 44 |
+
| Nc | NOUN | Classifier noun |
|
| 45 |
+
| Nu | NOUN | Unit noun |
|
| 46 |
+
| V | VERB | Verb |
|
| 47 |
+
| A | ADJ | Adjective |
|
| 48 |
+
| P | PRON | Pronoun |
|
| 49 |
+
| R | ADV | Adverb |
|
| 50 |
+
| L | DET | Determiner |
|
| 51 |
+
| M | NUM | Numeral |
|
| 52 |
+
| E | ADP | Preposition |
|
| 53 |
+
| C | CCONJ | Coordinating conjunction |
|
| 54 |
+
| CC | CCONJ | Coordinating conjunction |
|
| 55 |
+
| SC | SCONJ | Subordinating conjunction |
|
| 56 |
+
| I | INTJ | Interjection |
|
| 57 |
+
| T | PART/AUX | Particle/auxiliary (context-dependent) |
|
| 58 |
+
| Y | X | Abbreviation |
|
| 59 |
+
| X | X | Unknown |
|
| 60 |
+
|
| 61 |
+
### 4. Auxiliary Verbs (12 lemmas in VTB) <!-- NOTE(review): the table below lists 13 entries — reconcile the count -->
|
| 62 |
+
|
| 63 |
+
| Lemma | Function | Dependency Relation |
|
| 64 |
+
|-------|----------|---------------|
|
| 65 |
+
| là | Copula | cop |
|
| 66 |
+
| được | Passive (favorable) / ability | aux:pass / aux |
|
| 67 |
+
| bị | Passive (unfavorable) | aux:pass |
|
| 68 |
+
| phải | Obligation (must) | aux |
|
| 69 |
+
| có thể | Ability (can) | aux |
|
| 70 |
+
| cần | Necessity (need) | aux |
|
| 71 |
+
| nên | Advice (should) | aux |
|
| 72 |
+
| muốn | Desire (want) | aux |
|
| 73 |
+
| không thể | Inability (cannot) | aux |
|
| 74 |
+
| chưa thể | Not yet able | aux |
|
| 75 |
+
| chắc chắn | Certainty | aux |
|
| 76 |
+
| có vẻ | Evidential (seem) | aux |
|
| 77 |
+
| định | Intention (intend) | aux |
|
| 78 |
+
|
| 79 |
+
### 5. Dependency Relation Inventory (50 subtypes in VTB)
|
| 80 |
+
|
| 81 |
+
Key Vietnamese-specific subtypes:
|
| 82 |
+
|
| 83 |
+
| Subtype | Count | Description |
|
| 84 |
+
|---------|------:|-------------|
|
| 85 |
+
| compound:svc | 511 | Serial verb constructions |
|
| 86 |
+
| compound:vmod | 721 | Verbal modifier of noun |
|
| 87 |
+
| compound:redup | 4 | Reduplication compound |
|
| 88 |
+
| compound:pron | -- | Pronominal compound |
|
| 89 |
+
| compound:prt | -- | Particle compound |
|
| 90 |
+
| acl:tonp | 97 | Vietnamese adnominal clause |
|
| 91 |
+
| acl:subj | -- | Subject adnominal clause |
|
| 92 |
+
| acl:tmod | -- | Temporal modifier clause |
|
| 93 |
+
| det:pmod | -- | Pronominal determiner (demonstratives) |
|
| 94 |
+
| advmod:neg | -- | Negative adverb modifier |
|
| 95 |
+
| advmod:adj | -- | Adjectival adverb modifier |
|
| 96 |
+
| flat:redup | 3 | Reduplicative flat structure |
|
| 97 |
+
| flat:date | -- | Date expression |
|
| 98 |
+
| flat:time | -- | Time expression |
|
| 99 |
+
| flat:name | -- | Multi-word name |
|
| 100 |
+
| clf | 250 | Classifier |
|
| 101 |
+
|
| 102 |
+
### 6. Known Annotation Challenges
|
| 103 |
+
|
| 104 |
+
1. **Word segmentation ambiguity**: Spaces separate syllables, not words, creating systematic ambiguity.
|
| 105 |
+
2. **POS ambiguity**: High-frequency words have multiple POS categories (e.g., `có` as VERB/ADV/PART, `về` as VERB/ADP/ADV).
|
| 106 |
+
3. **Copula drop**: Vietnamese can drop `là` in certain constructions.
|
| 107 |
+
4. **Classifier system**: Classifiers interact with determiners and numerals in complex ways.
|
| 108 |
+
5. **Serial verb constructions**: Multiple verbs functioning as single predicates require `compound:svc`.
|
| 109 |
+
6. **Topic-comment structure**: Vietnamese is topic-prominent, creating challenges for subject identification.
|
| 110 |
+
7. **No morphological features**: Vietnamese lacks inflection, making FEATS annotation minimal.
|
| 111 |
+
8. **15% non-compliant trees**: HPSG parser analysis (2024) found ~15% of VietTreebank trees fail HPSG compliance.
|
| 112 |
+
9. **VTB quality gap**: 15-point LAS gap between VnDT (79%) and VTB (65%) benchmarks, attributed to conversion quality.
|
| 113 |
+
|
| 114 |
+
### 7. Documentation Gaps
|
| 115 |
+
|
| 116 |
+
Vietnamese UD documentation is sparse compared to well-documented languages:
|
| 117 |
+
- No Vietnamese-specific POS tag pages on universaldependencies.org (all return 404)
|
| 118 |
+
- No Vietnamese tokenization page
|
| 119 |
+
- Only a subset of dependency relation pages have Vietnamese-specific content
|
| 120 |
+
- Development occurs outside the UD repository, limiting community contribution
|
| 121 |
+
|
| 122 |
+
## Relevance to UDD-1
|
| 123 |
+
|
| 124 |
+
The UDD-1 project's `convert_to_ud.py` implements most VTB conventions but with some differences:
|
| 125 |
+
|
| 126 |
+
1. **Extended AUX list**: UDD-1 uses 20 auxiliary lemmas vs. VTB's 12 (adds `giả`, `quyết`, `thôi`, `thể`, `xong`, `nghĩa là`, `nhằm`).
|
| 127 |
+
2. **UPOS mapping**: Maps `C` to `CCONJ` (VTB uses `C` for subordinating). The `SC` tag maps to `SCONJ`.
|
| 128 |
+
3. **Post-processing**: 4-pass pipeline enforcing UD structural constraints not present in VTB conversion.
|
| 129 |
+
4. **Non-standard subtypes**: UDD-1 introduces `acl:subj` and other subtypes not in VTB.
|
| 130 |
+
|
| 131 |
+
## Sources
|
| 132 |
+
|
| 133 |
+
- [UD Vietnamese Index](https://universaldependencies.org/vi/index.html)
|
| 134 |
+
- [UD_Vietnamese-VTB Statistics](https://universaldependencies.org/treebanks/vi_vtb/index.html)
|
| 135 |
+
- [UD_Vietnamese-VTB GitHub](https://github.com/UniversalDependencies/UD_Vietnamese-VTB)
|
| 136 |
+
- [VnDT GitHub](https://github.com/datquocnguyen/VnDT)
|
| 137 |
+
- [NIIVTB GitHub](https://github.com/mynlp/niivtb)
|
| 138 |
+
- [VLSP 2020 Shared Task](https://aclanthology.org/2020.vlsp-1.15/)
|
| 139 |
+
- [BKTreebank Paper](https://aclanthology.org/L18-1341/)
|
references/research_vietnamese_ud_annotation/bibliography.bib
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
% Vietnamese UD Annotation Guidelines -- Bibliography
|
| 2 |
+
% Last updated: 2026-02-08
|
| 3 |
+
|
| 4 |
+
% ============================================================
|
| 5 |
+
% Category A: Vietnamese Constituency Treebanks
|
| 6 |
+
% ============================================================
|
| 7 |
+
|
| 8 |
+
@inproceedings{nguyen-vu-etal-2009-viettreebank,
|
| 9 |
+
title = "Building a Large Syntactically-Annotated Corpus of {V}ietnamese",
|
| 10 |
+
author = "Nguyen, Phuong-Thai and Vu, Xuan-Luong and Nguyen, Thi-Minh-Huyen and Nguyen, Van-Hiep and Le, Hong-Phuong",
|
| 11 |
+
booktitle = "Proceedings of the Third Linguistic Annotation Workshop ({LAW} {III})",
|
| 12 |
+
year = "2009",
|
| 13 |
+
publisher = "Association for Computational Linguistics",
|
| 14 |
+
pages = "182--185",
|
| 15 |
+
url = "https://aclanthology.org/W09-3035/",
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
@article{nguyen-le-ho-2015-treebank-entropy,
|
| 19 |
+
title = "{V}ietnamese Treebank Construction and Entropy-Based Error Detection",
|
| 20 |
+
author = "Nguyen, Phuong-Thai and Le, Anh-Cuong and Ho, Tu Bao",
|
| 21 |
+
journal = "Language Resources and Evaluation",
|
| 22 |
+
volume = "49",
|
| 23 |
+
pages = "487--519",
|
| 24 |
+
year = "2015",
|
| 25 |
+
publisher = "Springer",
|
| 26 |
+
url = "https://link.springer.com/article/10.1007/s10579-015-9308-5",
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
@inproceedings{nguyen-miyao-etal-2016-niivtb,
|
| 30 |
+
title = "Challenges and Solutions for Consistent Annotation of {V}ietnamese Treebank",
|
| 31 |
+
author = "Nguyen, Quy and Miyao, Yusuke and Le, Ha and Nguyen, Ngan",
|
| 32 |
+
booktitle = "Proceedings of the Tenth International Conference on Language Resources and Evaluation ({LREC} 2016)",
|
| 33 |
+
year = "2016",
|
| 34 |
+
pages = "1532--1539",
|
| 35 |
+
url = "https://aclanthology.org/L16-1243/",
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
@article{nguyen-miyao-etal-2018-niivtb-journal,
|
| 39 |
+
title = "Ensuring Annotation Consistency and Accuracy for {V}ietnamese Treebank",
|
| 40 |
+
author = "Nguyen, Quy T. and Miyao, Yusuke and Le, Ha T. T. and Nguyen, Nhung T. H.",
|
| 41 |
+
journal = "Language Resources and Evaluation",
|
| 42 |
+
year = "2018",
|
| 43 |
+
publisher = "Springer",
|
| 44 |
+
url = "https://link.springer.com/article/10.1007/s10579-017-9398-3",
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
% ============================================================
|
| 48 |
+
% Category B: Vietnamese Dependency Treebanks
|
| 49 |
+
% ============================================================
|
| 50 |
+
|
| 51 |
+
@inproceedings{nguyen-ha-etal-2013-vndt-first,
|
| 52 |
+
title = "Building a Treebank for {V}ietnamese Dependency Parsing",
|
| 53 |
+
author = "Nguyen, T. L. and Ha, M. L. and Nguyen, V. H. and Nguyen, T. M. H. and Le-Hong, P.",
|
| 54 |
+
booktitle = "Proceedings of the 10th IEEE RIVF International Conference on Computing and Communication Technologies",
|
| 55 |
+
year = "2013",
|
| 56 |
+
pages = "147--151",
|
| 57 |
+
publisher = "IEEE",
|
| 58 |
+
url = "https://ieeexplore.ieee.org/document/6719884",
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
@inproceedings{nguyen-nguyen-etal-2014-vndt,
|
| 62 |
+
title = "From Treebank Conversion to Automatic Dependency Parsing for {V}ietnamese",
|
| 63 |
+
author = "Nguyen, Dat Quoc and Nguyen, Dai Quoc and Pham, Son Bao and Nguyen, Phuong-Thai and Nguyen, Minh Le",
|
| 64 |
+
booktitle = "Proceedings of the 19th International Conference on Application of Natural Language to Information Systems ({NLDB} 2014)",
|
| 65 |
+
year = "2014",
|
| 66 |
+
pages = "196--207",
|
| 67 |
+
publisher = "Springer",
|
| 68 |
+
series = "LNCS",
|
| 69 |
+
volume = "8455",
|
| 70 |
+
url = "https://link.springer.com/chapter/10.1007/978-3-319-07983-7_26",
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
@inproceedings{nguyen-2018-bktreebank,
|
| 74 |
+
title = "{BKT}reebank: Building a {V}ietnamese Dependency Treebank",
|
| 75 |
+
author = "Nguyen, Kiem-Hieu",
|
| 76 |
+
booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
|
| 77 |
+
year = "2018",
|
| 78 |
+
url = "https://aclanthology.org/L18-1341/",
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
@inproceedings{truong-etal-2022-conversion,
|
| 82 |
+
title = "Converting a Constituency Treebank to Dependency Treebank for {V}ietnamese",
|
| 83 |
+
author = "Truong, et al.",
|
| 84 |
+
booktitle = "IEEE Conference Proceedings",
|
| 85 |
+
year = "2022",
|
| 86 |
+
url = "https://ieeexplore.ieee.org/document/10013806/",
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
% ============================================================
|
| 90 |
+
% Category C: VLSP Shared Tasks
|
| 91 |
+
% ============================================================
|
| 92 |
+
|
| 93 |
+
@inproceedings{ha-nguyen-etal-2020-vlsp-udp,
|
| 94 |
+
title = "{VLSP} 2020 Shared Task: Universal Dependency Parsing for {V}ietnamese",
|
| 95 |
+
author = "Ha, My Linh and Nguyen, Thi Minh Huyen and Vu, Xuan Luong and Nguyen, Thi Luong and Phan, Thi Hue and Le, Van Cuong",
|
| 96 |
+
booktitle = "Proceedings of the 7th International Workshop on Vietnamese Language and Speech Processing ({VLSP} 2020)",
|
| 97 |
+
year = "2020",
|
| 98 |
+
pages = "77--83",
|
| 99 |
+
url = "https://aclanthology.org/2020.vlsp-1.15/",
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
% ============================================================
|
| 103 |
+
% Category D: Error Analysis and Quality Studies
|
| 104 |
+
% ============================================================
|
| 105 |
+
|
| 106 |
+
@inproceedings{nguyen-etal-2024-hpsg-vietnamese,
|
| 107 |
+
title = "An Attempt to Develop a Neural Parser based on Simplified Head-Driven Phrase Structure Grammar on {V}ietnamese",
|
| 108 |
+
author = "Nguyen, Duc-Vu and Phan, Thang Chau and Nguyen, Quoc-Nam and Nguyen, Kiet Van and Nguyen, Ngan Luu-Thuy",
|
| 109 |
+
booktitle = "Proceedings of SoICT 2024",
|
| 110 |
+
year = "2024",
|
| 111 |
+
note = "arXiv:2411.17270",
|
| 112 |
+
url = "https://arxiv.org/abs/2411.17270",
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
@article{nguyen-nguyen-2019-error-analysis,
|
| 116 |
+
title = "Error Analysis for {V}ietnamese Dependency Parsing",
|
| 117 |
+
author = "Nguyen, Kiet Van and Nguyen, Ngan Luu-Thuy",
|
| 118 |
+
journal = "arXiv preprint arXiv:1911.03724",
|
| 119 |
+
year = "2019",
|
| 120 |
+
url = "https://arxiv.org/abs/1911.03724",
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
% ============================================================
|
| 124 |
+
% Category E: NLP Toolkits
|
| 125 |
+
% ============================================================
|
| 126 |
+
|
| 127 |
+
@inproceedings{vu-nguyen-etal-2018-vncorenlp,
|
| 128 |
+
title = "{VnCoreNLP}: A {V}ietnamese Natural Language Processing Toolkit",
|
| 129 |
+
author = "Vu, Thanh and Nguyen, Dat Quoc and Nguyen, Dai Quoc and Dras, Mark and Johnson, Mark",
|
| 130 |
+
booktitle = "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations",
|
| 131 |
+
year = "2018",
|
| 132 |
+
url = "https://aclanthology.org/N18-5012/",
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
@inproceedings{nguyen-nguyen-2021-phonlp,
|
| 136 |
+
title = "{PhoNLP}: A Joint Multi-task Learning Model for {V}ietnamese Part-of-Speech Tagging, Named Entity Recognition and Dependency Parsing",
|
| 137 |
+
author = "Nguyen, Linh The and Nguyen, Dat Quoc",
|
| 138 |
+
booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations",
|
| 139 |
+
year = "2021",
|
| 140 |
+
url = "https://aclanthology.org/2021.naacl-demos.1/",
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
% ============================================================
|
| 144 |
+
% Category F: Supplementary Resources
|
| 145 |
+
% ============================================================
|
| 146 |
+
|
| 147 |
+
@inproceedings{bert-sentence-recommendation-2023,
|
| 148 |
+
title = "{BERT}-Based Sentence Recommendation for Building {V}ietnamese Universal Dependency Treebank",
|
| 149 |
+
booktitle = "Future Data and Security Engineering ({FDSE} 2023)",
|
| 150 |
+
year = "2023",
|
| 151 |
+
publisher = "Springer",
|
| 152 |
+
url = "https://link.springer.com/chapter/10.1007/978-981-99-8296-7_28",
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
@inproceedings{vlo-2021,
|
| 156 |
+
title = "{VLO} V1.1 -- A {V}ietnamese Lexicon Ontology for Universal Dependency Parsing",
|
| 157 |
+
booktitle = "IEEE Conference Proceedings",
|
| 158 |
+
year = "2021",
|
| 159 |
+
url = "https://ieeexplore.ieee.org/document/9353080/",
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
@article{do-nguyen-2021-semantic-dep,
|
| 163 |
+
title = "Sentential Semantic Dependency Parsing for {V}ietnamese",
|
| 164 |
+
author = "Do and Nguyen",
|
| 165 |
+
journal = "SN Computer Science",
|
| 166 |
+
year = "2021",
|
| 167 |
+
publisher = "Springer",
|
| 168 |
+
url = "https://link.springer.com/article/10.1007/s42979-021-00715-4",
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
@inproceedings{nguyen-vu-etal-2017-segmentation-pos,
|
| 172 |
+
title = "From Word Segmentation to {POS} Tagging for {V}ietnamese",
|
| 173 |
+
author = "Nguyen, Dat Quoc and Vu, Thanh and Nguyen, Dai Quoc and Dras, Mark and Johnson, Mark",
|
| 174 |
+
booktitle = "Proceedings of the Australasian Language Technology Association Workshop ({ALTA} 2017)",
|
| 175 |
+
year = "2017",
|
| 176 |
+
url = "https://aclanthology.org/U17-1013/",
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
@inproceedings{le-hong-etal-2010-tag-extraction,
|
| 180 |
+
title = "Automated Extraction of Tree Adjoining Grammars from a Treebank for {V}ietnamese",
|
| 181 |
+
author = "Le-Hong, Phuong and Nguyen, Thi Minh Huyen and Nguyen, Phuong Thai and Roussanaly, Azim",
|
| 182 |
+
booktitle = "Proceedings of the 10th International Workshop on Tree Adjoining Grammar and Related Frameworks ({TAG}+10)",
|
| 183 |
+
year = "2010",
|
| 184 |
+
url = "https://aclanthology.org/W10-4421/",
|
| 185 |
+
}
|
references/research_vietnamese_ud_annotation/comparison.md
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Comparison Tables: Vietnamese UD Annotation Guidelines
|
| 2 |
+
|
| 3 |
+
## Table 1: Vietnamese Treebank Annotation Schemes
|
| 4 |
+
|
| 5 |
+
| Feature | VietTreebank | NIIVTB | VnDT | UD_Vietnamese-VTB | BKTreebank | UDD-1 |
|
| 6 |
+
|---------|:------------:|:------:|:----:|:-----------------:|:----------:|:-----:|
|
| 7 |
+
| **Type** | Constituency | Constituency | Dependency | Dependency (UD) | Dependency | Dependency (UD) |
|
| 8 |
+
| **Size** | ~10,374 sent | 20,588 sent | 10,200 sent | 3,323 sent | ~6,900 sent | 10,000 sent |
|
| 9 |
+
| **Domain** | News | News | News | News | News | Legal |
|
| 10 |
+
| **POS tags** | Custom | 33 | Custom | 17 UPOS | Custom | 15 UPOS + XPOS |
|
| 11 |
+
| **Dep labels** | N/A | N/A | 33 | 50 subtypes | Custom | 77 subtypes |
|
| 12 |
+
| **Annotation** | Manual | Manual | Auto-converted | Auto-converted | Manual | Silver (neural) |
|
| 13 |
+
| **IAA** | -- | >90% | -- | -- | >90% | N/A |
|
| 14 |
+
| **Format** | Bracket | Bracket | CoNLL | CoNLL-U | Custom | CoNLL-U |
|
| 15 |
+
| **UD compliant** | No | No | No | Yes | No | Yes |
|
| 16 |
+
| **Public** | Via VLSP | CC BY-NC-SA 4.0 | Research only | CC BY-SA 4.0 | Limited | HuggingFace |
|
| 17 |
+
| **Year** | 2009 | 2016-2018 | 2014 | 2016 | 2018 | 2026 |
|
| 18 |
+
|
| 19 |
+
## Table 2: POS Tagset Comparison
|
| 20 |
+
|
| 21 |
+
| Vietnamese XPOS | UPOS (VTB) | UPOS (UDD-1) | NIIVTB Tags | Description |
|
| 22 |
+
|:---------------:|:----------:|:------------:|:-----------:|-------------|
|
| 23 |
+
| N | NOUN | NOUN | N, Nc, Nb | Common noun |
|
| 24 |
+
| Np | PROPN | PROPN | NNP | Proper noun |
|
| 25 |
+
| Nc | NOUN | NOUN | Nc | Classifier noun |
|
| 26 |
+
| Nu | NOUN | NOUN | Nb | Unit noun |
|
| 27 |
+
| V | VERB | VERB | V, Ve, Vc | Verb |
|
| 28 |
+
| A | ADJ | ADJ | A | Adjective |
|
| 29 |
+
| P | PRON | PRON | P | Pronoun |
|
| 30 |
+
| R | ADV | ADV | R | Adverb |
|
| 31 |
+
| L | DET | DET | D | Determiner |
|
| 32 |
+
| M | NUM | NUM | Num | Numeral |
|
| 33 |
+
| E | ADP | ADP | E | Preposition |
|
| 34 |
+
| C | SCONJ | CCONJ* | C | Conjunction |
|
| 35 |
+
| CC | CCONJ | CCONJ | CC | Coordinating conjunction |
|
| 36 |
+
| SC | -- | SCONJ | SC | Subordinating conjunction |
|
| 37 |
+
| I | INTJ | INTJ | I | Interjection |
|
| 38 |
+
| T | AUX/PART | PART/AUX | T | Auxiliary/particle |
|
| 39 |
+
| Y | X | X | Y | Abbreviation |
|
| 40 |
+
| X | X | X | X | Unknown |
|
| 41 |
+
| CH | PUNCT | PUNCT | CH | Punctuation |
|
| 42 |
+
|
| 43 |
+
*Note: UDD-1 maps `C` to CCONJ while VTB uses it for subordinating. UDD-1 adds `SC` for SCONJ.
|
| 44 |
+
|
| 45 |
+
## Table 3: Auxiliary Verb Comparison
|
| 46 |
+
|
| 47 |
+
| Lemma | VTB (12) | UDD-1 (20) | Function | Relation |
|
| 48 |
+
|-------|:--------:|:----------:|----------|----------|
|
| 49 |
+
| là | Yes | Yes | Copula | cop |
|
| 50 |
+
| được | Yes | Yes | Passive (favorable) / ability | aux:pass / aux |
|
| 51 |
+
| bị | Yes | Yes | Passive (unfavorable) | aux:pass |
|
| 52 |
+
| phải | Yes | Yes | Obligation (must) | aux |
|
| 53 |
+
| có thể | Yes | Yes | Ability (can) | aux |
|
| 54 |
+
| cần | Yes | Yes | Necessity (need) | aux |
|
| 55 |
+
| nên | Yes | Yes | Advice (should) | aux |
|
| 56 |
+
| muốn | Yes | Yes | Desire (want) | aux |
|
| 57 |
+
| không thể | Yes | Yes | Inability | aux |
|
| 58 |
+
| chưa thể | Yes | Yes | Not yet able | aux |
|
| 59 |
+
| chắc chắn | Yes | Yes | Certainty | aux |
|
| 60 |
+
| có vẻ | Yes | Yes | Evidential (seem) | aux |
|
| 61 |
+
| định | -- | Yes | Intention (intend) | aux |
|
| 62 |
+
| giả | -- | Yes | Hypothetical | aux |
|
| 63 |
+
| quyết | -- | Yes | Determination | aux |
|
| 64 |
+
| thôi | -- | Yes | Cessation | aux |
|
| 65 |
+
| thể | -- | Yes | Possibility | aux |
|
| 66 |
+
| xong | -- | Yes | Completion | aux |
|
| 67 |
+
| nghĩa là | -- | Yes | Meaning (i.e.) | aux |
|
| 68 |
+
| nhằm | -- | Yes | Purpose (aim to) | aux |
|
| 69 |
+
|
| 70 |
+
## Table 4: Vietnamese-Specific Dependency Subtypes
|
| 71 |
+
|
| 72 |
+
| Subtype | VTB Count | VTB % | Direction | POS Pattern | Description |
|
| 73 |
+
|---------|----------:|------:|-----------|-------------|-------------|
|
| 74 |
+
| compound:svc | 511 | 1% | 100% L→R | VERB-VERB (89%) | Serial verb construction |
|
| 75 |
+
| compound:vmod | 721 | 1% | 100% L→R | NOUN-VERB (96%) | Verbal modifier of noun |
|
| 76 |
+
| compound:redup | 4 | <0.1% | 100% L→R | VERB-VERB/ADV | Reduplication compound |
|
| 77 |
+
| compound:pron | -- | -- | -- | -- | Pronominal compound |
|
| 78 |
+
| compound:prt | -- | -- | -- | -- | Particle compound |
|
| 79 |
+
| acl:tonp | 97 | <0.2% | 100% L→R | NOUN-VERB (78%) | Vietnamese adnominal clause |
|
| 80 |
+
| det:pmod | -- | -- | L→R | NOUN-PRON | Postnominal demonstrative |
|
| 81 |
+
| clf | 250 | 0.4% | 83% L→R | NUM-NOUN (53%) | Classifier |
|
| 82 |
+
| clf:det | -- | -- | -- | DET-NOUN | Classifier with determiner |
|
| 83 |
+
| flat:redup | 3 | <0.1% | 100% L→R | VERB-ADV/VERB | Reduplicative flat |
|
| 84 |
+
| flat:date | -- | -- | L→R | -- | Date expression |
|
| 85 |
+
| flat:time | -- | -- | L→R | -- | Time expression |
|
| 86 |
+
| flat:name | -- | -- | L→R | -- | Multi-word name |
|
| 87 |
+
| advmod:neg | -- | -- | R→L | VERB-ADV | Negative adverb |
|
| 88 |
+
| advmod:adj | -- | -- | -- | ADJ-ADV | Adjectival adverb |
|
| 89 |
+
| obl:tmod | -- | -- | -- | VERB-NOUN | Temporal oblique |
|
| 90 |
+
|
| 91 |
+
## Table 5: VTB POS Token Distribution
|
| 92 |
+
|
| 93 |
+
| UPOS | Tokens | % | Lemmas | Top Lemma | Key Ambiguity |
|
| 94 |
+
|------|-------:|--:|-------:|-----------|---------------|
|
| 95 |
+
| NOUN | 16,489 | 28% | 2,694 | người | ông (NOUN/PRON), năm (NOUN/NUM), sau (NOUN/ADP/ADJ) |
|
| 96 |
+
| VERB | 10,868 | 19% | 2,122 | có | có (VERB/ADV/PART), về (VERB/ADP/ADV), cho (ADP/VERB/ADV) |
|
| 97 |
+
| PUNCT | ~5,000+ | ~9% | -- | . | -- |
|
| 98 |
+
| ADV | 4,144 | 7% | 177 | không | được (ADV/AUX/VERB), cũng (ADV) |
|
| 99 |
+
| ADP | 3,392 | 6% | 45 | của | cho (ADP/VERB), về (ADP/VERB), đến (ADP/VERB) |
|
| 100 |
+
| ADJ | 3,380 | 6% | 1,058 | nhiều | nhiều (ADJ/DET), hơn (ADJ/ADV) |
|
| 101 |
+
| PROPN | 2,515 | 4% | 744 | Hùng | -- |
|
| 102 |
+
| PRON | 2,257 | 4% | 66 | đó | đó (PRON), nay (PRON) |
|
| 103 |
+
| NUM | 1,828 | 3% | 308 | một | một (NUM/DET) |
|
| 104 |
+
| SCONJ | 1,511 | 3% | 72 | thì | là (SCONJ/AUX/CCONJ), nhưng (SCONJ/CCONJ) |
|
| 105 |
+
| AUX | 1,282 | 2% | 13 | là | là (AUX/SCONJ), được (AUX/ADV/VERB), phải (AUX/ADJ/VERB) |
|
| 106 |
+
| DET | 969 | 2% | 37 | những | -- |
|
| 107 |
+
| CCONJ | 730 | 1% | 6 | và | -- |
|
| 108 |
+
| PART | 315 | 1% | 57 | ngay | -- |
|
| 109 |
+
| INTJ | -- | <1% | -- | -- | -- |
|
| 110 |
+
| SYM | -- | <1% | -- | -- | -- |
|
| 111 |
+
| X | -- | <1% | -- | -- | -- |
|
| 112 |
+
|
| 113 |
+
## Table 6: VTB Dependency Relation Distribution
|
| 114 |
+
|
| 115 |
+
| Relation | Count | % | Direction | Top POS Pair |
|
| 116 |
+
|----------|------:|--:|-----------|-------------|
|
| 117 |
+
| obj | 4,362 | 8% | 100% L→R | VERB-NOUN (87%) |
|
| 118 |
+
| nsubj | 4,054 | 7% | 100% R→L | VERB-NOUN (54%), VERB-PROPN (17%) |
|
| 119 |
+
| advmod | 3,188 | 5% | 80% R→L | VERB-ADV (73%) |
|
| 120 |
+
| case | 3,146 | 5% | 100% R→L | NOUN-ADP (74%) |
|
| 121 |
+
| nmod | 2,505 | 4% | 99% L→R | NOUN-NOUN (76%) |
|
| 122 |
+
| compound | 1,962 | 3% | 99% L→R | NOUN-NOUN (59%) |
|
| 123 |
+
| amod | 1,126 | 2% | 94% L→R | NOUN-ADJ (92%) |
|
| 124 |
+
| det | 1,052 | 2% | 100% R→L | NOUN-DET (88%) |
|
| 125 |
+
| conj | -- | -- | L→R | -- |
|
| 126 |
+
| cop | 481 | 1% | 98% R→L | NOUN-AUX (71%) |
|
| 127 |
+
| clf | 250 | 0.4% | 83% L→R | NUM-NOUN (53%) |
|
| 128 |
+
|
| 129 |
+
## Table 7: NIIVTB Annotation Scheme Detail
|
| 130 |
+
|
| 131 |
+
| Layer | Category | Count | Examples |
|
| 132 |
+
|-------|----------|------:|---------|
|
| 133 |
+
| POS Tags | Total | 33 | N, Np, Nc, Nu, V, Ve, Vc, A, P, R, L/D, M/Num, E, C, CC, SC, I, T, Y, X, CH |
|
| 134 |
+
| Phrase Types | Total | 18 | NP, VP, PP, ADJP, ADVP, S, SBAR, QP, etc. |
|
| 135 |
+
| Functional Tags | Total | 22 | SBJ, DOB, IOB, TMP, LOC, MNR, PRP, DIR, EXT, etc. |
|
| 136 |
+
| Null Elements | Total | 7 | Traces, ellipses, null complementizers |
|
| 137 |
+
| Word-Internal | Subtags | -- | Sino-Vietnamese + native compound elements |
|
references/research_vietnamese_ud_annotation/guidelines_summary.md
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Vietnamese UD Annotation Guidelines: Detailed Summary
|
| 2 |
+
|
| 3 |
+
**Last updated**: 2026-02-08
|
| 4 |
+
|
| 5 |
+
This document synthesizes the annotation conventions established across Vietnamese UD treebanks, focusing on decisions relevant to the UDD-1 project.
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## 1. Tokenization and Word Segmentation
|
| 10 |
+
|
| 11 |
+
### The Fundamental Challenge
|
| 12 |
+
|
| 13 |
+
Vietnamese is an **isolating language** where spaces separate **syllables**, not words. A single word can consist of multiple syllables separated by spaces:
|
| 14 |
+
|
| 15 |
+
- "điện thoại" (telephone) = 2 syllables, 1 word
|
| 16 |
+
- "công ty" (company) = 2 syllables, 1 word
|
| 17 |
+
- "Hồ Chí Minh" (proper name) = 3 syllables, 1 word
|
| 18 |
+
|
| 19 |
+
### UD Decision
|
| 20 |
+
|
| 21 |
+
The UD project decided that **words with spaces should be allowed** in Vietnamese treebanks. The alternative (treating all polysyllabic words as multiword expressions) would artificially make Vietnamese look different from all other languages. VTB contains **4,152 word types with spaces**.
|
| 22 |
+
|
| 23 |
+
### Segmentation Conventions
|
| 24 |
+
|
| 25 |
+
- Word segmentation is a **prerequisite** step before POS tagging and parsing
|
| 26 |
+
- Inter-annotator agreement for segmentation exceeds 90% (Nguyen et al., 2016)
|
| 27 |
+
- Tools: VnCoreNLP, Underthesea, RDRPOSTagger perform automatic segmentation
|
| 28 |
+
- Special cases:
|
| 29 |
+
- **Dates**: "28-2" as flat:date
|
| 30 |
+
- **Numbers and units**: specific segmentation rules
|
| 31 |
+
- **Abbreviations**: "TP.HCM" as single tokens
|
| 32 |
+
- **Hyphenated forms**: "knock-out" included (28 types in VTB)
|
| 33 |
+
|
| 34 |
+
### Implications for UDD-1
|
| 35 |
+
|
| 36 |
+
UDD-1 relies on Underthesea's word segmenter. Word segmentation errors propagate to all downstream annotations. The legal domain may contain domain-specific multi-word terms not seen in news training data.
|
| 37 |
+
|
| 38 |
+
---
|
| 39 |
+
|
| 40 |
+
## 2. POS Tagging Conventions
|
| 41 |
+
|
| 42 |
+
### 2.1 Vietnamese XPOS Tagset (18 tags)
|
| 43 |
+
|
| 44 |
+
Source: VLSP tagset, implemented in vnTagger and Underthesea.
|
| 45 |
+
|
| 46 |
+
| XPOS | Description | Examples |
|
| 47 |
+
|------|-------------|---------|
|
| 48 |
+
| N | Common noun | người (person), nhà (house) |
|
| 49 |
+
| Np | Proper noun | Hồ Chí Minh |
|
| 50 |
+
| Nc | Classifier noun | con, cái, chiếc |
|
| 51 |
+
| Nu | Unit noun | kg, mét, năm (year as unit) |
|
| 52 |
+
| V | Verb | có (have), làm (do) |
|
| 53 |
+
| A | Adjective | đẹp (beautiful), to (big) |
|
| 54 |
+
| P | Pronoun | tôi (I), họ (they) |
|
| 55 |
+
| R | Adverb | không (not), đã (already) |
|
| 56 |
+
| L | Determiner | các, những, mỗi |
|
| 57 |
+
| M | Numeral | một (one), hai (two) |
|
| 58 |
+
| E | Preposition | trong (in), trên (on) |
|
| 59 |
+
| C | Subordinating conjunction | vì (because), nếu (if) |
|
| 60 |
+
| CC | Coordinating conjunction | và (and), nhưng (but) |
|
| 61 |
+
| I | Interjection | ôi, chào |
|
| 62 |
+
| T | Auxiliary/modal | phải (must), có thể (can) |
|
| 63 |
+
| Y | Abbreviation | TP.HCM |
|
| 64 |
+
| Z | Bound morpheme | |
|
| 65 |
+
| X | Unknown | |
|
| 66 |
+
| CH | Punctuation | . , ! |
|
| 67 |
+
|
| 68 |
+
### 2.2 UPOS Mapping Decisions
|
| 69 |
+
|
| 70 |
+
Key mapping decisions and ambiguities:
|
| 71 |
+
|
| 72 |
+
**Unambiguous mappings:**
|
| 73 |
+
- N → NOUN, Np → PROPN, V → VERB, A → ADJ, P → PRON
|
| 74 |
+
- R → ADV, L → DET, M → NUM, E → ADP, I → INTJ, CH → PUNCT
|
| 75 |
+
|
| 76 |
+
**Ambiguous mappings requiring context:**
|
| 77 |
+
- **T (auxiliary/particle)**: Maps to AUX when the word is in the auxiliary list, PART otherwise. VTB uses 12 AUX lemmas; UDD-1 extends to 20.
|
| 78 |
+
- **C (conjunction)**: VTB uses C for subordinating conjunctions (→ SCONJ). UDD-1 maps C → CCONJ and introduces SC → SCONJ.
|
| 79 |
+
- **Nc (classifier noun)**: Maps to NOUN in UPOS (no classifier UPOS exists). Identified via `clf` relation.
|
| 80 |
+
- **Nu (unit noun)**: Maps to NOUN.
|
| 81 |
+
|
| 82 |
+
**High-frequency POS ambiguity in Vietnamese:**
|
| 83 |
+
|
| 84 |
+
| Word | Readings | VTB Distribution |
|
| 85 |
+
|------|----------|-----------------|
|
| 86 |
+
| có | VERB (547), ADV (35), PART (5) | "have/exist" vs. "indeed" vs. emphasis |
|
| 87 |
+
| được | AUX (251), ADV (205), VERB (26) | passive/able vs. "also" vs. "receive" |
|
| 88 |
+
| là | AUX (497), SCONJ (89), CCONJ (7), PART (3) | copula vs. subordinator vs. coordinator |
|
| 89 |
+
| phải | AUX (226), ADJ (16), VERB (10) | "must" vs. "right" vs. "suffer" |
|
| 90 |
+
| về | VERB (159), ADP (121), ADV (16) | "return" vs. "about/regarding" vs. directional |
|
| 91 |
+
| cho | ADP (263), VERB (148), ADV (11) | "for" vs. "give" vs. "indeed" |
|
| 92 |
+
| đi | VERB (222), ADV (20), PART (3) | "go" vs. directional vs. imperative particle |
|
| 93 |
+
|
| 94 |
+
---
|
| 95 |
+
|
| 96 |
+
## 3. Dependency Relation Conventions
|
| 97 |
+
|
| 98 |
+
### 3.1 Core Arguments
|
| 99 |
+
|
| 100 |
+
**Subject (nsubj):**
|
| 101 |
+
- 4,054 instances (7%) in VTB
|
| 102 |
+
- 100% right-to-left (subject precedes predicate)
|
| 103 |
+
- Average distance: 2.71 words
|
| 104 |
+
- Top patterns: VERB-NOUN (54%), VERB-PROPN (17%), VERB-PRON (13%)
|
| 105 |
+
- Subtypes: `nsubj:pass` (passive subject), `nsubj:outer` (outer subject in multiple-subject constructions)
|
| 106 |
+
|
| 107 |
+
**Object (obj):**
|
| 108 |
+
- 4,362 instances (8%) in VTB
|
| 109 |
+
- 100% left-to-right (object follows verb)
|
| 110 |
+
- Average distance: 1.48 words
|
| 111 |
+
- Top patterns: VERB-NOUN (87%), VERB-PROPN (4%), VERB-PRON (4%)
|
| 112 |
+
|
| 113 |
+
**Indirect Object (iobj):**
|
| 114 |
+
- Vietnamese ditransitive: "Hắn tặng cô ấy hoa" (He gave her flowers)
|
| 115 |
+
- `iobj(tặng, cô)` — "cô" (her) is indirect object
|
| 116 |
+
|
| 117 |
+
### 3.2 Copula and Passive
|
| 118 |
+
|
| 119 |
+
**Copula (cop):**
|
| 120 |
+
- Only lemma: **là** (is)
|
| 121 |
+
- 481 instances (1%)
|
| 122 |
+
- 98% right-to-left (là precedes predicate)
|
| 123 |
+
- Pattern: Predicate ← cop ← là
|
| 124 |
+
- Example: "Lan là sinh_viên" → cop(sinh_viên, là), nsubj(sinh_viên, Lan)
|
| 125 |
+
|
| 126 |
+
**Passive (aux:pass):**
|
| 127 |
+
- **được** (favorable passive): "Anh ấy được khen" (He was praised)
|
| 128 |
+
- **bị** (unfavorable passive): "Anh ấy bị phạt" (He was punished)
|
| 129 |
+
- Combined: 366 instances (29% of AUX tokens)
|
| 130 |
+
|
| 131 |
+
### 3.3 Vietnamese-Specific Constructions
|
| 132 |
+
|
| 133 |
+
**Serial Verb Constructions (compound:svc):**
|
| 134 |
+
- 511 instances (1%) in VTB
|
| 135 |
+
- 100% left-to-right
|
| 136 |
+
- VERB-VERB (89%), VERB-ADV (9%), VERB-ADP (1%)
|
| 137 |
+
- Examples:
|
| 138 |
+
- "chuyển về tỉnh" (transfer to province) → compound:svc(chuyển, về)
|
| 139 |
+
- "anh gọi lại" (he calls back) → compound:svc(gọi, lại)
|
| 140 |
+
|
| 141 |
+
**Verbal Modifier of Noun (compound:vmod):**
|
| 142 |
+
- 721 instances (1%)
|
| 143 |
+
- 100% left-to-right
|
| 144 |
+
- NOUN-VERB (96%)
|
| 145 |
+
- Example: "sóng nhồi" (crashing waves) → compound:vmod(sóng, nhồi)
|
| 146 |
+
|
| 147 |
+
**Reduplication (compound:redup, flat:redup):**
|
| 148 |
+
- Very rare in VTB (4 + 3 instances)
|
| 149 |
+
- Full reduplication: "đi đi lại lại" (go back and forth)
|
| 150 |
+
- Partial reduplication: "xanh xanh" (somewhat blue)
|
| 151 |
+
|
| 152 |
+
**Classifier (clf):**
|
| 153 |
+
- 250 instances (0.4%)
|
| 154 |
+
- 83% left-to-right
|
| 155 |
+
- Classifier groups with numeral, not noun
|
| 156 |
+
- Example: "một con đường" (one [clf] road) → clf(một, con), nmod(đường, một)
|
| 157 |
+
|
| 158 |
+
**Demonstrative Determiners (det:pmod):**
|
| 159 |
+
- Postnominal demonstratives: ấy, kia, này, đó
|
| 160 |
+
- Example: "Anh này rất cao" → det:pmod(Anh, này)
|
| 161 |
+
- Contrasts with preverbal determiners (det): những, các, mỗi
|
| 162 |
+
|
| 163 |
+
### 3.4 Clausal Modifiers
|
| 164 |
+
|
| 165 |
+
**Adnominal Clause (acl:tonp):**
|
| 166 |
+
- 97 instances in VTB
|
| 167 |
+
- 100% left-to-right
|
| 168 |
+
- NOUN-VERB (78%), NOUN-ADJ (19%)
|
| 169 |
+
- Vietnamese-specific construction
|
| 170 |
+
|
| 171 |
+
**Clausal Complement (ccomp):**
|
| 172 |
+
- "Cô ấy nói rằng anh thích chơi bài"
|
| 173 |
+
- ccomp(nói, thích) — clausal complement with subordinator "rằng"
|
| 174 |
+
|
| 175 |
+
**Open Clausal Complement (xcomp):**
|
| 176 |
+
- "Anh ấy cho bò ăn" (He feeds the cow)
|
| 177 |
+
- xcomp(cho, ăn)
|
| 178 |
+
|
| 179 |
+
### 3.5 Coordination
|
| 180 |
+
|
| 181 |
+
- First conjunct is head
|
| 182 |
+
- Coordinator attaches to immediately following conjunct
|
| 183 |
+
- Example: "Lan cao và xinh_xắn" → conj(cao, xinh_xắn), cc(xinh_xắn, và)
|
| 184 |
+
- Vietnamese CCONJ lemmas: và (656), hay (39), hoặc (21), là (7), cũng (6)
|
| 185 |
+
|
| 186 |
+
### 3.6 Relations NOT Used in VTB
|
| 187 |
+
|
| 188 |
+
- `orphan` — not attested
|
| 189 |
+
- `goeswith` — not attested
|
| 190 |
+
- `reparandum` — not attested (no spoken/informal data)
|
| 191 |
+
|
| 192 |
+
---
|
| 193 |
+
|
| 194 |
+
## 4. Morphological Features
|
| 195 |
+
|
| 196 |
+
Vietnamese has virtually no morphological inflection. The only feature used in VTB:
|
| 197 |
+
|
| 198 |
+
| Feature | Value | Count | Applied to |
|
| 199 |
+
|---------|-------|------:|-----------|
|
| 200 |
+
| NumType | Card | 1,300 | NUM tokens |
|
| 201 |
+
|
| 202 |
+
All other morphological feature slots (FEATS) are empty (`_`). This is linguistically accurate — Vietnamese expresses tense, aspect, mood, and number through separate function words rather than inflection.
|
| 203 |
+
|
| 204 |
+
**Missing but potentially useful features:**
|
| 205 |
+
- `Polarity=Neg` for negative markers (không, chẳng, chưa)
|
| 206 |
+
- `Mood=Imp` for imperative particles
|
| 207 |
+
- `Voice=Pass` for passive constructions
|
| 208 |
+
- `Tense=Past/Fut` for temporal markers (đã, sẽ)
|
| 209 |
+
|
| 210 |
+
---
|
| 211 |
+
|
| 212 |
+
## 5. Lemmatization
|
| 213 |
+
|
| 214 |
+
Vietnamese lemmatization is trivial due to the isolating nature of the language. The form/lemma ratio is 1.0 across all POS categories. Lemma = form in all cases.
|
| 215 |
+
|
| 216 |
+
---
|
| 217 |
+
|
| 218 |
+
## 6. Annotation Quality Considerations
|
| 219 |
+
|
| 220 |
+
### Known Issues in Existing Treebanks
|
| 221 |
+
|
| 222 |
+
1. **VTB conversion errors**: Automatic constituency-to-dependency conversion introduces systematic errors. The 15-point LAS gap between VnDT (79%) and VTB (65%) benchmarks is attributed to conversion quality.
|
| 223 |
+
|
| 224 |
+
2. **15% non-compliant trees**: HPSG analysis (2024) found ~15% of VietTreebank trees violate structural constraints.
|
| 225 |
+
|
| 226 |
+
3. **Limited documentation**: Vietnamese UD pages are sparse — no language-specific POS documentation, incomplete dep relation pages. Development occurs outside the UD repository.
|
| 227 |
+
|
| 228 |
+
4. **Inter-annotator agreement**: Both NIIVTB and BKTreebank report >90% IAA. VTB does not report IAA figures.
|
| 229 |
+
|
| 230 |
+
### Silver Treebank Considerations (Relevant to UDD-1)
|
| 231 |
+
|
| 232 |
+
1. **Parser quality ceiling**: VLSP 2020 best system achieves 76.27% LAS on news. Legal domain likely lower due to domain shift.
|
| 233 |
+
2. **Error propagation**: Word segmentation errors → POS errors → dependency errors.
|
| 234 |
+
3. **UPOS-XPOS consistency**: UDD-1's post-processing forces UPOS to match deprel constraints, creating 8.6% XPOS-UPOS mismatches.
|
| 235 |
+
4. **Extended auxiliary list**: UDD-1's 20-item AUX list vs. VTB's 12 may over-assign AUX tags.
|
| 236 |
+
|
| 237 |
+
---
|
| 238 |
+
|
| 239 |
+
## 7. Complete VTB Dependency Relation Inventory (50 subtypes)
|
| 240 |
+
|
| 241 |
+
### Universal Relations Used
|
| 242 |
+
acl, advcl, advmod, amod, appos, aux, case, cc, ccomp, clf, compound, conj, cop, csubj, dep, det, discourse, expl, fixed, flat, iobj, list, mark, nmod, nsubj, nummod, obj, obl, parataxis, punct, root, vocative, xcomp
|
| 243 |
+
|
| 244 |
+
### Language-Specific Subtypes
|
| 245 |
+
acl:relcl, acl:subj, acl:tmod, acl:tonp, advmod:adj, advmod:dir, advmod:neg, aux:pass, clf:det, compound:adj, compound:amod, compound:apr, compound:atov, compound:dir, compound:pron, compound:prt, compound:redup, compound:svc, compound:verbnoun, compound:vmod, compound:z, det:pmod, flat:date, flat:foreign, flat:name, flat:number, flat:redup, flat:time, mark:pcomp, nmod:poss, nsubj:nn, nsubj:pass, nsubj:xsubj, obl:agent, obl:tmod
|
| 246 |
+
|
| 247 |
+
### Relations NOT Used
|
| 248 |
+
orphan, goeswith, reparandum
|
references/research_vietnamese_ud_annotation/papers.md
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Papers: Vietnamese UD Annotation Guidelines
|
| 2 |
+
|
| 3 |
+
**Last updated**: 2026-02-08
|
| 4 |
+
|
| 5 |
+
## Category A: Vietnamese Constituency Treebanks (Foundation)
|
| 6 |
+
|
| 7 |
+
### [A1] Building a Large Syntactically-Annotated Corpus of Vietnamese
|
| 8 |
+
- **Authors**: Phuong-Thai Nguyen, Xuan-Luong Vu, Thi-Minh-Huyen Nguyen, Van-Hiep Nguyen, Hong-Phuong Le
|
| 9 |
+
- **Venue**: Third Linguistic Annotation Workshop (LAW III), ACL-IJCNLP 2009
|
| 10 |
+
- **Year**: 2009
|
| 11 |
+
- **URL**: https://aclanthology.org/W09-3035/
|
| 12 |
+
- **Summary**: Foundational paper for VietTreebank (VLSP treebank). Presents construction of a large syntactically-annotated corpus from Tuoi Tre newspaper. Addresses Vietnamese-specific challenges: absence of word delimiters, high syntactic ambiguity from the isolating nature of the language. Three annotation layers: word segmentation, POS tagging, bracketing. This is the source treebank for both VnDT and UD_Vietnamese-VTB.
|
| 13 |
+
- **Relevance**: Primary reference for UD_Vietnamese-VTB. Defines the original constituency annotation guidelines that were later converted to UD format.
|
| 14 |
+
|
| 15 |
+
### [A2] Vietnamese Treebank Construction and Entropy-Based Error Detection
|
| 16 |
+
- **Authors**: Phuong-Thai Nguyen, Anh-Cuong Le, Tu Bao Ho, et al.
|
| 17 |
+
- **Venue**: Language Resources and Evaluation (Springer), Vol. 49, pp. 487-519
|
| 18 |
+
- **Year**: 2015
|
| 19 |
+
- **URL**: https://link.springer.com/article/10.1007/s10579-015-9308-5
|
| 20 |
+
- **Summary**: Extended description of VietTreebank construction (~10,374 sentences). Proposes entropy-based method for error detection in treebank annotation. Addresses Vietnamese properties: lack of word delimiter and isolation creating high syntactic ambiguity. Three annotation layers with detailed guidelines for each.
|
| 21 |
+
- **Relevance**: Detailed methodology for Vietnamese treebank annotation quality control.
|
| 22 |
+
|
| 23 |
+
### [A3] Challenges and Solutions for Consistent Annotation of Vietnamese Treebank
|
| 24 |
+
- **Authors**: Quy Nguyen, Yusuke Miyao, Ha Le, Ngan Nguyen
|
| 25 |
+
- **Venue**: LREC 2016
|
| 26 |
+
- **Year**: 2016
|
| 27 |
+
- **URL**: https://aclanthology.org/L16-1243/
|
| 28 |
+
- **Summary**: NIIVTB treebank (~40,000 sentences). Documents linguistic challenges specific to Vietnamese and solutions through detailed annotation guidelines. Inter-annotator agreement >90%. Three layers: word segmentation, POS tagging, bracketing.
|
| 29 |
+
- **Relevance**: Most comprehensive paper on Vietnamese-specific annotation challenges and solutions.
|
| 30 |
+
|
| 31 |
+
### [A4] Ensuring Annotation Consistency and Accuracy for Vietnamese Treebank
|
| 32 |
+
- **Authors**: Quy T. Nguyen, Yusuke Miyao, Ha T. T. Le, Nhung T. H. Nguyen
|
| 33 |
+
- **Venue**: Language Resources and Evaluation (Springer)
|
| 34 |
+
- **Year**: 2018 (online 2017)
|
| 35 |
+
- **URL**: https://link.springer.com/article/10.1007/s10579-017-9398-3
|
| 36 |
+
- **Summary**: The most comprehensive paper on Vietnamese treebank annotation guidelines. Presents NIIVTB (20,588 sentences) with consistent annotation across word segmentation, POS tagging, and bracketing. Detailed annotation scheme with 33 POS tags, 18 phrase types, 22 functional tags, and 7 null element categories. Inter-annotator agreement and accuracy >90%.
|
| 37 |
+
- **Relevance**: Gold standard for Vietnamese annotation guideline documentation. Defines the detailed POS tagset and structural conventions.
|
| 38 |
+
|
| 39 |
+
## Category B: Vietnamese Dependency Treebanks
|
| 40 |
+
|
| 41 |
+
### [B1] Building a Treebank for Vietnamese Dependency Parsing
|
| 42 |
+
- **Authors**: T. L. Nguyen, M. L. Ha, V. H. Nguyen, T. M. H. Nguyen, P. Le-Hong
|
| 43 |
+
- **Venue**: RIVF 2013, IEEE
|
| 44 |
+
- **Year**: 2013
|
| 45 |
+
- **URL**: https://ieeexplore.ieee.org/document/6719884
|
| 46 |
+
- **Summary**: First paper on building a Vietnamese dependency treebank via conversion from constituency VietTreebank. Defines dependency labels by adapting Stanford dependency schema for Vietnamese grammar. ~3,000 sentences converted and manually edited. Evaluated with MaltParser.
|
| 47 |
+
- **Relevance**: Original dependency label design for Vietnamese.
|
| 48 |
+
|
| 49 |
+
### [B2] From Treebank Conversion to Automatic Dependency Parsing for Vietnamese
|
| 50 |
+
- **Authors**: Dat Quoc Nguyen, Dai Quoc Nguyen, Son Bao Pham, Phuong-Thai Nguyen, Minh Le Nguyen
|
| 51 |
+
- **Venue**: NLDB 2014, Springer LNCS vol. 8455, pp. 196-207
|
| 52 |
+
- **Year**: 2014
|
| 53 |
+
- **URL**: https://link.springer.com/chapter/10.1007/978-3-319-07983-7_26
|
| 54 |
+
- **PDF**: https://datquocnguyen.github.io/resources/NLDB2014.pdf
|
| 55 |
+
- **Summary**: VnDT treebank (10,200 sentences). Two-stage conversion: (1) head-percolation rules for each phrase, (2) dependency labeling. 33 dependency labels adapted from Stanford dependencies. 4.49% non-projective trees. Evaluated with MSTParser (UAS 79.08%, LAS 71.66% on gold POS).
|
| 56 |
+
- **Relevance**: Defines the VnDT dependency label set and conversion methodology. VnDT v1.1 is the primary Vietnamese parsing benchmark.
|
| 57 |
+
|
| 58 |
+
### [B3] BKTreebank: Building a Vietnamese Dependency Treebank
|
| 59 |
+
- **Authors**: Kiem-Hieu Nguyen
|
| 60 |
+
- **Venue**: LREC 2018
|
| 61 |
+
- **Year**: 2018
|
| 62 |
+
- **arXiv**: https://arxiv.org/abs/1710.05519
|
| 63 |
+
- **URL**: https://aclanthology.org/L18-1341/
|
| 64 |
+
- **Summary**: BKTreebank (~6,900 sentences), built from scratch (not via conversion). Three primary aspects: POS tagset design, dependency relation design, and annotation guidelines. Specifically addresses Vietnamese classifiers, serial verbs, topic-comment structure, and reduplication. 3 annotators, inter-annotator agreement >90%.
|
| 65 |
+
- **Relevance**: Most detailed paper specifically dedicated to Vietnamese dependency annotation guidelines. Directly addresses linguistic phenomena relevant to UDD-1.
|
| 66 |
+
|
| 67 |
+
### [B4] Converting a Constituency Treebank to Dependency Treebank for Vietnamese
|
| 68 |
+
- **Authors**: (Multiple authors)
|
| 69 |
+
- **Venue**: IEEE Conference
|
| 70 |
+
- **Year**: 2022
|
| 71 |
+
- **URL**: https://ieeexplore.ieee.org/document/10013806/
|
| 72 |
+
- **Summary**: New method for constituency-to-dependency conversion for Vietnamese. Designs new dependency labels and head-percolation rules. ~13% UAS and ~21% LAS improvement over previous conversion methods.
|
| 73 |
+
- **Relevance**: Improved conversion methodology relevant to understanding annotation quality in converted treebanks.
|
| 74 |
+
|
| 75 |
+
## Category C: UD Vietnamese-VTB and VLSP Shared Tasks
|
| 76 |
+
|
| 77 |
+
### [C1] VLSP 2020 Shared Task: Universal Dependency Parsing for Vietnamese
|
| 78 |
+
- **Authors**: Ha My Linh, Nguyen Thi Minh Huyen, Vu Xuan Luong, Nguyen Thi Luong, Phan Thi Hue, Le Van Cuong
|
| 79 |
+
- **Venue**: 7th International Workshop on Vietnamese Language and Speech Processing (VLSP 2020)
|
| 80 |
+
- **Year**: 2020
|
| 81 |
+
- **URL**: https://aclanthology.org/2020.vlsp-1.15/
|
| 82 |
+
- **Summary**: Overview paper for VLSP 2020 shared task on Vietnamese UD parsing. ~10,000 sentences with gold word segmentation and POS tags. UD v2 relations. CoNLL-U format. Best system: PhoBERT+ELMO/Biaffine at 76.27% LAS.
|
| 83 |
+
- **Relevance**: Defines the evaluation framework and data format used in Vietnamese UD parsing. The VLSP 2020 data is the training source for UDD-1's parser.
|
| 84 |
+
|
| 85 |
+
### [C2] VLSP 2019 Shared Task: Dependency Parsing for Vietnamese
|
| 86 |
+
- **Venue**: VLSP 2019
|
| 87 |
+
- **Year**: 2019
|
| 88 |
+
- **URL**: https://vlsp.org.vn/vlsp2019/eval/udp
|
| 89 |
+
- **Summary**: First VLSP shared task on Vietnamese dependency parsing. ~10,000 sentences with UD v2 labels in CoNLL-U format. Published a Vietnamese dependency annotation guideline for reference.
|
| 90 |
+
- **Relevance**: First formal shared task establishing Vietnamese UD parsing conventions.
|
| 91 |
+
|
| 92 |
+
## Category D: Error Analysis and Quality Studies
|
| 93 |
+
|
| 94 |
+
### [D1] An Attempt to Develop a Neural Parser based on Simplified HPSG on Vietnamese
|
| 95 |
+
- **Authors**: Duc-Vu Nguyen, Thang Chau Phan, Quoc-Nam Nguyen, Kiet Van Nguyen, Ngan Luu-Thuy Nguyen
|
| 96 |
+
- **Venue**: SoICT 2024
|
| 97 |
+
- **Year**: 2024
|
| 98 |
+
- **arXiv**: https://arxiv.org/abs/2411.17270
|
| 99 |
+
- **Summary**: Neural parser using simplified HPSG for Vietnamese. Found ~15% of constituency and dependency tree pairs in existing corpora fail HPSG compliance. Recommends linguistic expert involvement in treebank development.
|
| 100 |
+
- **Relevance**: Quantifies annotation quality issues in existing Vietnamese treebanks.
|
| 101 |
+
|
| 102 |
+
### [D2] Error Analysis for Vietnamese Dependency Parsing
|
| 103 |
+
- **Authors**: Kiet Van Nguyen, Ngan Luu-Thuy Nguyen
|
| 104 |
+
- **Venue**: KSE 2015 / arXiv 2019
|
| 105 |
+
- **Year**: 2015/2019
|
| 106 |
+
- **arXiv**: https://arxiv.org/abs/1911.03724
|
| 107 |
+
- **Summary**: Systematic error analysis for Vietnamese dependency parsing using MSTParser and MaltParser. Reveals specific weaknesses related to Vietnamese morphosyntax and annotation conventions.
|
| 108 |
+
- **Relevance**: Documents parser error patterns relevant to silver treebank quality assessment.
|
| 109 |
+
|
| 110 |
+
## Category E: NLP Toolkits and Systems
|
| 111 |
+
|
| 112 |
+
### [E1] VnCoreNLP: A Vietnamese Natural Language Processing Toolkit
|
| 113 |
+
- **Authors**: Thanh Vu, Dat Quoc Nguyen, Dai Quoc Nguyen, Mark Dras, Mark Johnson
|
| 114 |
+
- **Venue**: NAACL 2018 (Demonstrations)
|
| 115 |
+
- **Year**: 2018
|
| 116 |
+
- **URL**: https://aclanthology.org/N18-5012/
|
| 117 |
+
- **GitHub**: https://github.com/vncorenlp/VnCoreNLP
|
| 118 |
+
- **Summary**: Java NLP toolkit for word segmentation, POS tagging, NER, and dependency parsing. Uses VnDT treebank. LAS 73.53%, UAS 80.66%. References VLSP POS tagset documentation.
|
| 119 |
+
- **Relevance**: Widely-used toolkit following Vietnamese annotation conventions.
|
| 120 |
+
|
| 121 |
+
### [E2] PhoNLP: Joint Multi-task Learning for Vietnamese POS Tagging, NER and Dependency Parsing
|
| 122 |
+
- **Authors**: Linh The Nguyen, Dat Quoc Nguyen
|
| 123 |
+
- **Venue**: NAACL 2021 (Demonstrations)
|
| 124 |
+
- **Year**: 2021
|
| 125 |
+
- **URL**: https://aclanthology.org/2021.naacl-demos.1/
|
| 126 |
+
- **GitHub**: https://github.com/VinAIResearch/PhoNLP
|
| 127 |
+
- **Summary**: SOTA Vietnamese NLP system. Multi-task learning with PhoBERT. 79.11% LAS on VnDT v1.1. Trained on VnDT with corrected conversion errors.
|
| 128 |
+
- **Relevance**: Current SOTA parser for Vietnamese dependency parsing.
|
| 129 |
+
|
| 130 |
+
## Category F: Supplementary Resources
|
| 131 |
+
|
| 132 |
+
### [F1] BERT-Based Sentence Recommendation for Building Vietnamese UD Treebank
|
| 133 |
+
- **Venue**: FDSE 2023, Springer
|
| 134 |
+
- **Year**: 2023
|
| 135 |
+
- **URL**: https://link.springer.com/chapter/10.1007/978-981-99-8296-7_28
|
| 136 |
+
- **Summary**: BERT-based approach for selecting sentences likely to achieve high LAS when parsed, to improve efficiency of Vietnamese UD treebank construction.
|
| 137 |
+
|
| 138 |
+
### [F2] VLO V1.1 - A Vietnamese Lexicon Ontology for Universal Dependency Parsing
|
| 139 |
+
- **Venue**: IEEE Conference
|
| 140 |
+
- **Year**: 2021
|
| 141 |
+
- **URL**: https://ieeexplore.ieee.org/document/9353080/
|
| 142 |
+
- **Summary**: Vietnamese Lexicon Ontology providing semantic constraints for UD parsing. Rule-based transformation of dependency parse results.
|
| 143 |
+
|
| 144 |
+
### [F3] Sentential Semantic Dependency Parsing for Vietnamese
|
| 145 |
+
- **Authors**: Do, Nguyen
|
| 146 |
+
- **Venue**: SN Computer Science (Springer)
|
| 147 |
+
- **Year**: 2021
|
| 148 |
+
- **URL**: https://link.springer.com/article/10.1007/s42979-021-00715-4
|
| 149 |
+
- **Summary**: Semantic dependency parsing producing dependency graphs (not trees) for Vietnamese.
|
| 150 |
+
|
| 151 |
+
### [F4] From Word Segmentation to POS Tagging for Vietnamese
|
| 152 |
+
- **Authors**: Dat Quoc Nguyen, Thanh Vu, Dai Quoc Nguyen, Mark Dras, Mark Johnson
|
| 153 |
+
- **Venue**: ALTA 2017
|
| 154 |
+
- **Year**: 2017
|
| 155 |
+
- **URL**: https://aclanthology.org/U17-1013/
|
| 156 |
+
- **Summary**: Vietnamese word segmentation and POS tagging as prerequisite tasks for dependency parsing.
|
| 157 |
+
|
| 158 |
+
### [F5] Automated Extraction of Tree Adjoining Grammars from a Treebank for Vietnamese
|
| 159 |
+
- **Authors**: Phuong Le-Hong, Thi Minh Huyen Nguyen, Phuong Thai Nguyen, Azim Roussanaly
|
| 160 |
+
- **Venue**: TAG+10 Workshop
|
| 161 |
+
- **Year**: 2010
|
| 162 |
+
- **URL**: https://aclanthology.org/W10-4421/
|
| 163 |
+
- **Summary**: Extracts TAGs from Vietnamese treebank, demonstrating utility of the annotation scheme.
|
| 164 |
+
|
| 165 |
+
## Key Authors
|
| 166 |
+
|
| 167 |
+
| Author | Affiliation | Key Contributions |
|
| 168 |
+
|--------|------------|-------------------|
|
| 169 |
+
| Nguyen Thi Minh Huyen | VNU Hanoi | VietTreebank, UD_Vietnamese-VTB, VLSP shared tasks |
|
| 170 |
+
| Phuong-Thai Nguyen | VNU Hanoi | VietTreebank construction, error detection |
|
| 171 |
+
| Dat Quoc Nguyen | VinAI / U. Melbourne | VnDT, VnCoreNLP, PhoNLP |
|
| 172 |
+
| Quy T. Nguyen | NII Tokyo | NIIVTB annotation consistency |
|
| 173 |
+
| Yusuke Miyao | NII Tokyo / U. Tokyo | NIIVTB annotation methodology |
|
| 174 |
+
| Kiem-Hieu Nguyen | HUST Hanoi | BKTreebank annotation guidelines |
|
| 175 |
+
| Ha My Linh | VNU Hanoi | UD_Vietnamese-VTB, VLSP 2020 |
|
| 176 |
+
| Le Hong Phuong | VNU Hanoi | UD_Vietnamese-VTB, vnTagger |
|
| 177 |
+
| Kiet Van Nguyen | UIT HCMC | Error analysis, HPSG parsing |
|
scripts/convert_to_ud.py
DELETED
|
@@ -1,655 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Convert sentences to Universal Dependencies format compatible with HuggingFace.
|
| 3 |
-
Structure follows: https://huggingface.co/datasets/commul/universal_dependencies/viewer/vi_vtb
|
| 4 |
-
Uses underthesea dependency_parse for proper annotations.
|
| 5 |
-
|
| 6 |
-
Optimized for GPU batch processing.
|
| 7 |
-
"""
|
| 8 |
-
|
| 9 |
-
import json
|
| 10 |
-
import os
|
| 11 |
-
from os.path import dirname, expanduser, join
|
| 12 |
-
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 13 |
-
import multiprocessing
|
| 14 |
-
|
| 15 |
-
# Fix GPU tensor compatibility issue with pack_padded_sequence
|
| 16 |
-
# The lengths tensor must be on CPU even when using CUDA
|
| 17 |
-
import torch
|
| 18 |
-
# Keep a reference to the unpatched implementation so the wrapper can delegate.
_original_pack = torch.nn.utils.rnn.pack_padded_sequence

def _patched_pack(input, lengths, batch_first=False, enforce_sorted=True):
    """Shim for torch.nn.utils.rnn.pack_padded_sequence.

    PyTorch requires the `lengths` tensor to be on the CPU even when the
    data tensor is on CUDA; this wrapper moves a CUDA `lengths` tensor to
    the CPU before delegating to the original function.
    NOTE(review): presumably the underthesea parser passes a CUDA lengths
    tensor, which is what triggers the incompatibility — confirm upstream.
    """
    if lengths.is_cuda:
        lengths = lengths.cpu()
    return _original_pack(input, lengths, batch_first=batch_first, enforce_sorted=enforce_sorted)

# Monkey-patch globally so all callers (including library internals that
# import the function lazily via the module attribute) go through the shim.
torch.nn.utils.rnn.pack_padded_sequence = _patched_pack
|
| 26 |
-
|
| 27 |
-
from underthesea import dependency_parse, pos_tag
|
| 28 |
-
|
| 29 |
-
# Global model cache for batch processing
|
| 30 |
-
_models_loaded = False
|
| 31 |
-
|
| 32 |
-
# Map Vietnamese POS tags to Universal POS tags
# Based on: https://universaldependencies.org/u/pos/
# Keys are VLSP-style tags as produced by underthesea's pos_tag; values
# are the 17-category UD UPOS labels. Unknown tags fall back to 'X' in
# to_upos().
UPOS_MAP = {
    'N': 'NOUN',    # Noun
    'Np': 'PROPN',  # Proper noun
    'Nc': 'NOUN',   # Classifier noun
    'Nu': 'NOUN',   # Unit noun
    'V': 'VERB',    # Verb
    'A': 'ADJ',     # Adjective
    'P': 'PRON',    # Pronoun
    'R': 'ADV',     # Adverb
    'L': 'DET',     # Determiner/Quantifier
    'M': 'NUM',     # Numeral
    'E': 'ADP',     # Preposition
    'C': 'CCONJ',   # Coordinating conjunction
    'CC': 'CCONJ',  # Coordinating conjunction
    'SC': 'SCONJ',  # Subordinating conjunction
    'I': 'INTJ',    # Interjection
    'T': 'PART',    # Particle
    'B': 'X',       # Foreign word
    'Y': 'X',       # Abbreviation
    'S': 'SYM',     # Symbol
    'X': 'X',       # Other
    'CH': 'PUNCT',  # Punctuation
    'Ny': 'NOUN',   # Noun (variant)
}

# Vietnamese auxiliary verbs that should be tagged as AUX
# Based on UD Vietnamese validation data (data.json)
# All entries are lowercase; callers compare against token.lower().
AUX_WORDS = {
    'bị', 'chưa thể', 'chắc chắn', 'có thể', 'có vẻ', 'cần',
    'giả', 'không thể', 'là', 'muốn', 'nghĩa là', 'nhằm',
    'nên', 'phải', 'quyết', 'thôi', 'thể', 'xong', 'được', 'định'
}

# Vietnamese determiners - words that should be DET when used as 'det' relation
# NOTE(review): 'phá tán' ("to scatter/destroy") looks like a data artifact
# rather than a determiner — verify against the source annotations.
DET_WORDS = {
    'các', 'những', 'mọi', 'mỗi', 'từng', 'bất kỳ', 'một', 'hai', 'ba',
    'này', 'đó', 'kia', 'ấy', 'nọ', 'nào', 'đấy', 'cái', 'con', 'chiếc',
    'người', 'cả', 'phá tán'  # Words that appear as det in the data
}

# Words that can be ADV when used as 'advmod'
ADV_WORDS = {
    'không', 'chưa', 'đã', 'đang', 'sẽ', 'còn', 'vẫn', 'cũng', 'rất',
    'quá', 'lắm', 'hơn', 'nhất', 'luôn', 'thường', 'hay', 'ít', 'nhiều',
    'tự', 'một cách', 'được', 'không thể', 'lại', 'cá biệt', 'dân sự'
}

# Invalid deprels that need to be mapped to valid ones.
# Applied both before (Fix 0) and after (Fix 5c) the per-token repairs in
# fix_syntax_errors, so relabelings introduced by the fixes are caught too.
DEPREL_MAP = {
    'acomp': 'xcomp',             # Adjectival complement -> open clausal complement
    'nmod:comp': 'nmod',          # Invalid subtype
    'nmod:agent': 'obl:agent',    # Agent should be obl not nmod
    'nmod:with': 'nmod',          # Invalid subtype
    'nmod:about': 'nmod',         # Invalid subtype -> nmod
    'compound:number': 'nummod',  # Number compounds should be nummod
    'compound:nmod': 'compound',  # Invalid subtype
    'obl:pcomp': 'obl',           # Invalid subtype -> obl
}
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
def to_upos(tag, token=None):
    """Convert a Vietnamese POS tag to a Universal POS tag.

    Args:
        tag: Vietnamese (VLSP-style) POS tag, e.g. 'N', 'V', 'CH'.
        token: Optional surface form. Words listed in AUX_WORDS are forced
            to 'AUX' regardless of their Vietnamese tag.

    Returns:
        A UD UPOS string; unknown tags map to 'X'.
    """
    if token:
        # AUX_WORDS entries are stored lowercase, so a single
        # case-insensitive membership test suffices. (The previous extra
        # linear scan re-comparing token.lower() against aux.lower() was
        # dead code and has been removed.)
        if token.lower() in AUX_WORDS:
            return 'AUX'
    return UPOS_MAP.get(tag, 'X')
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
def fix_syntax_errors(tokens, upos, head, deprel):
    """
    Post-process to fix common UD SYNTAX validation errors.

    Heuristic repairs for raw parser output so it passes the UD validator.
    Fixes are named after the validator error codes they target
    (rel-upos-det, too-many-subjects, punct-is-nonproj, ...).

    Args:
        tokens: Word forms of the sentence.
        upos: Universal POS tags, parallel to tokens.
        head: 1-based head indices ("0" = root), as strings or ints.
        deprel: Dependency relation labels, parallel to tokens.

    Returns:
        Tuple of fixed (upos, head, deprel); head entries are returned as
        strings. The input lists are not mutated (copies are made).

    Run multiple passes to handle dependencies between fixes.
    """
    n = len(tokens)
    # Work on copies; head is normalized to ints for arithmetic.
    upos = list(upos)
    deprel = list(deprel)
    head = [int(h) for h in head]

    # First pass: fix leaf nodes (aux/mark/case/punct should not have children)
    # Need multiple passes to handle chains of leaf nodes
    for _ in range(5):  # Multiple passes to handle chains
        changed = False
        for i in range(n):
            rel = deprel[i]

            # Leaf nodes should not have children - redirect children to parent
            # Include subtypes like aux:pass, mark:pcomp, etc.
            # Also include det, nummod, clf which should be leaves
            if rel.split(':')[0] in ('aux', 'cop', 'mark', 'case', 'punct', 'det', 'nummod', 'clf'):
                # head values are 1-based, so node i is referenced as i + 1.
                has_children = any(head[j] == i + 1 for j in range(n))
                if has_children:
                    my_head = head[i]
                    for j in range(n):
                        if head[j] == i + 1:
                            head[j] = my_head
                    changed = True
        if not changed:
            break

    # Per-token pass: reconcile deprel with UPOS (and vice versa).
    for i in range(n):
        token_lower = tokens[i].lower()
        rel = deprel[i]
        pos = upos[i]

        # Fix 0: Map invalid deprels to valid ones
        if rel in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[rel]
            rel = deprel[i]

        # Fix 1: rel-upos-det - 'det' (including subtypes) should be DET or PRON
        if rel.startswith('det') and pos not in ('DET', 'PRON'):
            # Force all 'det' relations to have DET or PRON UPOS
            upos[i] = 'DET'

        # Fix 2: rel-upos-advmod - 'advmod' (including subtypes) should be ADV
        if rel.startswith('advmod') and pos != 'ADV':
            # For advmod, always prefer changing UPOS to ADV
            upos[i] = 'ADV'

        # Fix 2b: rel-upos-nummod - 'nummod' should be NUM
        if rel.startswith('nummod') and upos[i] != 'NUM':
            # If token is clearly not a number (e.g. VERB), change the
            # relation instead of forcing a bogus NUM tag.
            if upos[i] == 'VERB':
                deprel[i] = 'acl'  # Adjectival clause for verbs
                rel = 'acl'  # Update local variable too
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'  # Adjectival modifier
                rel = 'amod'
            else:
                upos[i] = 'NUM'

        # Fix 3: rel-upos-mark - 'mark' (including subtypes) should not be AUX
        if rel.startswith('mark') and pos == 'AUX':
            upos[i] = 'SCONJ'

        # Fix 3b: rel-upos-punct - 'punct' must be PUNCT, and PUNCT must have 'punct' deprel
        if rel == 'punct' and pos != 'PUNCT':
            # Change relation to something appropriate based on POS
            if pos in ('VERB', 'NOUN', 'ADJ'):
                deprel[i] = 'dep'  # Use generic dependency
            else:
                upos[i] = 'PUNCT'

        # Fix 3b2: upos-rel-punct - PUNCT must have 'punct' deprel
        if pos == 'PUNCT' and rel != 'punct':
            deprel[i] = 'punct'
            rel = 'punct'

        # Fix 3c: rel-upos-case - 'case' should be ADP, not ADJ, AUX or PROPN
        if rel == 'case' and pos in ('ADJ', 'AUX', 'PROPN', 'NOUN', 'VERB'):
            upos[i] = 'ADP'

        # Fix 3d: rel-upos-cc - 'cc' should be CCONJ or SCONJ
        if rel == 'cc' and pos not in ('CCONJ', 'SCONJ'):
            upos[i] = 'CCONJ'

        # Fix 3e: rel-upos-aux - 'aux' should be AUX, but only for valid auxiliaries
        # NOTE(review): the any(...) clause re-checks the same lowercase
        # membership as the first test; it is redundant but harmless.
        is_valid_aux = token_lower in AUX_WORDS or any(token_lower == aux.lower() for aux in AUX_WORDS)
        if rel.startswith('aux'):
            if is_valid_aux:
                upos[i] = 'AUX'
                pos = 'AUX'
            else:
                # Not a valid auxiliary - change relation to advcl or xcomp
                if pos == 'VERB' or upos[i] == 'VERB':
                    deprel[i] = 'advcl'
                    upos[i] = 'VERB'
                elif pos == 'ADP' or upos[i] == 'ADP':
                    deprel[i] = 'mark'
                    upos[i] = 'ADP'
                else:
                    deprel[i] = 'xcomp'
                rel = deprel[i]
                pos = upos[i]
        # Also fix AUX UPOS that's not a valid auxiliary (MORPHO aux-lemma)
        elif pos == 'AUX' and not is_valid_aux:
            upos[i] = 'VERB'  # Default to VERB for non-aux
            pos = 'VERB'

        # Fix 3f: rel-upos-cop - 'cop' should be AUX or PRON/DET, only 'là' is valid copula
        if rel == 'cop':
            if token_lower != 'là':
                # Not a valid copula, change to xcomp
                deprel[i] = 'xcomp'
                rel = 'xcomp'
            elif pos not in ('AUX', 'PRON', 'DET'):
                upos[i] = 'AUX'

        # Fix 4: obl-should-be-nmod - when parent is nominal, use nmod
        if rel.startswith('obl') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx < n and upos[parent_idx] in ('NOUN', 'PROPN', 'PRON'):
                # Preserve subtype if exists
                if ':' in rel:
                    deprel[i] = 'nmod:' + rel.split(':')[1]
                else:
                    deprel[i] = 'nmod'

    # Fix 5: (handled in first pass above)

    # Fix 5b: right-to-left relations - flat/conj/appos must be left-to-right
    for i in range(n):
        rel = deprel[i]
        base_rel = rel.split(':')[0]
        if base_rel in ('flat', 'conj', 'appos') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx > i:  # Parent comes after child (wrong direction)
                # Change to compound which allows both directions
                if ':' in rel:
                    deprel[i] = 'compound:' + rel.split(':')[1]
                else:
                    deprel[i] = 'compound'

    # Fix 5c: Apply DEPREL_MAP again to catch any newly created invalid deprels
    for i in range(n):
        if deprel[i] in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[deprel[i]]

    # Fix 5d: Final check for nummod with wrong UPOS
    for i in range(n):
        if deprel[i].startswith('nummod') and upos[i] != 'NUM':
            if upos[i] == 'VERB':
                deprel[i] = 'acl'
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'
            elif upos[i] == 'NOUN':
                deprel[i] = 'nmod'
            else:
                upos[i] = 'NUM'

    # Fix 6: too-many-subjects - add :outer subtype for multiple subjects
    # Group all subject types (nsubj, csubj) by predicate (1-based head id).
    predicates = {}
    for i in range(n):
        base_rel = deprel[i].split(':')[0]
        if base_rel in ('nsubj', 'csubj') and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates:
                predicates[pred_idx] = []
            predicates[pred_idx].append((i, base_rel))

    for pred_idx, subj_list in predicates.items():
        if len(subj_list) > 1:
            # Sort by position to keep first subject as main
            subj_list.sort(key=lambda x: x[0])
            # Mark all but the first as :outer (only nsubj:outer is valid, not csubj:outer)
            for idx, base_rel in subj_list[1:]:
                if ':outer' not in deprel[idx]:
                    # csubj:outer is not a valid UD relation, use nsubj:outer instead
                    deprel[idx] = 'nsubj:outer'

    # Fix 7: too-many-objects - relabel extra objects as compound or iobj
    predicates_obj = {}
    for i in range(n):
        if deprel[i] == 'obj' and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates_obj:
                predicates_obj[pred_idx] = []
            predicates_obj[pred_idx].append(i)

    for pred_idx, obj_indices in predicates_obj.items():
        if len(obj_indices) > 1:
            # Mark subsequent objects as compound
            for idx in obj_indices[1:]:
                # Check if it's adjacent to previous - likely compound
                if idx > 0 and obj_indices[0] == idx - 1:
                    deprel[idx] = 'compound'
                else:
                    deprel[idx] = 'iobj'

    # Fix 8: punct-is-nonproj - attach punctuation to avoid non-projectivity
    # Try to find the best attachment point that doesn't cross other edges.
    for i in range(n):
        if upos[i] == 'PUNCT':
            # Try candidates in order: previous token, next token, then expand outward
            # Candidates are stored as 1-based head ids.
            candidates = []
            if i > 0:
                candidates.append(i)  # Previous token (1-based)
            if i + 1 < n:
                candidates.append(i + 2)  # Next token (1-based)

            # Expand to find more candidates
            for dist in range(2, n):
                if i - dist >= 0:
                    candidates.append(i - dist + 1)  # 1-based
                if i + dist < n:
                    candidates.append(i + dist + 1)  # 1-based

            # Find best attachment that doesn't cause crossing
            # NOTE(review): with no candidates (single-token sentence) the
            # default head 1 attaches the punct to itself — confirm intended.
            best_head = candidates[0] if candidates else 1
            for cand in candidates:
                test_head = list(head)
                test_head[i] = cand
                if not punct_causes_crossing(i, cand - 1, test_head, n):
                    best_head = cand
                    break

            head[i] = best_head

    return upos, [str(h) for h in head], deprel
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
def punct_causes_crossing(punct_idx, new_head_idx, head, n):
    """Return True if attaching the punctuation token at *punct_idx* to the
    token at *new_head_idx* (0-based) would cross any other dependency edge.

    `head` holds 1-based head ids (0 = root); edges attached to the punct
    token itself and root attachments are ignored.
    """
    if not (0 <= new_head_idx < n):
        return False

    # Span of the proposed punct edge, as an ordered (low, high) pair.
    lo, hi = sorted((punct_idx, new_head_idx))

    for other in range(n):
        if other == punct_idx:
            continue
        parent_id = head[other]
        # Skip roots and edges whose head is the punct token itself.
        if parent_id <= 0 or parent_id == punct_idx + 1:
            continue
        parent = parent_id - 1
        if parent >= n:
            continue
        a, b = sorted((other, parent))
        # Two edges cross iff exactly one endpoint of each lies strictly
        # inside the other's span.
        if lo < a < hi < b or a < lo < b < hi:
            return True

    return False
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
def compute_space_after(text, tokens):
    """Derive the MISC column for each token from the original text.

    Returns a list parallel to *tokens* holding "SpaceAfter=No" when the
    token is immediately followed by a non-whitespace character in *text*,
    and "_" otherwise (including tokens that cannot be located).
    """
    result = []
    cursor = 0
    for tok in tokens:
        start = text.find(tok, cursor)
        if start < 0:
            # Token cannot be aligned with the text; assume a space follows.
            result.append("_")
            continue
        cursor = start + len(tok)
        # "SpaceAfter=No" only when another character follows and it is
        # not a space, tab, or newline.
        glued = cursor < len(text) and text[cursor] not in ' \t\n'
        result.append("SpaceAfter=No" if glued else "_")
    return result
|
| 395 |
-
|
| 396 |
-
|
| 397 |
-
def load_sentences(filepath):
    """Load sentences from sentences.txt or sentences_uvb.txt.

    Two tab-separated layouts are accepted:
      sentences.txt:      idx\tsentence
      sentences_uvb.txt:  idx\tsource\tsentence
    Blank lines and lines without a tab are skipped.
    """
    out = []
    with open(filepath, "r", encoding="utf-8") as fh:
        for raw in fh:
            stripped = raw.strip()
            if not stripped:
                continue
            fields = stripped.split("\t")
            if len(fields) >= 3:
                out.append(fields[2])
            elif len(fields) == 2:
                out.append(fields[1])
    return out
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
def process_single_sentence(args):
    """Parse one sentence into a UD row dict (used for parallel processing).

    Args:
        args: Tuple of (1-based sentence index, raw sentence text).

    Returns:
        (idx, row) where row maps CoNLL-U column names (tokens, lemmas,
        upos, xpos, feats, head, deprel, deps, misc, ...) to parallel lists.
    """
    idx, text = args
    sent_id = f"s{idx}"

    try:
        # Use dependency_parse for tokens, heads, and deprels
        parsed = dependency_parse(text)
        tokens = [t[0] for t in parsed]
        head = [str(t[1]) for t in parsed]
        deprel = [t[2] for t in parsed]

        # POS tags come from a separate tagger; they are only usable when
        # its tokenization agrees with the dependency parser's.
        tagged = pos_tag(text)
        if len(tagged) == len(tokens):
            xpos = [t[1] for t in tagged]
            upos = [to_upos(t[1], t[0]) for t in tagged]
        else:
            xpos = ['X'] * len(tokens)
            upos = ['X'] * len(tokens)

    except Exception:
        # Deliberate best-effort fallback: if the dependency parser fails,
        # emit a flat analysis from POS tagging alone with the first token
        # as root. (The unused `as e` binding was removed.)
        tagged = pos_tag(text)
        tokens = [t[0] for t in tagged]
        xpos = [t[1] for t in tagged]
        upos = [to_upos(t[1], t[0]) for t in tagged]
        head = ["0"] * len(tokens)
        deprel = ["dep"] * len(tokens)
        if tokens:
            deprel[0] = "root"

    # Repair common UD validation errors in the raw parser output.
    upos, head, deprel = fix_syntax_errors(tokens, upos, head, deprel)

    # Remaining CoNLL-U columns: no lemmatizer is available, so the lemma
    # is the lowercased surface form; FEATS and DEPS are left empty.
    n = len(tokens)
    lemmas = [t.lower() for t in tokens]
    feats = ["_"] * n
    deps = ["_"] * n
    misc = compute_space_after(text, tokens)

    return idx, {
        "sent_id": sent_id,
        "text": text,
        "comments": [f"# sent_id = {sent_id}", f"# text = {text}"],
        "tokens": tokens,
        "lemmas": lemmas,
        "upos": upos,
        "xpos": xpos,
        "feats": feats,
        "head": head,
        "deprel": deprel,
        "deps": deps,
        "misc": misc,
        "mwt": [],
        "empty_nodes": []
    }
|
| 473 |
-
|
| 474 |
-
|
| 475 |
-
def convert_to_ud_format(sentences, batch_size=32, num_workers=4):
    """Convert sentences to UD format using dependency_parse with batch processing.

    num_workers is unused in this sequential/GPU variant; it is kept for
    signature compatibility (see convert_to_ud_format_parallel).
    """
    global _models_loaded

    # Warm up with a tiny input so model weights are resident before the loop.
    if not _models_loaded:
        print(" Loading models into GPU memory...")
        _ = dependency_parse("Xin chào")
        _ = pos_tag("Xin chào")
        _models_loaded = True
        print(" Models loaded.")

    total = len(sentences)
    data = [None] * total

    print(f" Processing {total} sentences with batch_size={batch_size}...")

    for start in range(0, total, batch_size):
        end = min(start + batch_size, total)
        # Sentences within a batch are parsed one after another; rows are
        # written back by their 1-based index.
        for offset in range(start, end):
            idx, row = process_single_sentence((offset + 1, sentences[offset]))
            data[idx - 1] = row

        # Progress report every 100 sentences and at completion.
        if end % 100 == 0 or end == total:
            print(f" Processed {end}/{total} sentences ({100*end/total:.1f}%)")

    return data
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
def convert_to_ud_format_parallel(sentences, num_workers=None):
    """Convert sentences using multiple workers (CPU parallelism).

    Note: This is useful when GPU is bottleneck or for CPU-only processing.
    For GPU processing, use convert_to_ud_format with batch processing.

    Args:
        sentences: List of raw sentence strings.
        num_workers: Thread count; defaults to min(4, CPU count).

    Returns:
        List of row dicts in the same order as `sentences`.
    """
    global _models_loaded

    if num_workers is None:
        num_workers = min(4, multiprocessing.cpu_count())

    # Pre-warm models so the first worker does not pay the load cost.
    if not _models_loaded:
        print(" Loading models...")
        _ = dependency_parse("Xin chào")
        _ = pos_tag("Xin chào")
        _models_loaded = True
        print(" Models loaded.")

    data = [None] * len(sentences)
    total = len(sentences)
    processed = 0

    print(f" Processing {total} sentences with {num_workers} workers...")

    # Use ThreadPoolExecutor for I/O bound tasks with GPU.
    # NOTE(review): assumes the parser releases the GIL during native/GPU
    # work, otherwise threads give no speedup — confirm with profiling.
    with ThreadPoolExecutor(max_workers=num_workers) as executor:
        futures = {
            executor.submit(process_single_sentence, (i + 1, sentences[i])): i
            for i in range(total)
        }

        # Results arrive out of order; the 1-based idx returned by the
        # worker restores the original ordering in `data`.
        for future in as_completed(futures):
            idx, row = future.result()
            data[idx - 1] = row
            processed += 1

            if processed % 100 == 0 or processed == total:
                print(f" Processed {processed}/{total} sentences ({100*processed/total:.1f}%)")

    return data
|
| 552 |
-
|
| 553 |
-
|
| 554 |
-
def save_jsonl(data, filepath):
    """Write rows to *filepath* as UTF-8 JSON Lines (one object per line)."""
    with open(filepath, "w", encoding="utf-8") as out:
        # ensure_ascii=False keeps Vietnamese text readable in the output.
        out.writelines(json.dumps(row, ensure_ascii=False) + "\n" for row in data)
|
| 559 |
-
|
| 560 |
-
|
| 561 |
-
def save_conllu(data, filepath):
    """Write rows to *filepath* in the 10-column CoNLL-U format."""
    # Row keys in CoNLL-U column order after the ID column:
    # FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC
    columns = ('tokens', 'lemmas', 'upos', 'xpos', 'feats',
               'head', 'deprel', 'deps', 'misc')
    with open(filepath, "w", encoding="utf-8") as out:
        for row in data:
            out.write(f"# sent_id = {row['sent_id']}\n")
            out.write(f"# text = {row['text']}\n")
            for i in range(len(row['tokens'])):
                fields = [str(i + 1)] + [row[col][i] for col in columns]
                out.write("\t".join(fields) + "\n")
            # Blank line terminates each sentence block.
            out.write("\n")
|
| 583 |
-
|
| 584 |
-
|
| 585 |
-
def main():
|
| 586 |
-
import argparse
|
| 587 |
-
import time
|
| 588 |
-
parser = argparse.ArgumentParser(description="Convert sentences to UD format")
|
| 589 |
-
parser.add_argument("--input", "-i", type=str, help="Input sentences file")
|
| 590 |
-
parser.add_argument("--output-dir", "-o", type=str, help="Output directory")
|
| 591 |
-
parser.add_argument("--prefix", "-p", type=str, default="train", help="Output file prefix")
|
| 592 |
-
parser.add_argument("--batch-size", "-b", type=int, default=64,
|
| 593 |
-
help="Batch size for GPU processing (default: 64, increase for more GPU usage)")
|
| 594 |
-
parser.add_argument("--parallel", action="store_true",
|
| 595 |
-
help="Use parallel processing with multiple workers")
|
| 596 |
-
parser.add_argument("--workers", "-w", type=int, default=4,
|
| 597 |
-
help="Number of workers for parallel processing (default: 4)")
|
| 598 |
-
args = parser.parse_args()
|
| 599 |
-
|
| 600 |
-
# Default paths
|
| 601 |
-
if args.input:
|
| 602 |
-
sentences_file = args.input
|
| 603 |
-
else:
|
| 604 |
-
source_folder = expanduser("~/Downloads/UD_Vietnamese-UUD-v0.1")
|
| 605 |
-
sentences_file = join(source_folder, "sentences.txt")
|
| 606 |
-
|
| 607 |
-
if args.output_dir:
|
| 608 |
-
output_dir = args.output_dir
|
| 609 |
-
else:
|
| 610 |
-
output_dir = dirname(sentences_file)
|
| 611 |
-
|
| 612 |
-
print("Loading sentences...")
|
| 613 |
-
sentences = load_sentences(sentences_file)
|
| 614 |
-
print(f"Loaded {len(sentences)} sentences")
|
| 615 |
-
|
| 616 |
-
# Check GPU availability
|
| 617 |
-
if torch.cuda.is_available():
|
| 618 |
-
print(f"GPU: {torch.cuda.get_device_name(0)}")
|
| 619 |
-
print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB")
|
| 620 |
-
else:
|
| 621 |
-
print("GPU: Not available (using CPU)")
|
| 622 |
-
|
| 623 |
-
print(f"\nConverting to UD format (batch_size={args.batch_size})...")
|
| 624 |
-
start_time = time.time()
|
| 625 |
-
|
| 626 |
-
if args.parallel:
|
| 627 |
-
data = convert_to_ud_format_parallel(sentences, num_workers=args.workers)
|
| 628 |
-
else:
|
| 629 |
-
data = convert_to_ud_format(sentences, batch_size=args.batch_size)
|
| 630 |
-
|
| 631 |
-
elapsed = time.time() - start_time
|
| 632 |
-
speed = len(sentences) / elapsed
|
| 633 |
-
print(f"\nCompleted in {elapsed:.1f}s ({speed:.1f} sentences/sec)")
|
| 634 |
-
|
| 635 |
-
# Save as JSONL (for HuggingFace)
|
| 636 |
-
jsonl_file = join(output_dir, f"{args.prefix}.jsonl")
|
| 637 |
-
save_jsonl(data, jsonl_file)
|
| 638 |
-
print(f"Saved JSONL to: {jsonl_file}")
|
| 639 |
-
|
| 640 |
-
# Save as CoNLL-U (standard UD format)
|
| 641 |
-
conllu_file = join(output_dir, f"{args.prefix}.conllu")
|
| 642 |
-
save_conllu(data, conllu_file)
|
| 643 |
-
print(f"Saved CoNLL-U to: {conllu_file}")
|
| 644 |
-
|
| 645 |
-
# Print sample
|
| 646 |
-
print("\nSample row:")
|
| 647 |
-
sample = data[0]
|
| 648 |
-
print(f" sent_id: {sample['sent_id']}")
|
| 649 |
-
print(f" text: {sample['text'][:60]}...")
|
| 650 |
-
print(f" tokens: {sample['tokens'][:5]}...")
|
| 651 |
-
print(f" upos: {sample['upos'][:5]}...")
|
| 652 |
-
|
| 653 |
-
|
| 654 |
-
if __name__ == "__main__":
|
| 655 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
scripts/fetch_data.py
DELETED
|
@@ -1,115 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Fetch data from HuggingFace dataset undertheseanlp/UTS_VLC
|
| 3 |
-
- Get documents from law dataset
|
| 4 |
-
- Segment sentences using underthesea
|
| 5 |
-
- Get first 3000 sentences
|
| 6 |
-
"""
|
| 7 |
-
|
| 8 |
-
import re
|
| 9 |
-
from os.path import dirname, join
|
| 10 |
-
|
| 11 |
-
from datasets import load_dataset
|
| 12 |
-
|
| 13 |
-
from underthesea import sent_tokenize, text_normalize
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
def clean_text(text):
|
| 17 |
-
"""Remove markdown formatting and clean text."""
|
| 18 |
-
# Normalize Unicode using underthesea
|
| 19 |
-
text = text_normalize(text)
|
| 20 |
-
# Remove markdown headers
|
| 21 |
-
text = re.sub(r'^#+\s+', '', text, flags=re.MULTILINE)
|
| 22 |
-
# Remove bold/italic markers
|
| 23 |
-
text = re.sub(r'\*+', '', text)
|
| 24 |
-
# Remove horizontal rules
|
| 25 |
-
text = re.sub(r'^-+$', '', text, flags=re.MULTILINE)
|
| 26 |
-
# Remove links
|
| 27 |
-
text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text)
|
| 28 |
-
# Remove multiple newlines
|
| 29 |
-
text = re.sub(r'\n{2,}', '\n', text)
|
| 30 |
-
# Remove leading/trailing whitespace per line
|
| 31 |
-
lines = [line.strip() for line in text.split('\n')]
|
| 32 |
-
text = '\n'.join(lines)
|
| 33 |
-
return text
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
def is_valid_sentence(sent):
|
| 37 |
-
"""Check if sentence is valid for UD annotation."""
|
| 38 |
-
sent = sent.strip()
|
| 39 |
-
# Remove trailing list markers like "1." or "a)"
|
| 40 |
-
sent = re.sub(r'\n\d+\.$', '', sent)
|
| 41 |
-
sent = re.sub(r'\n[a-z]\)$', '', sent)
|
| 42 |
-
sent = sent.strip()
|
| 43 |
-
|
| 44 |
-
if not sent:
|
| 45 |
-
return False, sent
|
| 46 |
-
# Too short
|
| 47 |
-
if len(sent) < 20:
|
| 48 |
-
return False, sent
|
| 49 |
-
# Too long
|
| 50 |
-
if len(sent) > 300:
|
| 51 |
-
return False, sent
|
| 52 |
-
# Skip headers (all caps, or starts with "Điều", "Chương", etc.)
|
| 53 |
-
if re.match(r'^(QUỐC HỘI|CỘNG HÒA|Độc lập|Phần thứ|Chương [IVX]+|MỤC \d+)', sent):
|
| 54 |
-
return False, sent
|
| 55 |
-
# Skip article titles
|
| 56 |
-
if re.match(r'^(Điều \d+|Khoản \d+|Mục \d+)', sent):
|
| 57 |
-
return False, sent
|
| 58 |
-
# Skip if mostly uppercase
|
| 59 |
-
if sum(1 for c in sent if c.isupper()) > len(sent) * 0.5:
|
| 60 |
-
return False, sent
|
| 61 |
-
# Skip if starts with special markers
|
| 62 |
-
if sent.startswith(('English:', 'Số hiệu:', 'Ngày hiệu lực:', '---', '|')):
|
| 63 |
-
return False, sent
|
| 64 |
-
# Must contain Vietnamese characters
|
| 65 |
-
if not re.search(r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]', sent, re.IGNORECASE):
|
| 66 |
-
return False, sent
|
| 67 |
-
# Skip if ends with just a number (incomplete sentence)
|
| 68 |
-
if re.search(r'\n\d+$', sent):
|
| 69 |
-
return False, sent
|
| 70 |
-
return True, sent
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
def fetch_and_process():
|
| 74 |
-
# Load dataset from HuggingFace
|
| 75 |
-
print("Loading dataset from HuggingFace...")
|
| 76 |
-
ds = load_dataset("undertheseanlp/UTS_VLC", split="2026")
|
| 77 |
-
|
| 78 |
-
# Segment sentences from all documents until we have 3000
|
| 79 |
-
print("Segmenting sentences...")
|
| 80 |
-
all_sentences = []
|
| 81 |
-
for idx, doc in enumerate(ds):
|
| 82 |
-
content = doc["content"]
|
| 83 |
-
content = clean_text(content)
|
| 84 |
-
sentences = sent_tokenize(content)
|
| 85 |
-
for sent in sentences:
|
| 86 |
-
sent = sent.strip()
|
| 87 |
-
is_valid, cleaned_sent = is_valid_sentence(sent)
|
| 88 |
-
if is_valid:
|
| 89 |
-
all_sentences.append(cleaned_sent)
|
| 90 |
-
if len(all_sentences) >= 3000:
|
| 91 |
-
print(f"Processed {idx + 1} documents")
|
| 92 |
-
break
|
| 93 |
-
|
| 94 |
-
# Get first 3000 sentences
|
| 95 |
-
sentences_3000 = all_sentences[:3000]
|
| 96 |
-
print(f"Total sentences collected: {len(sentences_3000)}")
|
| 97 |
-
|
| 98 |
-
# Save to output file
|
| 99 |
-
output_dir = dirname(dirname(__file__))
|
| 100 |
-
output_file = join(output_dir, "sentences.txt")
|
| 101 |
-
|
| 102 |
-
with open(output_file, "w", encoding="utf-8") as f:
|
| 103 |
-
for i, sent in enumerate(sentences_3000, 1):
|
| 104 |
-
f.write(f"{i}\t{sent}\n")
|
| 105 |
-
|
| 106 |
-
print(f"Saved to: {output_file}")
|
| 107 |
-
|
| 108 |
-
# Print sample
|
| 109 |
-
print("\nSample sentences:")
|
| 110 |
-
for i, sent in enumerate(sentences_3000[:5], 1):
|
| 111 |
-
print(f" {i}. {sent[:80]}...")
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
if __name__ == "__main__":
|
| 115 |
-
fetch_and_process()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
scripts/fetch_uvb_data.py
DELETED
|
@@ -1,250 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Fetch data from HuggingFace dataset undertheseanlp/UVB-v0.1
|
| 3 |
-
- Get 5,000 high-quality sentences from fiction books
|
| 4 |
-
- Get 5,000 high-quality sentences from non-fiction books
|
| 5 |
-
"""
|
| 6 |
-
|
| 7 |
-
import re
|
| 8 |
-
from os.path import dirname, join
|
| 9 |
-
|
| 10 |
-
from datasets import load_dataset
|
| 11 |
-
from underthesea import sent_tokenize, text_normalize
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
# Fiction-related genres
|
| 15 |
-
FICTION_GENRES = {
|
| 16 |
-
"Fiction", "Novels", "Romance", "Fantasy", "Science Fiction",
|
| 17 |
-
"Mystery", "Thriller", "Horror", "Historical Fiction", "Literary Fiction",
|
| 18 |
-
"Adventure", "Crime", "Suspense", "Drama", "Short Stories"
|
| 19 |
-
}
|
| 20 |
-
|
| 21 |
-
# Non-fiction related genres
|
| 22 |
-
NON_FICTION_GENRES = {
|
| 23 |
-
"Non Fiction", "Nonfiction", "History", "Biography", "Autobiography",
|
| 24 |
-
"Self Help", "Psychology", "Philosophy", "Science", "Politics",
|
| 25 |
-
"Economics", "Business", "Education", "Travel", "Memoir",
|
| 26 |
-
"Essays", "Reference", "Health", "Religion", "Spirituality"
|
| 27 |
-
}
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
def clean_text(text):
|
| 31 |
-
"""Remove formatting and clean text."""
|
| 32 |
-
# Normalize Unicode using underthesea
|
| 33 |
-
text = text_normalize(text)
|
| 34 |
-
# Remove markdown headers
|
| 35 |
-
text = re.sub(r'^#+\s+', '', text, flags=re.MULTILINE)
|
| 36 |
-
# Remove bold/italic markers
|
| 37 |
-
text = re.sub(r'\*+', '', text)
|
| 38 |
-
# Remove horizontal rules
|
| 39 |
-
text = re.sub(r'^-+$', '', text, flags=re.MULTILINE)
|
| 40 |
-
# Remove links
|
| 41 |
-
text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text)
|
| 42 |
-
# Remove multiple newlines
|
| 43 |
-
text = re.sub(r'\n{2,}', '\n', text)
|
| 44 |
-
# Remove leading/trailing whitespace per line
|
| 45 |
-
lines = [line.strip() for line in text.split('\n')]
|
| 46 |
-
text = '\n'.join(lines)
|
| 47 |
-
return text
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
def is_high_quality_sentence(sent):
|
| 51 |
-
"""Check if sentence is high quality for UD annotation."""
|
| 52 |
-
sent = sent.strip()
|
| 53 |
-
|
| 54 |
-
if not sent:
|
| 55 |
-
return False, sent
|
| 56 |
-
|
| 57 |
-
# Length constraints
|
| 58 |
-
if len(sent) < 30: # Minimum length for meaningful sentence
|
| 59 |
-
return False, sent
|
| 60 |
-
if len(sent) > 250: # Maximum length
|
| 61 |
-
return False, sent
|
| 62 |
-
|
| 63 |
-
# Word count constraints
|
| 64 |
-
words = sent.split()
|
| 65 |
-
if len(words) < 5: # At least 5 words
|
| 66 |
-
return False, sent
|
| 67 |
-
if len(words) > 40: # Max 40 words
|
| 68 |
-
return False, sent
|
| 69 |
-
|
| 70 |
-
# Must start with uppercase letter (proper sentence)
|
| 71 |
-
if not sent[0].isupper():
|
| 72 |
-
return False, sent
|
| 73 |
-
|
| 74 |
-
# Must end with proper punctuation
|
| 75 |
-
if not sent.rstrip()[-1] in '.!?…"»':
|
| 76 |
-
return False, sent
|
| 77 |
-
|
| 78 |
-
# Skip if mostly uppercase (headers, titles)
|
| 79 |
-
if sum(1 for c in sent if c.isupper()) > len(sent) * 0.3:
|
| 80 |
-
return False, sent
|
| 81 |
-
|
| 82 |
-
# Must contain Vietnamese characters
|
| 83 |
-
if not re.search(r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]', sent, re.IGNORECASE):
|
| 84 |
-
return False, sent
|
| 85 |
-
|
| 86 |
-
# Skip sentences with too many numbers (tables, lists)
|
| 87 |
-
num_digits = sum(1 for c in sent if c.isdigit())
|
| 88 |
-
if num_digits > len(sent) * 0.15:
|
| 89 |
-
return False, sent
|
| 90 |
-
|
| 91 |
-
# Skip sentences with special patterns
|
| 92 |
-
if re.match(r'^(Chương|Phần|Mục|Điều|\d+\.|\([a-z]\))', sent):
|
| 93 |
-
return False, sent
|
| 94 |
-
|
| 95 |
-
# Skip sentences with URLs or emails
|
| 96 |
-
if re.search(r'(http|www\.|@|\.com|\.vn)', sent, re.IGNORECASE):
|
| 97 |
-
return False, sent
|
| 98 |
-
|
| 99 |
-
# Skip sentences with excessive punctuation
|
| 100 |
-
punct_count = sum(1 for c in sent if c in '.,;:!?-–—()[]{}""\'\'«»')
|
| 101 |
-
if punct_count > len(words) * 1.5:
|
| 102 |
-
return False, sent
|
| 103 |
-
|
| 104 |
-
# Skip incomplete sentences (ending with ellipsis in middle)
|
| 105 |
-
if '...' in sent[:-5]:
|
| 106 |
-
return False, sent
|
| 107 |
-
|
| 108 |
-
# Skip dialogue-heavy sentences (too many quotes)
|
| 109 |
-
quote_count = sent.count('"') + sent.count('"') + sent.count('"')
|
| 110 |
-
if quote_count > 4:
|
| 111 |
-
return False, sent
|
| 112 |
-
|
| 113 |
-
return True, sent
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
def classify_book(genres):
|
| 117 |
-
"""Classify book as fiction or non-fiction based on genres."""
|
| 118 |
-
if not genres:
|
| 119 |
-
return None
|
| 120 |
-
|
| 121 |
-
genres_set = set(genres)
|
| 122 |
-
|
| 123 |
-
is_fiction = bool(genres_set & FICTION_GENRES)
|
| 124 |
-
is_non_fiction = bool(genres_set & NON_FICTION_GENRES)
|
| 125 |
-
|
| 126 |
-
if is_fiction and not is_non_fiction:
|
| 127 |
-
return "fiction"
|
| 128 |
-
elif is_non_fiction and not is_fiction:
|
| 129 |
-
return "non-fiction"
|
| 130 |
-
elif is_fiction and is_non_fiction:
|
| 131 |
-
# Prefer the dominant one
|
| 132 |
-
fiction_count = len(genres_set & FICTION_GENRES)
|
| 133 |
-
non_fiction_count = len(genres_set & NON_FICTION_GENRES)
|
| 134 |
-
return "fiction" if fiction_count > non_fiction_count else "non-fiction"
|
| 135 |
-
|
| 136 |
-
return None
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
def extract_sentences_from_book(content, max_sentences=500):
|
| 140 |
-
"""Extract high-quality sentences from book content."""
|
| 141 |
-
content = clean_text(content)
|
| 142 |
-
sentences = sent_tokenize(content)
|
| 143 |
-
|
| 144 |
-
valid_sentences = []
|
| 145 |
-
for sent in sentences:
|
| 146 |
-
is_valid, cleaned_sent = is_high_quality_sentence(sent)
|
| 147 |
-
if is_valid:
|
| 148 |
-
valid_sentences.append(cleaned_sent)
|
| 149 |
-
if len(valid_sentences) >= max_sentences:
|
| 150 |
-
break
|
| 151 |
-
|
| 152 |
-
return valid_sentences
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
def fetch_and_process():
|
| 156 |
-
print("Loading UVB-v0.1 dataset from HuggingFace...")
|
| 157 |
-
ds = load_dataset("undertheseanlp/UVB-v0.1", split="train")
|
| 158 |
-
|
| 159 |
-
print(f"Total books in dataset: {len(ds)}")
|
| 160 |
-
|
| 161 |
-
# Classify books
|
| 162 |
-
fiction_books = []
|
| 163 |
-
non_fiction_books = []
|
| 164 |
-
|
| 165 |
-
for book in ds:
|
| 166 |
-
genres = book.get("genres", [])
|
| 167 |
-
rating = book.get("goodreads_rating", 0) or 0
|
| 168 |
-
num_ratings = book.get("goodreads_num_ratings", 0) or 0
|
| 169 |
-
|
| 170 |
-
# Quality filter: prefer books with good ratings
|
| 171 |
-
quality_score = rating * min(num_ratings / 100, 10) # Weight by rating count
|
| 172 |
-
|
| 173 |
-
book_type = classify_book(genres)
|
| 174 |
-
book_info = {
|
| 175 |
-
"title": book["title"],
|
| 176 |
-
"content": book["content"],
|
| 177 |
-
"rating": rating,
|
| 178 |
-
"num_ratings": num_ratings,
|
| 179 |
-
"quality_score": quality_score,
|
| 180 |
-
"genres": genres
|
| 181 |
-
}
|
| 182 |
-
|
| 183 |
-
if book_type == "fiction":
|
| 184 |
-
fiction_books.append(book_info)
|
| 185 |
-
elif book_type == "non-fiction":
|
| 186 |
-
non_fiction_books.append(book_info)
|
| 187 |
-
|
| 188 |
-
print(f"Fiction books: {len(fiction_books)}")
|
| 189 |
-
print(f"Non-fiction books: {len(non_fiction_books)}")
|
| 190 |
-
|
| 191 |
-
# Sort by quality score (higher is better)
|
| 192 |
-
fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
|
| 193 |
-
non_fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
|
| 194 |
-
|
| 195 |
-
# Extract sentences from fiction books
|
| 196 |
-
print("\nExtracting sentences from fiction books...")
|
| 197 |
-
fiction_sentences = []
|
| 198 |
-
for i, book in enumerate(fiction_books):
|
| 199 |
-
if len(fiction_sentences) >= 5000:
|
| 200 |
-
break
|
| 201 |
-
sentences = extract_sentences_from_book(book["content"])
|
| 202 |
-
for sent in sentences:
|
| 203 |
-
if len(fiction_sentences) >= 5000:
|
| 204 |
-
break
|
| 205 |
-
fiction_sentences.append(sent)
|
| 206 |
-
print(f" [{i+1}/{len(fiction_books)}] {book['title'][:50]} - {len(sentences)} sentences (total: {len(fiction_sentences)})")
|
| 207 |
-
|
| 208 |
-
# Extract sentences from non-fiction books
|
| 209 |
-
print("\nExtracting sentences from non-fiction books...")
|
| 210 |
-
non_fiction_sentences = []
|
| 211 |
-
for i, book in enumerate(non_fiction_books):
|
| 212 |
-
if len(non_fiction_sentences) >= 5000:
|
| 213 |
-
break
|
| 214 |
-
sentences = extract_sentences_from_book(book["content"])
|
| 215 |
-
for sent in sentences:
|
| 216 |
-
if len(non_fiction_sentences) >= 5000:
|
| 217 |
-
break
|
| 218 |
-
non_fiction_sentences.append(sent)
|
| 219 |
-
print(f" [{i+1}/{len(non_fiction_books)}] {book['title'][:50]} - {len(sentences)} sentences (total: {len(non_fiction_sentences)})")
|
| 220 |
-
|
| 221 |
-
print(f"\nFiction sentences collected: {len(fiction_sentences)}")
|
| 222 |
-
print(f"Non-fiction sentences collected: {len(non_fiction_sentences)}")
|
| 223 |
-
|
| 224 |
-
# Combine all sentences
|
| 225 |
-
all_sentences = fiction_sentences[:5000] + non_fiction_sentences[:5000]
|
| 226 |
-
print(f"Total sentences: {len(all_sentences)}")
|
| 227 |
-
|
| 228 |
-
# Save to output file
|
| 229 |
-
output_dir = dirname(dirname(__file__))
|
| 230 |
-
output_file = join(output_dir, "sentences_uvb.txt")
|
| 231 |
-
|
| 232 |
-
with open(output_file, "w", encoding="utf-8") as f:
|
| 233 |
-
for i, sent in enumerate(all_sentences, 1):
|
| 234 |
-
source = "fiction" if i <= len(fiction_sentences[:5000]) else "non-fiction"
|
| 235 |
-
f.write(f"{i}\t{source}\t{sent}\n")
|
| 236 |
-
|
| 237 |
-
print(f"\nSaved to: {output_file}")
|
| 238 |
-
|
| 239 |
-
# Print samples
|
| 240 |
-
print("\nSample fiction sentences:")
|
| 241 |
-
for i, sent in enumerate(fiction_sentences[:3], 1):
|
| 242 |
-
print(f" {i}. {sent[:100]}...")
|
| 243 |
-
|
| 244 |
-
print("\nSample non-fiction sentences:")
|
| 245 |
-
for i, sent in enumerate(non_fiction_sentences[:3], 1):
|
| 246 |
-
print(f" {i}. {sent[:100]}...")
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
if __name__ == "__main__":
|
| 250 |
-
fetch_and_process()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
scripts/upload_to_hf.py
DELETED
|
@@ -1,80 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Upload UD dataset to HuggingFace Hub.
|
| 3 |
-
Dataset: undertheseanlp/UDD-v0.1
|
| 4 |
-
|
| 5 |
-
Usage:
|
| 6 |
-
export $(cat .env | xargs) && python upload_to_hf.py
|
| 7 |
-
"""
|
| 8 |
-
|
| 9 |
-
import json
|
| 10 |
-
import os
|
| 11 |
-
from os.path import expanduser, join
|
| 12 |
-
|
| 13 |
-
from datasets import Dataset, DatasetDict
|
| 14 |
-
from huggingface_hub import HfApi, login
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
def load_jsonl(filepath):
|
| 18 |
-
"""Load JSONL file."""
|
| 19 |
-
data = []
|
| 20 |
-
with open(filepath, "r", encoding="utf-8") as f:
|
| 21 |
-
for line in f:
|
| 22 |
-
data.append(json.loads(line))
|
| 23 |
-
return data
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
def main():
|
| 27 |
-
# Login with token from environment
|
| 28 |
-
token = os.environ.get("HF_TOKEN")
|
| 29 |
-
if token:
|
| 30 |
-
print("Logging in with HF_TOKEN...")
|
| 31 |
-
login(token=token)
|
| 32 |
-
else:
|
| 33 |
-
print("Warning: HF_TOKEN not set. Using cached credentials.")
|
| 34 |
-
|
| 35 |
-
source_folder = expanduser("~/Downloads/UD_Vietnamese-UUD-v0.1")
|
| 36 |
-
jsonl_file = join(source_folder, "train.jsonl")
|
| 37 |
-
readme_file = join(source_folder, "README.md")
|
| 38 |
-
|
| 39 |
-
print("Loading data...")
|
| 40 |
-
data = load_jsonl(jsonl_file)
|
| 41 |
-
print(f"Loaded {len(data)} sentences")
|
| 42 |
-
|
| 43 |
-
# Create HuggingFace Dataset
|
| 44 |
-
print("Creating HuggingFace Dataset...")
|
| 45 |
-
dataset = Dataset.from_list(data)
|
| 46 |
-
|
| 47 |
-
# Create DatasetDict with train split
|
| 48 |
-
dataset_dict = DatasetDict({
|
| 49 |
-
"train": dataset
|
| 50 |
-
})
|
| 51 |
-
|
| 52 |
-
print(f"Dataset: {dataset_dict}")
|
| 53 |
-
print(f"Features: {dataset.features}")
|
| 54 |
-
|
| 55 |
-
# Push to HuggingFace Hub
|
| 56 |
-
repo_id = "undertheseanlp/UDD-v0.1"
|
| 57 |
-
print(f"\nPushing to HuggingFace Hub: {repo_id}")
|
| 58 |
-
|
| 59 |
-
dataset_dict.push_to_hub(
|
| 60 |
-
repo_id,
|
| 61 |
-
private=False,
|
| 62 |
-
commit_message="Update: 1000 sentences from Vietnamese Legal Corpus"
|
| 63 |
-
)
|
| 64 |
-
|
| 65 |
-
# Upload README.md
|
| 66 |
-
print("Uploading README.md...")
|
| 67 |
-
api = HfApi()
|
| 68 |
-
api.upload_file(
|
| 69 |
-
path_or_fileobj=readme_file,
|
| 70 |
-
path_in_repo="README.md",
|
| 71 |
-
repo_id=repo_id,
|
| 72 |
-
repo_type="dataset",
|
| 73 |
-
commit_message="Add README with dataset card"
|
| 74 |
-
)
|
| 75 |
-
|
| 76 |
-
print(f"\nDone! Dataset available at: https://huggingface.co/datasets/{repo_id}")
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
if __name__ == "__main__":
|
| 80 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/eval_segmentation.py
ADDED
|
@@ -0,0 +1,1021 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Evaluate word segmentation quality of UDD-1 treebank.
|
| 3 |
+
|
| 4 |
+
Analyses:
|
| 5 |
+
1. Syllable distribution per token (by UPOS)
|
| 6 |
+
2. Anomalous token detection (long tokens, cross-boundary merges, legal terms)
|
| 7 |
+
3. Inconsistent segmentation (bigram vs single token)
|
| 8 |
+
4. Comparison with underthesea word_tokenize() (optional)
|
| 9 |
+
5. Manual review samples
|
| 10 |
+
6. Dictionary-based validation (optional) — uses Viet74K/UTS_Dictionary
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import argparse
|
| 14 |
+
import random
|
| 15 |
+
import re
|
| 16 |
+
import sys
|
| 17 |
+
from collections import Counter, defaultdict
|
| 18 |
+
from os.path import dirname, join, exists
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# Common Vietnamese legal-domain terms (each two syllables) used to check
# whether the treebank segments them consistently — i.e. always as one
# token ("vụ án") or always as two adjacent tokens ("vụ" + "án").
LEGAL_TERMS = [
    "vụ án", "hợp đồng", "tài sản", "pháp luật", "quy định",
    "nghị định", "cơ quan", "tổ chức", "cá nhân", "trách nhiệm",
    "quyền lợi", "nghĩa vụ", "xử phạt", "vi phạm", "bồi thường",
    "thẩm quyền", "giải quyết", "khiếu nại", "tố cáo", "hình sự",
]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def parse_conllu(filepath):
    """Parse a CoNLL-U file into a list of sentence dicts.

    Each sentence dict carries ``sent_id`` and ``text`` (from the comment
    lines) plus parallel lists ``tokens``, ``lemmas``, ``upos``, ``xpos``,
    ``head`` and ``deprel``. Multiword-token ranges (IDs like ``1-2``) and
    empty nodes (IDs like ``1.1``) are skipped.

    Args:
        filepath: path to a UTF-8 CoNLL-U file.

    Returns:
        List of sentence dicts (sentences with no tokens are dropped).
    """

    def _new_sentence():
        # Fresh accumulator — avoids duplicating this literal at both
        # initialization and sentence-boundary reset.
        return {
            "sent_id": None,
            "text": None,
            "tokens": [],
            "upos": [],
            "xpos": [],
            "deprel": [],
            "head": [],
            "lemmas": [],
        }

    sentences = []
    current = _new_sentence()

    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if not line.strip():
                # Blank line terminates the current sentence.
                if current["tokens"]:
                    sentences.append(current)
                current = _new_sentence()
            elif line.startswith("#"):
                if line.startswith("# sent_id"):
                    current["sent_id"] = line.split("=", 1)[1].strip()
                elif line.startswith("# text"):
                    current["text"] = line.split("=", 1)[1].strip()
            else:
                parts = line.split("\t")
                if len(parts) >= 10:
                    if "-" in parts[0] or "." in parts[0]:
                        continue  # multiword-token range or empty node
                    current["tokens"].append(parts[1])
                    current["lemmas"].append(parts[2])
                    current["upos"].append(parts[3])
                    current["xpos"].append(parts[4])
                    current["head"].append(parts[6])
                    current["deprel"].append(parts[7])

    # File may end without a trailing blank line.
    if current["tokens"]:
        sentences.append(current)

    return sentences
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def count_syllables(token):
    """Return the number of whitespace-separated syllables in *token*."""
    syllables = token.split()
    return len(syllables)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# ---- Analysis 1: Syllable distribution ----
|
| 89 |
+
|
| 90 |
+
def analysis_syllable_distribution(sentences):
    """Tally per-token syllable counts, overall and grouped by UPOS tag.

    Returns a (overall Counter, per-UPOS defaultdict of Counters,
    total token count) triple.
    """
    overall = Counter()
    by_upos = defaultdict(Counter)

    for sent in sentences:
        for tok, tag in zip(sent["tokens"], sent["upos"]):
            # Syllables are whitespace-separated within a token.
            n_syl = len(tok.split())
            overall[n_syl] += 1
            by_upos[tag][n_syl] += 1

    total_tokens = sum(overall.values())
    return overall, by_upos, total_tokens
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def format_syllable_report(overall, by_upos, total_tokens):
    """Format the syllable distribution as a markdown report.

    Tokens with 4 or more syllables are aggregated into a single "4+"
    row. The previous implementation iterated all keys and could emit
    several partial "4+" rows (one per key >= 4 when no exact-4 key
    existed) and then append yet another aggregated row, double-counting
    the bucket; this version computes the bucket exactly once.

    Args:
        overall: Counter mapping syllable count -> token count.
        by_upos: mapping UPOS -> Counter of syllable counts.
        total_tokens: total number of tokens (denominator for percentages).

    Returns:
        The report as a single markdown string.
    """
    lines = []
    lines.append("## 1. Syllable Distribution per Token")
    lines.append("")
    lines.append("### 1.1 Overall Distribution")
    lines.append("")
    lines.append("| Syllables | Count | Percentage |")
    lines.append("|---:|---:|---:|")

    # Exact rows for 1-3 syllables.
    for n in sorted(k for k in overall if k < 4):
        count = overall[n]
        pct = count / total_tokens * 100
        lines.append(f"| {n} | {count:,} | {pct:.2f}% |")

    # Single aggregated row for everything at 4+ syllables.
    count_4p = sum(v for k, v in overall.items() if k >= 4)
    if count_4p:
        pct = count_4p / total_tokens * 100
        lines.append(f"| 4+ | {count_4p:,} | {pct:.2f}% |")

    lines.append("")
    lines.append("### 1.2 Distribution by UPOS")
    lines.append("")

    # Ten most frequent UPOS tags.
    upos_totals = {upos: sum(counts.values()) for upos, counts in by_upos.items()}
    top_upos = sorted(upos_totals, key=upos_totals.get, reverse=True)[:10]

    lines.append("| UPOS | 1-syl | 2-syl | 3-syl | 4+-syl | Total | Avg syl |")
    lines.append("|:---|---:|---:|---:|---:|---:|---:|")

    for upos in top_upos:
        counts = by_upos[upos]
        total = upos_totals[upos]
        s1 = counts.get(1, 0)
        s2 = counts.get(2, 0)
        s3 = counts.get(3, 0)
        s4p = sum(counts[k] for k in counts if k >= 4)
        avg = sum(k * counts[k] for k in counts) / total if total else 0
        lines.append(
            f"| {upos} | {s1:,} | {s2:,} | {s3:,} | {s4p:,} | {total:,} | {avg:.2f} |"
        )

    lines.append("")
    return "\n".join(lines)
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
# ---- Analysis 2: Anomalous tokens ----
|
| 162 |
+
|
| 163 |
+
def analysis_anomalous_tokens(sentences):
    """Detect anomalous tokens in a single pass over the corpus.

    The original implementation rescanned every sentence once per legal
    term (20 full corpus passes); this version accumulates per-sentence
    token and bigram Counters and looks terms up in them, giving one pass
    total with identical results.

    Returns:
        long_tokens: occurrence records for tokens with 4+ syllables.
        long_token_counter: Counter of those token strings.
        cross_boundary: non-PROPN tokens containing a capitalised
            non-initial syllable (likely two words merged incorrectly).
        cross_boundary_counter: Counter of those token strings.
        legal_term_stats: per-term counts of single-token vs split usage,
            with a dominant-variant consistency percentage.
    """
    long_tokens = []
    cross_boundary = []

    term_single = Counter()
    term_split = Counter()
    # Pre-split the terms once; all current LEGAL_TERMS are bisyllabic,
    # but the split check below still guards on len(parts) == 2.
    term_parts = {term: term.split() for term in LEGAL_TERMS}

    for sent in sentences:
        tokens = sent["tokens"]
        lowered = [t.lower() for t in tokens]

        for token, upos in zip(tokens, sent["upos"]):
            # 2a: long tokens (4+ whitespace-separated syllables).
            n = len(token.split())
            if n >= 4:
                long_tokens.append({
                    "sent_id": sent["sent_id"],
                    "token": token,
                    "upos": upos,
                    "syllables": n,
                })
            # 2b: cross-boundary merge — a capital letter starting a
            # non-first syllable of a non-proper-noun token.
            if upos != "PROPN" and " " in token:
                syllables = token.split()
                if any(s[0].isupper() for s in syllables[1:] if s):
                    cross_boundary.append({
                        "sent_id": sent["sent_id"],
                        "token": token,
                        "upos": upos,
                    })

        # 2c: legal-term tallies via per-sentence lookup tables.
        tok_counts = Counter(lowered)
        bigram_counts = Counter(zip(lowered, lowered[1:]))
        for term, parts in term_parts.items():
            term_single[term] += tok_counts[term]
            if len(parts) == 2:
                term_split[term] += bigram_counts[(parts[0], parts[1])]

    long_token_counter = Counter(t["token"] for t in long_tokens)
    cross_boundary_counter = Counter(t["token"] for t in cross_boundary)

    legal_term_stats = {}
    for term in LEGAL_TERMS:
        as_single = term_single[term]
        as_split = term_split[term]
        total = as_single + as_split
        if total > 0:
            legal_term_stats[term] = {
                "as_single": as_single,
                "as_split": as_split,
                "total": total,
                "consistency": max(as_single, as_split) / total * 100,
            }

    return long_tokens, long_token_counter, cross_boundary, cross_boundary_counter, legal_term_stats
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def format_anomalous_report(long_tokens, long_token_counter, cross_boundary,
                            cross_boundary_counter, legal_term_stats):
    """Format the anomalous-token analysis as markdown.

    First occurrences are pre-indexed in a single linear pass, replacing
    the original per-row ``next(...)`` scans over the full occurrence
    lists (O(rows x occurrences)). Output is unchanged because
    ``next`` also returned the first matching occurrence.
    """
    # token -> first occurrence record.
    first_long = {}
    for t in long_tokens:
        first_long.setdefault(t["token"], t)
    first_cross = {}
    for t in cross_boundary:
        first_cross.setdefault(t["token"], t)

    lines = []
    lines.append("## 2. Anomalous Token Detection")
    lines.append("")

    # 2a: Long tokens
    lines.append("### 2a. Long Tokens (4+ syllables)")
    lines.append("")
    lines.append(f"Total occurrences: {len(long_tokens):,}")
    lines.append(f"Unique tokens: {len(long_token_counter):,}")
    lines.append("")
    lines.append("**Top 30 by frequency:**")
    lines.append("")
    lines.append("| Token | Count | UPOS | Syllables |")
    lines.append("|:---|---:|:---|---:|")
    for token, count in long_token_counter.most_common(30):
        upos = first_long[token]["upos"]
        n_syl = len(token.split())
        lines.append(f"| {token} | {count} | {upos} | {n_syl} |")
    lines.append("")

    # 2b: Cross-boundary merges
    lines.append("### 2b. Possible Cross-Boundary Merges")
    lines.append("")
    lines.append("Tokens (non-PROPN) with uppercase letters after spaces, suggesting")
    lines.append("incorrect merging of adjacent words.")
    lines.append("")
    lines.append(f"Total occurrences: {len(cross_boundary):,}")
    lines.append(f"Unique tokens: {len(cross_boundary_counter):,}")
    lines.append("")
    if cross_boundary_counter:
        lines.append("| Token | Count | UPOS | Example sent_id |")
        lines.append("|:---|---:|:---|:---|")
        for token, count in cross_boundary_counter.most_common(30):
            example = first_cross[token]
            lines.append(
                f"| {token} | {count} | {example['upos']} | {example['sent_id']} |"
            )
    else:
        lines.append("No cross-boundary merges detected.")
    lines.append("")

    # 2c: Legal terms
    lines.append("### 2c. Legal Term Segmentation Consistency")
    lines.append("")
    lines.append("| Term | As Single Token | As Split Tokens | Total | Consistency |")
    lines.append("|:---|---:|---:|---:|---:|")
    for term in sorted(legal_term_stats, key=lambda t: legal_term_stats[t]["total"], reverse=True):
        s = legal_term_stats[term]
        dominant = "single" if s["as_single"] >= s["as_split"] else "split"
        lines.append(
            f"| {term} | {s['as_single']:,} | {s['as_split']:,} | {s['total']:,} | {s['consistency']:.1f}% ({dominant}) |"
        )
    lines.append("")
    return "\n".join(lines)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
# ---- Analysis 3: Inconsistent segmentation ----
|
| 294 |
+
|
| 295 |
+
def analysis_inconsistency(sentences):
    """Report forms that occur both as one token and as two adjacent tokens.

    Returns a dict mapping the lowercased form to its counts:
    ``as_split`` (seen as an adjacent pair) and ``as_single`` (seen as a
    single token).
    """
    # Lowercased vocabulary of every token form in the corpus.
    vocab = set()
    for sent in sentences:
        vocab.update(tok.lower() for tok in sent["tokens"])

    # Adjacent-pair spellings that also exist as a single token somewhere.
    split_counts = Counter()
    for sent in sentences:
        toks = sent["tokens"]
        for left, right in zip(toks, toks[1:]):
            joined = left.lower() + " " + right.lower()
            if joined in vocab:
                split_counts[joined] += 1

    # How often each such form occurs as one token.
    single_counts = Counter()
    for sent in sentences:
        for tok in sent["tokens"]:
            form = tok.lower()
            if form in split_counts:
                single_counts[form] += 1

    return {
        form: {"as_split": split_counts[form], "as_single": single_counts.get(form, 0)}
        for form in split_counts
    }
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def format_inconsistency_report(inconsistencies):
    """Render the inconsistent-segmentation findings as markdown."""
    header = [
        "## 3. Inconsistent Segmentation",
        "",
        "Cases where two adjacent tokens appear elsewhere as a single token,",
        "or vice versa. Sorted by total occurrences.",
        "",
    ]
    if not inconsistencies:
        return "\n".join(header + ["No inconsistencies found.", ""])

    body = [
        f"Total inconsistent forms: {len(inconsistencies):,}",
        "",
        "| Token | As Single | As Split (bigram) | Total |",
        "|:---|---:|---:|---:|",
    ]
    # Most frequent forms first; show at most 50 rows.
    ranked = sorted(
        inconsistencies.items(),
        key=lambda item: item[1]["as_single"] + item[1]["as_split"],
        reverse=True,
    )
    for form, counts in ranked[:50]:
        combined = counts["as_single"] + counts["as_split"]
        body.append(
            f"| {form} | {counts['as_single']:,} | {counts['as_split']:,} | {combined:,} |"
        )
    body.append("")
    return "\n".join(header + body)
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
# ---- Analysis 4: Comparison with word_tokenize ----
|
| 366 |
+
|
| 367 |
+
def analysis_compare_tokenize(sentences, sample_size=300):
    """Compare treebank tokenization against underthesea's word_tokenize().

    Returns (results, stats), or (None, None) when underthesea is not
    installed or no sentence carries raw text.
    """
    try:
        from underthesea import word_tokenize
    except ImportError:
        return None, None

    # Only sentences that carry raw text can be re-tokenized.
    candidates = [s for s in sentences if s["text"]]
    if not candidates:
        return None, None

    random.seed(42)
    sample = random.sample(candidates, min(sample_size, len(candidates)))

    results = []
    matches = 0
    diff_categories = Counter()

    for sent in sample:
        gold = sent["tokens"]
        # word_tokenize joins multi-syllable words with underscores;
        # normalize back to spaces before comparing.
        hyp = [t.replace("_", " ") for t in word_tokenize(sent["text"])]

        same = gold == hyp
        diffs = [] if same else find_token_diffs(gold, hyp)
        if same:
            matches += 1
        for d in diffs:
            diff_categories[d["type"]] += 1
        results.append({
            "sent_id": sent["sent_id"],
            "match": same,
            "parser_tokens": gold,
            "wt_tokens": hyp,
            "diffs": diffs,
        })

    stats = {
        "sample_size": len(sample),
        "match_count": matches,
        "mismatch_count": len(sample) - matches,
        "match_rate": matches / len(sample) * 100 if sample else 0,
        "diff_categories": diff_categories,
    }
    return results, stats
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
def find_token_diffs(parser_tokens, wt_tokens):
    """Align two tokenizations syllable-by-syllable and record boundary diffs.

    Alignment is purely positional: the i-th syllable of one side is
    paired with the i-th of the other. When the paired syllables match
    but belong to tokens at different positions, the diff is classified
    by comparing token lengths (parser_merges / parser_splits /
    boundary_shift); mismatching syllables are skipped.
    """

    def _flatten(tokens):
        # (syllable, owning-token-index) pairs in reading order.
        return [(syl, idx) for idx, tok in enumerate(tokens) for syl in tok.split()]

    left = _flatten(parser_tokens)
    right = _flatten(wt_tokens)

    diffs = []
    for (l_syl, l_idx), (r_syl, r_idx) in zip(left, right):
        if l_syl != r_syl:
            continue  # no reliable alignment for this position
        if l_idx == r_idx:
            continue  # same boundary on both sides
        p_tok = parser_tokens[l_idx]
        w_tok = wt_tokens[r_idx]
        p_len = len(p_tok.split())
        w_len = len(w_tok.split())
        if p_len > w_len:
            kind = "parser_merges"
        elif p_len < w_len:
            kind = "parser_splits"
        else:
            kind = "boundary_shift"
        diffs.append({"type": kind, "parser": p_tok, "wt": w_tok})

    return diffs
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
def format_compare_report(results, stats):
    """Render the word_tokenize() comparison as markdown."""
    out = ["## 4. Comparison with `word_tokenize()`", ""]

    if results is None:
        out.append("**Skipped**: `underthesea` not available or not requested. "
                   "Use `--compare-tokenize` to enable.")
        out.append("")
        return "\n".join(out)

    out.append(f"Sample size: {stats['sample_size']} sentences")
    out.append(f"- Exact match: {stats['match_count']} ({stats['match_rate']:.1f}%)")
    out.append(f"- Mismatch: {stats['mismatch_count']} ({100 - stats['match_rate']:.1f}%)")
    out.append("")

    if stats["match_rate"] > 99.0:
        # Near-total agreement means both tools share one tokenizer.
        out.extend([
            "### Finding: Shared Tokenizer",
            "",
            "The near-100% match rate confirms that Underthesea's `dependency_parse()` "
            "internally uses the same `word_tokenize()` model for segmentation. "
            "This means segmentation errors in UDD-1 are inherent to the Underthesea "
            "tokenizer and cannot be detected by comparing against `word_tokenize()`. "
            "A meaningful comparison would require an independent segmentation tool "
            "(e.g., VnCoreNLP, pyvi) or gold-standard segmented data.",
            "",
        ])

    if stats["diff_categories"]:
        out.extend([
            "### Difference Categories",
            "",
            "| Category | Count | Description |",
            "|:---|---:|:---|",
        ])
        descriptions = {
            "parser_merges": "Parser joins tokens that word_tokenize keeps separate",
            "parser_splits": "Parser splits tokens that word_tokenize joins",
            "boundary_shift": "Different token boundary placement",
        }
        for cat, count in stats["diff_categories"].most_common():
            out.append(f"| {cat} | {count} | {descriptions.get(cat, cat)} |")
        out.append("")

    mismatched = [r for r in results if not r["match"]]
    if mismatched:
        out.append("### Sample Mismatches (first 20)")
        out.append("")
        for r in mismatched[:20]:
            out.append(f"**{r['sent_id']}**")
            out.append(f"- Parser: `{'` `'.join(r['parser_tokens'])}`")
            out.append(f"- word_tokenize: `{'` `'.join(r['wt_tokens'])}`")
            if r["diffs"]:
                shown = [f"{d['type']}: \"{d['parser']}\" vs \"{d['wt']}\"" for d in r["diffs"][:5]]
                out.append(f"- Diffs: {'; '.join(shown)}")
            out.append("")

    out.append("")
    return "\n".join(out)
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
# ---- Analysis 5: Manual review samples ----
|
| 534 |
+
|
| 535 |
+
def analysis_manual_samples(sentences, long_tokens, cross_boundary, inconsistencies,
                            compare_results=None, n_samples=100):
    """Draw a mix of suspicious and random sentences for manual review.

    Roughly 30% of the sample comes from sentences flagged by earlier
    analyses (long tokens, cross-boundary merges, inconsistent forms);
    the remainder is random. Seeded for reproducibility.
    """
    # Sentence IDs flagged by any earlier analysis.
    flagged_ids = {t["sent_id"] for t in long_tokens}
    flagged_ids.update(t["sent_id"] for t in cross_boundary)

    inconsistent_forms = set(inconsistencies) if inconsistencies else set()
    for sent in sentences:
        if any(tok.lower() in inconsistent_forms for tok in sent["tokens"]):
            flagged_ids.add(sent["sent_id"])

    by_id = {s["sent_id"]: s for s in sentences}

    # Target mix: ~30% suspicious, rest random.
    n_flagged = min(int(n_samples * 0.3), len(flagged_ids))
    n_rand = n_samples - n_flagged

    random.seed(42)
    flagged_pick = random.sample(sorted(flagged_ids), min(n_flagged, len(flagged_ids)))
    unflagged = [s["sent_id"] for s in sentences if s["sent_id"] not in flagged_ids]
    random_pick = random.sample(unflagged, min(n_rand, len(unflagged)))

    # sent_id -> comparison record (when a comparison ran).
    wt_by_id = {r["sent_id"]: r for r in (compare_results or [])}

    samples = []
    for sid in flagged_pick + random_pick:
        sent = by_id.get(sid)
        if not sent:
            continue

        flags = []
        # Long tokens (4+ syllables).
        for token in sent["tokens"]:
            if len(token.split()) >= 4:
                flags.append(f"long_token: \"{token}\"")
        # Cross-boundary merges (capitalised non-initial syllable).
        for token, upos in zip(sent["tokens"], sent["upos"]):
            if upos != "PROPN" and " " in token:
                parts = token.split()
                if any(p[0].isupper() for p in parts[1:] if p):
                    flags.append(f"cross_boundary: \"{token}\"")
        # Adjacent pairs that elsewhere appear as a single token.
        for i in range(len(sent["tokens"]) - 1):
            joined = sent["tokens"][i].lower() + " " + sent["tokens"][i + 1].lower()
            if joined in inconsistent_forms:
                flags.append(f"inconsistent: \"{sent['tokens'][i]}\" + \"{sent['tokens'][i+1]}\" (also as \"{joined}\")")

        wt = wt_by_id.get(sid)
        samples.append({
            "sent_id": sid,
            "text": sent["text"],
            "tokens": sent["tokens"],
            "upos": sent["upos"],
            "flags": flags,
            "wt_tokens": wt["wt_tokens"] if wt else None,
            "is_suspicious": sid in flagged_ids,
        })

    return samples
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
def format_samples_report(samples):
    """Render the manual-review samples as markdown."""
    n_flagged = sum(1 for s in samples if s["is_suspicious"])
    out = [
        "## 5. Manual Review Samples",
        "",
        f"Total samples: {len(samples)} ({n_flagged} suspicious, {len(samples) - n_flagged} random)",
        "",
    ]

    for idx, s in enumerate(samples, 1):
        label = "SUSPICIOUS" if s["is_suspicious"] else "RANDOM"
        out.append(f"### Sample {idx} [{label}] — {s['sent_id']}")
        out.append("")
        if s["text"]:
            out.append(f"**Text:** {s['text']}")
        out.append(f"**Tokens:** `{'` `'.join(s['tokens'])}`")
        out.append(f"**UPOS:** {' '.join(s['upos'])}")
        if s["wt_tokens"]:
            out.append(f"**word_tokenize:** `{'` `'.join(s['wt_tokens'])}`")
        if s["flags"]:
            out.append(f"**Flags:** {'; '.join(s['flags'])}")
        out.append("")

    return "\n".join(out)
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
# ---- Analysis 6: Dictionary-based validation ----
|
| 644 |
+
|
| 645 |
+
def load_dictionary():
    """Try to load a Vietnamese word list from underthesea.

    Tries the Viet74K corpus first, then the UTS_Dictionary dataset.
    Returns (word_set, source_name), or (None, None) when neither
    backend is importable.
    """

    def _viet74k():
        from underthesea.corpus import viet_dict_74K
        return viet_dict_74K.words, "Viet74K"

    def _uts():
        from underthesea.datasets.uts_dictionary import UTSDictionary
        return UTSDictionary().words, "UTS_Dictionary"

    for loader in (_viet74k, _uts):
        try:
            raw_words, source = loader()
        except Exception:
            # Best-effort: fall through to the next backend.
            continue
        return {w.lower().strip() for w in raw_words if w.strip()}, source

    return None, None
|
| 662 |
+
|
| 663 |
+
|
| 664 |
+
def analysis_dictionary_validation(sentences, dict_set):
    """Validate word segmentation against a dictionary word list.

    Three checks:
      A) coverage — share of tokens found in the dictionary (PUNCT/NUM/SYM
         tokens are counted as covered rather than looked up);
      B) under-segmentation — multi-syllable OOV tokens whose individual
         syllables are all dictionary words (possible over-merge);
      C) over-segmentation — adjacent token pairs/triples whose joined
         form is a dictionary word (possible under-merge).
    """
    skip_upos = {"PUNCT", "NUM", "SYM"}
    func_upos = {"ADP", "AUX", "CCONJ", "SCONJ", "DET", "PART", "PUNCT"}

    total_tokens = 0
    in_dict = 0
    not_in_dict = 0
    oov_counter = Counter()
    oov_by_upos = defaultdict(Counter)  # upos -> {form: count}
    in_dict_by_upos = Counter()
    total_by_upos = Counter()

    multi_oov_counter = Counter()
    under_seg_candidates = []

    over_seg_counter = Counter()
    over_seg_examples = {}  # joined form -> first sent_id seen

    for sent in sentences:
        tokens = sent["tokens"]
        upos_list = sent["upos"]

        # A + B: per-token coverage and under-segmentation check.
        for token, upos in zip(tokens, upos_list):
            form = token.lower().strip()
            total_tokens += 1
            total_by_upos[upos] += 1

            if upos in skip_upos:
                # Punctuation/numbers/symbols: not meaningful for dict
                # lookup; counted as covered.
                in_dict += 1
                in_dict_by_upos[upos] += 1
                continue

            if form in dict_set:
                in_dict += 1
                in_dict_by_upos[upos] += 1
                continue

            not_in_dict += 1
            oov_counter[form] += 1
            oov_by_upos[upos][form] += 1

            # Under-segmentation: OOV as a whole, known piece by piece.
            syllables = form.split()
            if len(syllables) >= 2 and all(s in dict_set for s in syllables):
                multi_oov_counter[form] += 1
                under_seg_candidates.append({
                    "sent_id": sent["sent_id"],
                    "token": token,
                    "upos": upos,
                    "syllables": syllables,
                })

        # C: adjacent pairs forming a dictionary word.
        for i in range(len(tokens) - 1):
            # Two function words together are a likely false positive.
            if upos_list[i] in func_upos and upos_list[i + 1] in func_upos:
                continue
            if upos_list[i] == "PUNCT" or upos_list[i + 1] == "PUNCT":
                continue
            pair = tokens[i].lower().strip() + " " + tokens[i + 1].lower().strip()
            if pair in dict_set:
                over_seg_counter[pair] += 1
                over_seg_examples.setdefault(pair, sent["sent_id"])

        # C continued: trigrams.
        for i in range(len(tokens) - 2):
            # NOTE(review): only the outer tokens are checked for PUNCT
            # here (mirrors the original behavior) — confirm whether the
            # middle token should be checked as well.
            if upos_list[i] == "PUNCT" or upos_list[i + 2] == "PUNCT":
                continue
            triple = " ".join(
                t.lower().strip()
                for t in (tokens[i], tokens[i + 1], tokens[i + 2])
            )
            if triple in dict_set:
                over_seg_counter[triple] += 1
                over_seg_examples.setdefault(triple, sent["sent_id"])

    return {
        "total_tokens": total_tokens,
        "in_dict": in_dict,
        "not_in_dict": not_in_dict,
        "coverage": in_dict / total_tokens * 100 if total_tokens else 0,
        "oov_counter": oov_counter,
        "oov_by_upos": oov_by_upos,
        "in_dict_by_upos": in_dict_by_upos,
        "total_by_upos": total_by_upos,
        "multi_oov_counter": multi_oov_counter,
        "under_seg_candidates": under_seg_candidates,
        "over_seg_counter": over_seg_counter,
        "over_seg_examples": over_seg_examples,
    }
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
def format_dictionary_report(stats, dict_name, dict_size=0):
    """Format dictionary validation results as a markdown report section.

    Args:
        stats: Result dict from ``analysis_dictionary_validation()``, or
            ``None`` when the analysis was skipped or no dictionary loaded.
        dict_name: Human-readable name of the dictionary used (may be
            ``None`` when ``stats`` is ``None``).
        dict_size: Number of entries in the dictionary, shown in the header.

    Returns:
        Markdown text for section "## 6. Dictionary-Based Validation".
        When ``stats`` is ``None`` only a short "Skipped" notice is emitted.
    """
    lines = []
    lines.append("## 6. Dictionary-Based Validation")
    lines.append("")

    if stats is None:
        lines.append("**Skipped**: Dictionary not available or not requested. "
                     "Use `--dict-validate` to enable.")
        lines.append("")
        return "\n".join(lines)

    lines.append(f"**Dictionary:** {dict_name}")
    lines.append(f"**Dictionary size:** {dict_size:,} entries")
    lines.append("")

    # A) Coverage
    lines.append("### 6a. Token Coverage")
    lines.append("")
    lines.append("| Metric | Count | Percentage |")
    lines.append("|:---|---:|---:|")
    lines.append(f"| In dictionary | {stats['in_dict']:,} | {stats['coverage']:.1f}% |")
    # Guard the division: an empty corpus gives total_tokens == 0, and the
    # 'coverage' value upstream is guarded the same way.
    oov_pct = (stats['not_in_dict'] / stats['total_tokens'] * 100
               if stats['total_tokens'] else 0)
    lines.append(f"| Out-of-vocabulary (OOV) | {stats['not_in_dict']:,} | {oov_pct:.1f}% |")
    lines.append(f"| Total (excl. PUNCT/NUM/SYM) | {stats['total_tokens']:,} | 100% |")
    lines.append("")

    # Coverage broken down by UPOS (12 most frequent tags)
    lines.append("**Coverage by UPOS** (top tags):")
    lines.append("")
    lines.append("| UPOS | In Dict | Total | Coverage |")
    lines.append("|:---|---:|---:|---:|")
    for upos in sorted(stats["total_by_upos"], key=stats["total_by_upos"].get, reverse=True)[:12]:
        total = stats["total_by_upos"][upos]
        in_d = stats["in_dict_by_upos"].get(upos, 0)
        cov = in_d / total * 100 if total else 0
        lines.append(f"| {upos} | {in_d:,} | {total:,} | {cov:.1f}% |")
    lines.append("")

    # Top OOV tokens
    lines.append("**Top 30 OOV tokens:**")
    lines.append("")
    lines.append("| Token | Count | UPOS |")
    lines.append("|:---|---:|:---|")
    for token, count in stats["oov_counter"].most_common(30):
        # Find the first UPOS bucket that contains this token; "?" if none.
        upos_for_token = "?"
        for upos, tokens in stats["oov_by_upos"].items():
            if token in tokens:
                upos_for_token = upos
                break
        lines.append(f"| {token} | {count} | {upos_for_token} |")
    lines.append("")

    # B) Under-segmentation candidates
    lines.append("### 6b. Possible Under-Segmentation (Over-Merged Tokens)")
    lines.append("")
    lines.append("Multi-syllable tokens NOT in dictionary, but all individual syllables ARE")
    lines.append("in dictionary. These may be incorrectly merged by the tokenizer.")
    lines.append("")
    n_under = sum(stats["multi_oov_counter"].values())
    lines.append(f"Total occurrences: {n_under:,}")
    lines.append(f"Unique forms: {len(stats['multi_oov_counter']):,}")
    lines.append("")
    if stats["multi_oov_counter"]:
        lines.append("| Token | Count | Sub-parts |")
        lines.append("|:---|---:|:---|")
        for token, count in stats["multi_oov_counter"].most_common(40):
            parts = " + ".join(token.split())
            lines.append(f"| {token} | {count} | {parts} |")
        lines.append("")

    # C) Over-segmentation candidates
    lines.append("### 6c. Possible Over-Segmentation (Under-Merged Tokens)")
    lines.append("")
    lines.append("Adjacent tokens that together form a word found in the dictionary.")
    lines.append("These may be incorrectly split by the tokenizer.")
    lines.append("")
    n_over = sum(stats["over_seg_counter"].values())
    lines.append(f"Total occurrences: {n_over:,}")
    lines.append(f"Unique dictionary words split: {len(stats['over_seg_counter']):,}")
    lines.append("")
    if stats["over_seg_counter"]:
        lines.append("| Dictionary Word | Times Split | Example sent_id |")
        lines.append("|:---|---:|:---|")
        for word, count in stats["over_seg_counter"].most_common(50):
            example_id = stats["over_seg_examples"].get(word, "?")
            lines.append(f"| {word} | {count} | {example_id} |")
        lines.append("")

    # Summary
    lines.append("### 6d. Summary")
    lines.append("")
    lines.append(f"- **Dictionary coverage**: {stats['coverage']:.1f}% of tokens are known words")
    lines.append(f"- **Possible over-merges**: {len(stats['multi_oov_counter']):,} unique multi-syllable "
                 f"OOV forms ({n_under:,} occurrences)")
    lines.append(f"- **Possible under-merges**: {len(stats['over_seg_counter']):,} unique dictionary words "
                 f"found split ({n_over:,} occurrences)")
    lines.append("")

    return "\n".join(lines)
|
| 871 |
+
|
| 872 |
+
|
| 873 |
+
# ---- Main ----

def main():
    """CLI entry point: parse CoNLL-U files, run analyses 1-6, write report.

    Reads one or more CoNLL-U files (defaults to the UDD-1 train split),
    runs the segmentation analyses, and writes a markdown report to
    ``--output``. Analyses 4 and 6 are opt-in via ``--compare-tokenize``
    and ``--dict-validate`` respectively.
    """
    parser = argparse.ArgumentParser(description="Evaluate UDD-1 word segmentation quality")
    parser.add_argument(
        "-i", "--input", nargs="+",
        help="Input CoNLL-U files. If not specified, uses default UDD-1 files.",
    )
    parser.add_argument(
        "--all-files", action="store_true",
        help="Use all UDD-1 files (train, dev, test)",
    )
    parser.add_argument(
        "-o", "--output", default="SEGMENTATION_EVAL.md",
        help="Output markdown report file (default: SEGMENTATION_EVAL.md)",
    )
    parser.add_argument(
        "--compare-tokenize", action="store_true",
        help="Compare with underthesea word_tokenize() (requires underthesea)",
    )
    parser.add_argument(
        "--sample-size", type=int, default=300,
        help="Number of sentences to sample for word_tokenize comparison (default: 300)",
    )
    parser.add_argument(
        "--review-samples", type=int, default=100,
        help="Number of manual review samples (default: 100)",
    )
    parser.add_argument(
        "--dict-validate", action="store_true",
        help="Validate segmentation against Vietnamese dictionary (requires underthesea)",
    )
    args = parser.parse_args()

    # Repo root: this script is assumed to live one directory below it.
    base_dir = dirname(dirname(__file__))

    # Determine input files
    if args.all_files:
        input_files = [
            join(base_dir, "vi_udd-ud-train.conllu"),
            join(base_dir, "vi_udd-ud-dev.conllu"),
            join(base_dir, "vi_udd-ud-test.conllu"),
        ]
    elif args.input:
        input_files = args.input
    else:
        input_files = [join(base_dir, "vi_udd-ud-train.conllu")]

    # Parse all files; missing files are warned about, not fatal.
    print(f"Parsing {len(input_files)} file(s)...")
    all_sentences = []
    for filepath in input_files:
        if not exists(filepath):
            print(f" WARNING: {filepath} not found, skipping")
            continue
        sents = parse_conllu(filepath)
        print(f" {filepath}: {len(sents):,} sentences")
        all_sentences.extend(sents)

    print(f"Total: {len(all_sentences):,} sentences, "
          f"{sum(len(s['tokens']) for s in all_sentences):,} tokens")
    print()

    # Run analyses; each format_* helper returns a markdown section.
    report_parts = []
    report_parts.append("# UDD-1 Word Segmentation Evaluation")
    report_parts.append("")
    report_parts.append(f"**Files analyzed:** {', '.join(f.split('/')[-1] for f in input_files)}")
    report_parts.append(f"**Total sentences:** {len(all_sentences):,}")
    report_parts.append(f"**Total tokens:** {sum(len(s['tokens']) for s in all_sentences):,}")
    report_parts.append("")

    # Analysis 1
    print("Analysis 1: Syllable distribution...")
    overall, by_upos, total_tokens = analysis_syllable_distribution(all_sentences)
    report_parts.append(format_syllable_report(overall, by_upos, total_tokens))

    # Analysis 2
    print("Analysis 2: Anomalous tokens...")
    long_tokens, long_counter, cross_boundary, cross_counter, legal_stats = \
        analysis_anomalous_tokens(all_sentences)
    report_parts.append(format_anomalous_report(
        long_tokens, long_counter, cross_boundary, cross_counter, legal_stats))

    # Analysis 3
    print("Analysis 3: Inconsistent segmentation...")
    inconsistencies = analysis_inconsistency(all_sentences)
    report_parts.append(format_inconsistency_report(inconsistencies))

    # Analysis 4 (opt-in: needs underthesea)
    compare_results = None
    compare_stats = None
    if args.compare_tokenize:
        print(f"Analysis 4: Comparing with word_tokenize() (sample={args.sample_size})...")
        compare_results, compare_stats = analysis_compare_tokenize(
            all_sentences, sample_size=args.sample_size)
    else:
        print("Analysis 4: Skipped (use --compare-tokenize to enable)")
    report_parts.append(format_compare_report(compare_results, compare_stats))

    # Analysis 5
    print(f"Analysis 5: Manual review samples (n={args.review_samples})...")
    samples = analysis_manual_samples(
        all_sentences, long_tokens, cross_boundary, inconsistencies,
        compare_results=compare_results, n_samples=args.review_samples,
    )
    report_parts.append(format_samples_report(samples))

    # Analysis 6 (opt-in: needs a dictionary)
    dict_stats = None
    dict_name = None
    # FIX: dict_set must be pre-initialized; previously it was only assigned
    # inside the --dict-validate branch, so the `len(dict_set)` expression
    # below raised NameError on every run without --dict-validate.
    dict_set = None
    if args.dict_validate:
        print("Analysis 6: Dictionary-based validation...")
        dict_set, dict_name = load_dictionary()
        if dict_set:
            print(f" Dictionary: {dict_name} ({len(dict_set):,} entries)")
            dict_stats = analysis_dictionary_validation(all_sentences, dict_set)
        else:
            print(" WARNING: No dictionary available, skipping")
    else:
        print("Analysis 6: Skipped (use --dict-validate to enable)")
    dict_size = len(dict_set) if dict_set else 0
    report_parts.append(format_dictionary_report(dict_stats, dict_name, dict_size))

    # Write report. NOTE(review): the "/" prefix check assumes POSIX paths;
    # os.path.isabs would be portable — confirm target platforms before changing.
    output_path = args.output
    if not output_path.startswith("/"):
        output_path = join(base_dir, output_path)

    report = "\n".join(report_parts)
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(report)

    # Console summary mirroring the report's key counts.
    print(f"\nReport written to: {output_path}")
    print(f" Total inconsistent forms: {len(inconsistencies):,}")
    print(f" Long tokens (4+ syl): {len(long_tokens):,} occurrences")
    print(f" Cross-boundary candidates: {len(cross_boundary):,} occurrences")
    if compare_stats:
        print(f" word_tokenize match rate: {compare_stats['match_rate']:.1f}%")
    if dict_stats:
        print(f" Dictionary coverage: {dict_stats['coverage']:.1f}%")
        n_under = sum(dict_stats["multi_oov_counter"].values())
        n_over = sum(dict_stats["over_seg_counter"].values())
        print(f" Possible over-merges: {len(dict_stats['multi_oov_counter']):,} forms ({n_under:,} occ)")
        print(f" Possible under-merges: {len(dict_stats['over_seg_counter']):,} forms ({n_over:,} occ)")


if __name__ == "__main__":
    main()
|
{scripts → src}/gpu_stats.py
RENAMED
|
File without changes
|
{scripts → src}/run_conversion.sh
RENAMED
|
File without changes
|
{scripts → src}/run_on_runpod.py
RENAMED
|
File without changes
|
{scripts → src}/statistics.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/LICENSE.txt
RENAMED
|
File without changes
|
{scripts → src}/udtools/MANIFEST.in
RENAMED
|
File without changes
|
{scripts → src}/udtools/README.md
RENAMED
|
File without changes
|
{scripts → src}/udtools/pyproject.toml
RENAMED
|
File without changes
|
{scripts → src}/udtools/requirements.txt
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/__init__.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/argparser.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/cli.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/data.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/incident.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/level1.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/level2.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/level3.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/level4.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/level5.py
RENAMED
|
File without changes
|
{scripts → src}/udtools/src/udtools/level6.py
RENAMED
|
File without changes
|