Datasets:
Add scripts and ud-tools from UDD-v0.1
- Add scripts: convert_to_ud.py, fetch_data.py, statistics.py, upload_to_hf.py
- Add ud-tools: validation module for Universal Dependencies format
- Add .gitignore for __pycache__
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitignore +1 -0
- scripts/convert_to_ud.py +525 -0
- scripts/fetch_data.py +115 -0
- scripts/statistics.py +182 -0
- scripts/upload_to_hf.py +80 -0
- ud-tools/requirements.txt +2 -0
- ud-tools/udtools/LICENSE.txt +339 -0
- ud-tools/udtools/MANIFEST.in +1 -0
- ud-tools/udtools/README.md +249 -0
- ud-tools/udtools/pyproject.toml +48 -0
- ud-tools/udtools/src/udtools/__init__.py +11 -0
- ud-tools/udtools/src/udtools/argparser.py +144 -0
- ud-tools/udtools/src/udtools/cli.py +49 -0
- ud-tools/udtools/src/udtools/data.py +403 -0
- ud-tools/udtools/src/udtools/incident.py +240 -0
- ud-tools/udtools/src/udtools/level1.py +623 -0
- ud-tools/udtools/src/udtools/level2.py +1288 -0
- ud-tools/udtools/src/udtools/level3.py +1157 -0
- ud-tools/udtools/src/udtools/level4.py +295 -0
- ud-tools/udtools/src/udtools/level5.py +112 -0
- ud-tools/udtools/src/udtools/level6.py +798 -0
- ud-tools/udtools/src/udtools/state.py +176 -0
- ud-tools/udtools/src/udtools/udeval.py +882 -0
- ud-tools/udtools/src/udtools/utils.py +434 -0
- ud-tools/udtools/src/udtools/validator.py +403 -0
- ud-tools/udtools/tests/test-cases/README.md +5 -0
- ud-tools/udtools/tests/test-cases/eval/cs_pud-gold.conllu +0 -0
- ud-tools/udtools/tests/test-cases/eval/cs_pud-plain.txt +0 -0
- ud-tools/udtools/tests/test-cases/eval/cs_pud-udpipe-pdtc-ud-2.17-251125.conllu +0 -0
- ud-tools/udtools/tests/test-cases/eval/eval.txt +17 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/.gitattributes +2 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/columns-format-minimal.conllu +5 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/columns-format.conllu +24 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/duplicate-id.conllu +6 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/empty-field.conllu +5 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/empty-head.conllu +5 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/empty-sentence.conllu +8 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/extra-empty-line.conllu +13 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/id-starting-from-2.conllu +10 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/id-with-extra-0.conllu +5 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/invalid-line.conllu +24 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/invalid-range.conllu +11 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/invalid-word-id.conllu +5 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/invalid-word-interval.conllu +11 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/misindexed-empty-node.conllu +7 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/misordered-multiword.conllu +12 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/misplaced-comment-end.conllu +13 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/misplaced-comment-mid.conllu +12 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/misplaced-comment.conllu +24 -0
- ud-tools/udtools/tests/test-cases/invalid-level1/misplaced-empty-node-2.conllu +13 -0
.gitignore
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
scripts/convert_to_ud.py
ADDED
|
@@ -0,0 +1,525 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Convert sentences to Universal Dependencies format compatible with HuggingFace.
|
| 3 |
+
Structure follows: https://huggingface.co/datasets/commul/universal_dependencies/viewer/vi_vtb
|
| 4 |
+
Uses underthesea dependency_parse for proper annotations.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import json
|
| 8 |
+
from os.path import dirname, expanduser, join
|
| 9 |
+
|
| 10 |
+
from underthesea import dependency_parse, pos_tag
|
| 11 |
+
|
| 12 |
+
# Map Vietnamese POS tags (VLSP/underthesea tagset) to Universal POS tags.
# Based on: https://universaldependencies.org/u/pos/
UPOS_MAP = {
    'N': 'NOUN',     # Noun
    'Np': 'PROPN',   # Proper noun
    'Nc': 'NOUN',    # Classifier noun
    'Nu': 'NOUN',    # Unit noun
    'V': 'VERB',     # Verb
    'A': 'ADJ',      # Adjective
    'P': 'PRON',     # Pronoun
    'R': 'ADV',      # Adverb
    'L': 'DET',      # Determiner/Quantifier
    'M': 'NUM',      # Numeral
    'E': 'ADP',      # Preposition
    'C': 'CCONJ',    # Coordinating conjunction
    'CC': 'CCONJ',   # Coordinating conjunction
    'SC': 'SCONJ',   # Subordinating conjunction
    'I': 'INTJ',     # Interjection
    'T': 'PART',     # Particle
    'B': 'X',        # Foreign word
    'Y': 'X',        # Abbreviation
    'S': 'SYM',      # Symbol
    'X': 'X',        # Other
    'CH': 'PUNCT',   # Punctuation
    'Ny': 'NOUN',    # Noun (variant)
}

# Vietnamese auxiliary verbs that should be tagged as AUX.
# Based on UD Vietnamese validation data (data.json).
# NOTE: all entries are lowercase; lookups should lowercase the token first.
AUX_WORDS = {
    'bị', 'chưa thể', 'chắc chắn', 'có thể', 'có vẻ', 'cần',
    'giả', 'không thể', 'là', 'muốn', 'nghĩa là', 'nhằm',
    'nên', 'phải', 'quyết', 'thôi', 'thể', 'xong', 'được', 'định'
}

# Vietnamese determiners - words that should be DET when used as 'det' relation.
DET_WORDS = {
    'các', 'những', 'mọi', 'mỗi', 'từng', 'bất kỳ', 'một', 'hai', 'ba',
    'này', 'đó', 'kia', 'ấy', 'nọ', 'nào', 'đấy', 'cái', 'con', 'chiếc',
    'người', 'cả', 'phá tán'  # Words that appear as det in the data
}

# Words that can be ADV when used as 'advmod'.
ADV_WORDS = {
    'không', 'chưa', 'đã', 'đang', 'sẽ', 'còn', 'vẫn', 'cũng', 'rất',
    'quá', 'lắm', 'hơn', 'nhất', 'luôn', 'thường', 'hay', 'ít', 'nhiều',
    'tự', 'một cách', 'được', 'không thể', 'lại', 'cá biệt', 'dân sự'
}

# Invalid deprels (produced by the parser) mapped to valid UD relations.
DEPREL_MAP = {
    'acomp': 'xcomp',            # Adjectival complement -> open clausal complement
    'nmod:comp': 'nmod',         # Invalid subtype
    'nmod:agent': 'obl:agent',   # Agent should be obl not nmod
    'nmod:with': 'nmod',         # Invalid subtype
    'nmod:about': 'nmod',        # Invalid subtype -> nmod
    'compound:number': 'nummod', # Number compounds should be nummod
    'compound:nmod': 'compound', # Invalid subtype
    'obl:pcomp': 'obl',          # Invalid subtype -> obl
}
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def to_upos(tag, token=None):
    """Map a Vietnamese POS tag to a Universal POS tag.

    If *token* is given and is a known auxiliary word, 'AUX' is returned
    regardless of *tag*. Unknown tags map to 'X'.

    Args:
        tag: Vietnamese (VLSP-style) POS tag, e.g. 'N', 'Np', 'CH'.
        token: optional surface form, used to detect auxiliaries.

    Returns:
        A Universal POS tag string.
    """
    if token and token.lower() in AUX_WORDS:
        # All AUX_WORDS entries are lowercase, so a single set membership
        # test suffices; the original follow-up loop re-comparing against
        # aux.lower() for every entry was dead code and has been removed.
        return 'AUX'
    return UPOS_MAP.get(tag, 'X')
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def fix_syntax_errors(tokens, upos, head, deprel):
    """
    Post-process a parse to fix common UD SYNTAX validation errors.

    Args:
        tokens: list of surface forms.
        upos: list of Universal POS tags (parallel to tokens).
        head: list of 1-based head ids as strings ('0' = root).
        deprel: list of dependency relation labels.

    Returns:
        Tuple (upos, head, deprel) of fixed lists; head is returned as
        strings again. Runs multiple passes to handle dependencies
        between fixes.
    """
    n = len(tokens)
    upos = list(upos)
    deprel = list(deprel)
    head = [int(h) for h in head]

    # First pass: fix leaf nodes (aux/mark/case/punct should not have children).
    # Need multiple passes to handle chains of leaf nodes.
    for _ in range(5):  # Multiple passes to handle chains
        changed = False
        for i in range(n):
            rel = deprel[i]

            # Leaf nodes should not have children - redirect children to parent.
            # Include subtypes like aux:pass, mark:pcomp, etc.
            # Also include det, nummod, clf which should be leaves.
            if rel.split(':')[0] in ('aux', 'cop', 'mark', 'case', 'punct', 'det', 'nummod', 'clf'):
                has_children = any(head[j] == i + 1 for j in range(n))
                if has_children:
                    my_head = head[i]
                    for j in range(n):
                        if head[j] == i + 1:
                            head[j] = my_head
                            changed = True
        if not changed:
            break

    for i in range(n):
        token_lower = tokens[i].lower()
        rel = deprel[i]
        pos = upos[i]

        # Fix 0: Map invalid deprels to valid ones.
        if rel in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[rel]
            rel = deprel[i]

        # Fix 1: rel-upos-det - 'det' (including subtypes) should be DET or PRON.
        if rel.startswith('det') and pos not in ('DET', 'PRON'):
            # Force all 'det' relations to have DET or PRON UPOS.
            upos[i] = 'DET'

        # Fix 2: rel-upos-advmod - 'advmod' (including subtypes) should be ADV.
        if rel.startswith('advmod') and pos != 'ADV':
            # For advmod, always prefer changing UPOS to ADV.
            upos[i] = 'ADV'

        # Fix 2b: rel-upos-nummod - 'nummod' should be NUM.
        if rel.startswith('nummod') and upos[i] != 'NUM':
            # If token is clearly not a number (e.g., VERB), change relation instead.
            if upos[i] == 'VERB':
                deprel[i] = 'acl'  # Adjectival clause for verbs
                rel = 'acl'  # Update local variable too
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'  # Adjectival modifier
                rel = 'amod'
            else:
                upos[i] = 'NUM'

        # Fix 3: rel-upos-mark - 'mark' (including subtypes) should not be AUX.
        if rel.startswith('mark') and pos == 'AUX':
            upos[i] = 'SCONJ'

        # Fix 3b: rel-upos-punct - 'punct' must be PUNCT, and PUNCT must have 'punct' deprel.
        if rel == 'punct' and pos != 'PUNCT':
            # Change relation to something appropriate based on POS.
            if pos in ('VERB', 'NOUN', 'ADJ'):
                deprel[i] = 'dep'  # Use generic dependency
            else:
                upos[i] = 'PUNCT'

        # Fix 3b2: upos-rel-punct - PUNCT must have 'punct' deprel.
        if pos == 'PUNCT' and rel != 'punct':
            deprel[i] = 'punct'
            rel = 'punct'

        # Fix 3c: rel-upos-case - 'case' should be ADP, not ADJ, AUX or PROPN.
        if rel == 'case' and pos in ('ADJ', 'AUX', 'PROPN', 'NOUN', 'VERB'):
            upos[i] = 'ADP'

        # Fix 3d: rel-upos-cc - 'cc' should be CCONJ or SCONJ.
        if rel == 'cc' and pos not in ('CCONJ', 'SCONJ'):
            upos[i] = 'CCONJ'

        # Fix 3e: rel-upos-aux - 'aux' should be AUX, but only for valid auxiliaries.
        is_valid_aux = token_lower in AUX_WORDS or any(token_lower == aux.lower() for aux in AUX_WORDS)
        if rel.startswith('aux'):
            if is_valid_aux:
                upos[i] = 'AUX'
                pos = 'AUX'
            else:
                # Not a valid auxiliary - change relation to advcl or xcomp.
                if pos == 'VERB' or upos[i] == 'VERB':
                    deprel[i] = 'advcl'
                    upos[i] = 'VERB'
                elif pos == 'ADP' or upos[i] == 'ADP':
                    deprel[i] = 'mark'
                    upos[i] = 'ADP'
                else:
                    deprel[i] = 'xcomp'
                rel = deprel[i]
                pos = upos[i]
        # Also fix AUX UPOS that's not a valid auxiliary (MORPHO aux-lemma).
        elif pos == 'AUX' and not is_valid_aux:
            upos[i] = 'VERB'  # Default to VERB for non-aux
            pos = 'VERB'

        # Fix 3f: rel-upos-cop - 'cop' should be AUX or PRON/DET, only 'là' is valid copula.
        if rel == 'cop':
            if token_lower != 'là':
                # Not a valid copula, change to xcomp.
                deprel[i] = 'xcomp'
                rel = 'xcomp'
            elif pos not in ('AUX', 'PRON', 'DET'):
                upos[i] = 'AUX'

        # Fix 4: obl-should-be-nmod - when parent is nominal, use nmod.
        if rel.startswith('obl') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx < n and upos[parent_idx] in ('NOUN', 'PROPN', 'PRON'):
                # Preserve subtype if exists.
                if ':' in rel:
                    deprel[i] = 'nmod:' + rel.split(':')[1]
                else:
                    deprel[i] = 'nmod'

    # Fix 5: (handled in first pass above)

    # Fix 5b: right-to-left relations - flat/conj/appos must be left-to-right.
    for i in range(n):
        rel = deprel[i]
        base_rel = rel.split(':')[0]
        if base_rel in ('flat', 'conj', 'appos') and head[i] > 0:
            parent_idx = head[i] - 1
            if parent_idx > i:  # Parent comes after child (wrong direction)
                # Change to compound which allows both directions.
                if ':' in rel:
                    deprel[i] = 'compound:' + rel.split(':')[1]
                else:
                    deprel[i] = 'compound'

    # Fix 5c: Apply DEPREL_MAP again to catch any newly created invalid deprels.
    for i in range(n):
        if deprel[i] in DEPREL_MAP:
            deprel[i] = DEPREL_MAP[deprel[i]]

    # Fix 5d: Final check for nummod with wrong UPOS.
    for i in range(n):
        if deprel[i].startswith('nummod') and upos[i] != 'NUM':
            if upos[i] == 'VERB':
                deprel[i] = 'acl'
            elif upos[i] == 'ADJ':
                deprel[i] = 'amod'
            elif upos[i] == 'NOUN':
                deprel[i] = 'nmod'
            else:
                upos[i] = 'NUM'

    # Fix 6: too-many-subjects - add :outer subtype for multiple subjects.
    # Group all subject types (nsubj, csubj) by predicate.
    predicates = {}
    for i in range(n):
        base_rel = deprel[i].split(':')[0]
        if base_rel in ('nsubj', 'csubj') and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates:
                predicates[pred_idx] = []
            predicates[pred_idx].append((i, base_rel))

    for pred_idx, subj_list in predicates.items():
        if len(subj_list) > 1:
            # Sort by position to keep first subject as main.
            subj_list.sort(key=lambda x: x[0])
            # Mark all but the first as :outer (only nsubj:outer is valid, not csubj:outer).
            for idx, base_rel in subj_list[1:]:
                if ':outer' not in deprel[idx]:
                    # csubj:outer is not a valid UD relation, use nsubj:outer instead.
                    deprel[idx] = 'nsubj:outer'

    # Fix 7: too-many-objects - add :pass or compound for multiple objects.
    predicates_obj = {}
    for i in range(n):
        if deprel[i] == 'obj' and head[i] > 0:
            pred_idx = head[i]
            if pred_idx not in predicates_obj:
                predicates_obj[pred_idx] = []
            predicates_obj[pred_idx].append(i)

    for pred_idx, obj_indices in predicates_obj.items():
        if len(obj_indices) > 1:
            # Mark subsequent objects as compound.
            for idx in obj_indices[1:]:
                # Check if it's adjacent to previous - likely compound.
                if idx > 0 and obj_indices[0] == idx - 1:
                    deprel[idx] = 'compound'
                else:
                    deprel[idx] = 'iobj'

    # Fix 8: punct-is-nonproj - attach punctuation to avoid non-projectivity.
    # Try to find the best attachment point that doesn't cross other edges.
    for i in range(n):
        if upos[i] == 'PUNCT':
            # Try candidates in order: previous token, next token, then expand outward.
            candidates = []
            if i > 0:
                candidates.append(i)  # Previous token (1-based)
            if i + 1 < n:
                candidates.append(i + 2)  # Next token (1-based)

            # Expand to find more candidates.
            for dist in range(2, n):
                if i - dist >= 0:
                    candidates.append(i - dist + 1)  # 1-based
                if i + dist < n:
                    candidates.append(i + dist + 1)  # 1-based

            # Find best attachment that doesn't cause crossing.
            # NOTE(review): for a single-token sentence there are no
            # candidates and best_head falls back to 1, i.e. the token
            # heads itself - confirm whether that edge case can occur.
            best_head = candidates[0] if candidates else 1
            for cand in candidates:
                test_head = list(head)
                test_head[i] = cand
                if not punct_causes_crossing(i, cand - 1, test_head, n):
                    best_head = cand
                    break

            head[i] = best_head

    return upos, [str(h) for h in head], deprel
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def punct_causes_crossing(punct_idx, new_head_idx, head, n):
    """Return True if attaching the punctuation token to the proposed head
    would make its arc cross any other dependency arc.

    Args:
        punct_idx: 0-based index of the punctuation token.
        new_head_idx: 0-based index of the proposed head token.
        head: list of 1-based head ids (0 = root) for all n tokens.
        n: number of tokens in the sentence.
    """
    if not (0 <= new_head_idx < n):
        return False

    lo = min(punct_idx, new_head_idx)
    hi = max(punct_idx, new_head_idx)

    for node in range(n):
        if node == punct_idx:
            continue
        parent = head[node]
        # Skip roots and direct children of the punctuation token.
        if parent <= 0 or parent == punct_idx + 1:
            continue
        parent_idx = parent - 1
        if not (0 <= parent_idx < n):
            continue

        a = min(node, parent_idx)
        b = max(node, parent_idx)

        # Two arcs cross iff exactly one endpoint of one lies strictly
        # inside the span of the other.
        if lo < a < hi < b or a < lo < b < hi:
            return True

    return False
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def compute_space_after(text, tokens):
    """Build the MISC column for each token: 'SpaceAfter=No' when the token
    is immediately followed by a non-whitespace character in *text*,
    otherwise '_'.

    Tokens are located left-to-right; a token that cannot be found in the
    original text is assumed to be followed by a space.
    """
    misc = []
    cursor = 0
    for token in tokens:
        start = text.find(token, cursor)
        if start < 0:
            # Token not found in the original text; assume a trailing space.
            misc.append("_")
            continue

        end = start + len(token)
        cursor = end

        # End of text counts as "followed by a space".
        followed_by_space = end >= len(text) or text[end] in ' \t\n'
        misc.append("_" if followed_by_space else "SpaceAfter=No")

    return misc
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
def load_sentences(filepath):
    """Load sentences from a tab-separated file (sentences.txt).

    Each line has the form '<index><TAB><sentence>'. Blank lines and
    lines without a tab are skipped; only the text after the first tab
    is returned.
    """
    sentences = []
    with open(filepath, "r", encoding="utf-8") as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                continue
            fields = stripped.split("\t", 1)
            if len(fields) == 2:
                sentences.append(fields[1])
    return sentences
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def convert_to_ud_format(sentences):
    """Convert raw sentences to UD-style row dicts using dependency_parse.

    Each row carries the ten CoNLL-U columns (parallel lists) plus
    sent_id, text, comment lines, and empty mwt/empty_nodes slots, in
    the layout expected by the HuggingFace universal_dependencies viewer.
    """
    rows = []
    total = len(sentences)

    for num, text in enumerate(sentences, 1):
        if num % 100 == 0:
            print(f" Processing sentence {num}/{total}...")

        sent_id = f"s{num}"

        try:
            # dependency_parse yields (token, head, deprel) triples.
            triples = dependency_parse(text)
            tokens = [form for form, _, _ in triples]
            head = [str(h) for _, h, _ in triples]
            deprel = [rel for _, _, rel in triples]

            # Align POS tags with the dependency tokens.
            tagged = pos_tag(text)
            if len(tagged) == len(tokens):
                xpos = [tag for _, tag in tagged]  # Original Vietnamese tags
                upos = [to_upos(tag, form) for form, tag in tagged]
            else:
                # Tokenizations disagree; fall back to unknown tags.
                xpos = ['X'] * len(tokens)
                upos = ['X'] * len(tokens)

        except Exception as e:
            print(f" Error parsing sentence {num}: {e}")
            # Best-effort fallback: POS-tag only, flat "dep" tree.
            tagged = pos_tag(text)
            tokens = [form for form, _ in tagged]
            xpos = [tag for _, tag in tagged]
            upos = [to_upos(tag, form) for form, tag in tagged]
            head = ["0"] * len(tokens)
            deprel = ["dep"] * len(tokens)
            if tokens:
                deprel[0] = "root"

        # Apply the UD syntax post-processing fixes.
        upos, head, deprel = fix_syntax_errors(tokens, upos, head, deprel)

        count = len(tokens)
        rows.append({
            "sent_id": sent_id,
            "text": text,
            "comments": [f"# sent_id = {sent_id}", f"# text = {text}"],
            "tokens": tokens,
            # Vietnamese: lemma = lowercased surface form.
            "lemmas": [form.lower() for form in tokens],
            "upos": upos,
            "xpos": xpos,
            "feats": ["_"] * count,
            "head": head,
            "deprel": deprel,
            "deps": ["_"] * count,
            "misc": compute_space_after(text, tokens),  # SpaceAfter markers
            "mwt": [],
            "empty_nodes": [],
        })

    return rows
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def save_jsonl(data, filepath):
    """Write rows to *filepath* as JSON Lines (one JSON object per line,
    non-ASCII characters kept verbatim)."""
    with open(filepath, "w", encoding="utf-8") as handle:
        handle.writelines(
            json.dumps(row, ensure_ascii=False) + "\n" for row in data
        )
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
def save_conllu(data, filepath):
    """Write rows to *filepath* in CoNLL-U format.

    Each sentence gets '# sent_id' and '# text' comment lines, one
    ten-column tab-separated line per token
    (ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC),
    and a trailing blank line.
    """
    columns = ('tokens', 'lemmas', 'upos', 'xpos', 'feats',
               'head', 'deprel', 'deps', 'misc')
    with open(filepath, "w", encoding="utf-8") as handle:
        for row in data:
            handle.write(f"# sent_id = {row['sent_id']}\n")
            handle.write(f"# text = {row['text']}\n")
            for idx in range(len(row['tokens'])):
                fields = [str(idx + 1)] + [row[col][idx] for col in columns]
                handle.write("\t".join(fields) + "\n")
            handle.write("\n")
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def main():
    """Run the full pipeline: load sentences, convert to UD, save both
    JSONL (HuggingFace) and CoNLL-U outputs, then print a sample row."""
    base_dir = expanduser("~/Downloads/UD_Vietnamese-UUD-v0.1")
    sentences_path = join(base_dir, "sentences.txt")

    print("Loading sentences...")
    sentences = load_sentences(sentences_path)
    print(f"Loaded {len(sentences)} sentences")

    print("Converting to UD format...")
    data = convert_to_ud_format(sentences)

    # JSONL output (for HuggingFace).
    jsonl_path = join(base_dir, "train.jsonl")
    save_jsonl(data, jsonl_path)
    print(f"Saved JSONL to: {jsonl_path}")

    # CoNLL-U output (standard UD format).
    conllu_path = join(base_dir, "train.conllu")
    save_conllu(data, conllu_path)
    print(f"Saved CoNLL-U to: {conllu_path}")

    # Preview the first converted row.
    print("\nSample row:")
    sample = data[0]
    print(f" sent_id: {sample['sent_id']}")
    print(f" text: {sample['text'][:60]}...")
    print(f" tokens: {sample['tokens'][:5]}...")
    print(f" upos: {sample['upos'][:5]}...")


if __name__ == "__main__":
    main()
|
scripts/fetch_data.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Fetch data from HuggingFace dataset undertheseanlp/UTS_VLC
|
| 3 |
+
- Get documents from law dataset
|
| 4 |
+
- Segment sentences using underthesea
|
| 5 |
+
- Get first 3000 sentences
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import re
|
| 9 |
+
from os.path import dirname, join
|
| 10 |
+
|
| 11 |
+
from datasets import load_dataset
|
| 12 |
+
|
| 13 |
+
from underthesea import sent_tokenize, text_normalize
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def clean_text(text):
    """Normalize Unicode and strip markdown formatting from *text*."""
    # Unicode normalization via underthesea.
    text = text_normalize(text)
    # Strip markdown heading markers.
    text = re.sub(r'^#+\s+', '', text, flags=re.MULTILINE)
    # Strip bold/italic asterisks.
    text = re.sub(r'\*+', '', text)
    # Drop horizontal rules.
    text = re.sub(r'^-+$', '', text, flags=re.MULTILINE)
    # Replace markdown links with their anchor text.
    text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text)
    # Collapse runs of blank lines into a single newline.
    text = re.sub(r'\n{2,}', '\n', text)
    # Trim leading/trailing whitespace on every line.
    return '\n'.join(line.strip() for line in text.split('\n'))
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def is_valid_sentence(sent):
    """Decide whether *sent* is suitable for UD annotation.

    Trailing list markers ('1.', 'a)') are stripped first; the cleaned
    sentence is returned alongside the verdict.

    Returns:
        Tuple (is_valid, cleaned_sentence).
    """
    sent = sent.strip()
    # Drop trailing list markers such as "1." or "a)" left over from
    # sentence segmentation.
    sent = re.sub(r'\n\d+\.$', '', sent)
    sent = re.sub(r'\n[a-z]\)$', '', sent)
    sent = sent.strip()

    if not sent:
        return False, sent
    # Reject sentences outside the useful length range.
    if not (20 <= len(sent) <= 300):
        return False, sent
    # Boilerplate headers (national motto, part/chapter headings, ...).
    if re.match(r'^(QUỐC HỘI|CỘNG HÒA|Độc lập|Phần thứ|Chương [IVX]+|MỤC \d+)', sent):
        return False, sent
    # Article / clause titles.
    if re.match(r'^(Điều \d+|Khoản \d+|Mục \d+)', sent):
        return False, sent
    # Mostly uppercase -> almost certainly a heading.
    if sum(1 for c in sent if c.isupper()) > len(sent) * 0.5:
        return False, sent
    # Metadata fields and table markup.
    if sent.startswith(('English:', 'Số hiệu:', 'Ngày hiệu lực:', '---', '|')):
        return False, sent
    # Require at least one Vietnamese diacritic character.
    if not re.search(r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]', sent, re.IGNORECASE):
        return False, sent
    # A bare trailing number suggests a truncated sentence.
    if re.search(r'\n\d+$', sent):
        return False, sent
    return True, sent
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def fetch_and_process():
    """Download the corpus, filter valid sentences, and write them to disk."""
    # Load dataset from HuggingFace
    print("Loading dataset from HuggingFace...")
    corpus = load_dataset("undertheseanlp/UTS_VLC", split="2026")

    # Collect cleaned sentences until the 3000-sentence target is reached.
    print("Segmenting sentences...")
    collected = []
    for doc_index, document in enumerate(corpus):
        body = clean_text(document["content"])
        for raw in sent_tokenize(body):
            keep, cleaned = is_valid_sentence(raw.strip())
            if keep:
                collected.append(cleaned)
        if len(collected) >= 3000:
            print(f"Processed {doc_index + 1} documents")
            break

    # Cap at exactly 3000 sentences.
    selected = collected[:3000]
    print(f"Total sentences collected: {len(selected)}")

    # Write one "<index>\t<sentence>" record per line, one level above scripts/.
    target = join(dirname(dirname(__file__)), "sentences.txt")
    with open(target, "w", encoding="utf-8") as out:
        out.writelines(f"{num}\t{text}\n" for num, text in enumerate(selected, 1))
    print(f"Saved to: {target}")

    # Show a short preview for eyeballing the result.
    print("\nSample sentences:")
    for num, text in enumerate(selected[:5], 1):
        print(f" {num}. {text[:80]}...")


if __name__ == "__main__":
    fetch_and_process()
|
scripts/statistics.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Statistics for UD Vietnamese Dataset (UDD-v0.1)
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from collections import Counter
|
| 6 |
+
from os.path import dirname, join
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def parse_conllu(filepath):
    """Read a CoNLL-U file and return a list of sentence dicts.

    Each dict carries parallel lists ('tokens', 'upos', 'head', 'deprel')
    plus a 'metadata' mapping built from "# key = value" comment lines.
    Multi-word token ranges (IDs like "1-2") and empty nodes (IDs like
    "3.1") are skipped.
    """
    def fresh():
        # One sentence accumulator with empty parallel columns.
        return {'tokens': [], 'upos': [], 'deprel': [], 'head': [], 'metadata': {}}

    sentences = []
    sentence = fresh()

    with open(filepath, 'r', encoding='utf-8') as handle:
        for raw in handle:
            raw = raw.strip()
            if not raw:
                # A blank line terminates the current sentence.
                if sentence['tokens']:
                    sentences.append(sentence)
                    sentence = fresh()
            elif raw.startswith('#'):
                # Comment lines of the form "# key = value" become metadata.
                if '=' in raw:
                    key, value = raw[2:].split('=', 1)
                    sentence['metadata'][key.strip()] = value.strip()
            else:
                fields = raw.split('\t')
                # Keep only regular word lines: 10+ columns, plain integer ID.
                if len(fields) >= 10 and '-' not in fields[0] and '.' not in fields[0]:
                    sentence['tokens'].append(fields[1])
                    sentence['upos'].append(fields[3])
                    sentence['head'].append(fields[6])
                    sentence['deprel'].append(fields[7])

    # Flush a final sentence that is not followed by a blank line.
    if sentence['tokens']:
        sentences.append(sentence)

    return sentences
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def compute_statistics(sentences):
    """Aggregate corpus-level statistics over parsed CoNLL-U sentences."""
    # Flatten the per-sentence columns once, then derive every figure.
    lengths = [len(s['tokens']) for s in sentences]
    upos_tags = [tag for s in sentences for tag in s['upos']]
    relations = [rel for s in sentences for rel in s['deprel']]
    depths = [compute_tree_depth(s['head']) for s in sentences]
    # UPOS of every token attached with the 'root' relation.
    roots = [upos
             for s in sentences
             for upos, deprel in zip(s['upos'], s['deprel'])
             if deprel == 'root']

    return {
        'num_sentences': len(sentences),
        'num_tokens': sum(lengths),
        'avg_sent_length': sum(lengths) / len(lengths) if lengths else 0,
        'min_sent_length': min(lengths) if lengths else 0,
        'max_sent_length': max(lengths) if lengths else 0,
        'upos_counts': Counter(upos_tags),
        'deprel_counts': Counter(relations),
        'avg_tree_depth': sum(depths) / len(depths) if depths else 0,
        'max_tree_depth': max(depths) if depths else 0,
        'root_upos_counts': Counter(roots),
    }
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def compute_tree_depth(heads):
    """Compute the maximum depth of a dependency tree.

    Args:
        heads: list of HEAD column values (strings); ``heads[i]`` is the head
            of token ``i + 1`` and ``"0"`` marks a root token.

    Returns:
        Depth of the deepest token (roots have depth 1), or 0 for an empty
        sentence.  Tokens whose depth cannot be resolved — cyclic trees,
        out-of-range heads, or non-numeric HEAD fields such as "_" — fall
        back to depth 1 instead of raising.
    """
    n = len(heads)
    if n == 0:
        return 0

    # depths[i] == 0 means "not computed yet"; real depths start at 1,
    # so the memo check below is safe.
    depths = [0] * n

    def get_depth(idx):
        if depths[idx] > 0:
            return depths[idx]
        head = int(heads[idx])
        if head == 0:
            depths[idx] = 1
        else:
            depths[idx] = get_depth(head - 1) + 1
        return depths[idx]

    for i in range(n):
        try:
            get_depth(i)
        except (RecursionError, IndexError, ValueError):
            # RecursionError: cyclic heads; IndexError: head beyond sentence;
            # ValueError: malformed HEAD such as "_" (previously uncaught and
            # crashed the whole statistics run).  Degrade to depth 1.
            depths[i] = 1

    return max(depths) if depths else 0
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def print_statistics(stats):
    """Render the statistics dict as a human-readable report on stdout."""
    def show_distribution(title, label, width, counter, total):
        # Shared layout for the three "tag / count / percent" tables.
        print(f"\n## {title}")
        print(f" {label:<{width}} {'Count':>8} {'Percent':>8}")
        print(" " + "-" * (width + 18))
        for name, count in counter.most_common():
            share = count / total * 100
            print(f" {name:<{width}} {count:>8,} {share:>7.2f}%")

    banner = "=" * 60
    print(banner)
    print("UD Vietnamese Dataset (UDD-v0.1) Statistics")
    print(banner)

    print("\n## Basic Statistics")
    print(f" Sentences: {stats['num_sentences']:,}")
    print(f" Tokens: {stats['num_tokens']:,}")
    print(f" Avg sent length: {stats['avg_sent_length']:.2f}")
    print(f" Min sent length: {stats['min_sent_length']}")
    print(f" Max sent length: {stats['max_sent_length']}")
    print(f" Avg tree depth: {stats['avg_tree_depth']:.2f}")
    print(f" Max tree depth: {stats['max_tree_depth']}")

    token_total = stats['num_tokens']
    show_distribution("UPOS Distribution", "Tag", 10, stats['upos_counts'], token_total)
    show_distribution("DEPREL Distribution", "Relation", 20, stats['deprel_counts'], token_total)
    root_total = sum(stats['root_upos_counts'].values())
    show_distribution("Root UPOS Distribution", "UPOS", 10, stats['root_upos_counts'], root_total)

    print("\n" + banner)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def main():
    """Entry point: locate train.conllu, parse it, and report statistics."""
    # The treebank file lives one directory above the scripts/ folder.
    base_dir = dirname(dirname(__file__))
    conllu_file = join(base_dir, 'train.conllu')

    print(f"Reading: {conllu_file}")
    parsed = parse_conllu(conllu_file)
    print_statistics(compute_statistics(parsed))


if __name__ == "__main__":
    main()
|
scripts/upload_to_hf.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Upload UD dataset to HuggingFace Hub.
|
| 3 |
+
Dataset: undertheseanlp/UDD-v0.1
|
| 4 |
+
|
| 5 |
+
Usage:
|
| 6 |
+
export $(cat .env | xargs) && python upload_to_hf.py
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import json
|
| 10 |
+
import os
|
| 11 |
+
from os.path import expanduser, join
|
| 12 |
+
|
| 13 |
+
from datasets import Dataset, DatasetDict
|
| 14 |
+
from huggingface_hub import HfApi, login
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def load_jsonl(filepath):
    """Load a JSON-Lines file into a list of records.

    Blank lines (including a trailing newline at end of file) are skipped
    instead of being passed to ``json.loads``, which would raise
    ``json.JSONDecodeError`` on empty input.

    Args:
        filepath: path to a UTF-8 encoded JSONL file.

    Returns:
        A list with one decoded object per non-empty line, in file order.
    """
    data = []
    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                data.append(json.loads(line))
    return data
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def main():
    """Build a HuggingFace dataset from train.jsonl and push it to the Hub."""
    # Authenticate: prefer an explicit token, fall back to cached credentials.
    hf_token = os.environ.get("HF_TOKEN")
    if hf_token:
        print("Logging in with HF_TOKEN...")
        login(token=hf_token)
    else:
        print("Warning: HF_TOKEN not set. Using cached credentials.")

    # Source files produced by the conversion pipeline.
    source_folder = expanduser("~/Downloads/UD_Vietnamese-UUD-v0.1")
    train_path = join(source_folder, "train.jsonl")
    readme_path = join(source_folder, "README.md")

    print("Loading data...")
    records = load_jsonl(train_path)
    print(f"Loaded {len(records)} sentences")

    print("Creating HuggingFace Dataset...")
    train_split = Dataset.from_list(records)
    # Single "train" split for now.
    bundle = DatasetDict({
        "train": train_split
    })
    print(f"Dataset: {bundle}")
    print(f"Features: {train_split.features}")

    repo_id = "undertheseanlp/UDD-v0.1"
    print(f"\nPushing to HuggingFace Hub: {repo_id}")
    bundle.push_to_hub(
        repo_id,
        private=False,
        commit_message="Update: 1000 sentences from Vietnamese Legal Corpus"
    )

    # The dataset card is uploaded separately as a raw file.
    print("Uploading README.md...")
    HfApi().upload_file(
        path_or_fileobj=readme_path,
        path_in_repo="README.md",
        repo_id=repo_id,
        repo_type="dataset",
        commit_message="Add README with dataset card"
    )

    print(f"\nDone! Dataset available at: https://huggingface.co/datasets/{repo_id}")


if __name__ == "__main__":
    main()
|
ud-tools/requirements.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
udapi>=0.5.0
|
| 2 |
+
regex>=2020.09.27
|
ud-tools/udtools/LICENSE.txt
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
GNU GENERAL PUBLIC LICENSE
|
| 2 |
+
Version 2, June 1991
|
| 3 |
+
|
| 4 |
+
Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
|
| 5 |
+
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
| 6 |
+
Everyone is permitted to copy and distribute verbatim copies
|
| 7 |
+
of this license document, but changing it is not allowed.
|
| 8 |
+
|
| 9 |
+
Preamble
|
| 10 |
+
|
| 11 |
+
The licenses for most software are designed to take away your
|
| 12 |
+
freedom to share and change it. By contrast, the GNU General Public
|
| 13 |
+
License is intended to guarantee your freedom to share and change free
|
| 14 |
+
software--to make sure the software is free for all its users. This
|
| 15 |
+
General Public License applies to most of the Free Software
|
| 16 |
+
Foundation's software and to any other program whose authors commit to
|
| 17 |
+
using it. (Some other Free Software Foundation software is covered by
|
| 18 |
+
the GNU Lesser General Public License instead.) You can apply it to
|
| 19 |
+
your programs, too.
|
| 20 |
+
|
| 21 |
+
When we speak of free software, we are referring to freedom, not
|
| 22 |
+
price. Our General Public Licenses are designed to make sure that you
|
| 23 |
+
have the freedom to distribute copies of free software (and charge for
|
| 24 |
+
this service if you wish), that you receive source code or can get it
|
| 25 |
+
if you want it, that you can change the software or use pieces of it
|
| 26 |
+
in new free programs; and that you know you can do these things.
|
| 27 |
+
|
| 28 |
+
To protect your rights, we need to make restrictions that forbid
|
| 29 |
+
anyone to deny you these rights or to ask you to surrender the rights.
|
| 30 |
+
These restrictions translate to certain responsibilities for you if you
|
| 31 |
+
distribute copies of the software, or if you modify it.
|
| 32 |
+
|
| 33 |
+
For example, if you distribute copies of such a program, whether
|
| 34 |
+
gratis or for a fee, you must give the recipients all the rights that
|
| 35 |
+
you have. You must make sure that they, too, receive or can get the
|
| 36 |
+
source code. And you must show them these terms so they know their
|
| 37 |
+
rights.
|
| 38 |
+
|
| 39 |
+
We protect your rights with two steps: (1) copyright the software, and
|
| 40 |
+
(2) offer you this license which gives you legal permission to copy,
|
| 41 |
+
distribute and/or modify the software.
|
| 42 |
+
|
| 43 |
+
Also, for each author's protection and ours, we want to make certain
|
| 44 |
+
that everyone understands that there is no warranty for this free
|
| 45 |
+
software. If the software is modified by someone else and passed on, we
|
| 46 |
+
want its recipients to know that what they have is not the original, so
|
| 47 |
+
that any problems introduced by others will not reflect on the original
|
| 48 |
+
authors' reputations.
|
| 49 |
+
|
| 50 |
+
Finally, any free program is threatened constantly by software
|
| 51 |
+
patents. We wish to avoid the danger that redistributors of a free
|
| 52 |
+
program will individually obtain patent licenses, in effect making the
|
| 53 |
+
program proprietary. To prevent this, we have made it clear that any
|
| 54 |
+
patent must be licensed for everyone's free use or not licensed at all.
|
| 55 |
+
|
| 56 |
+
The precise terms and conditions for copying, distribution and
|
| 57 |
+
modification follow.
|
| 58 |
+
|
| 59 |
+
GNU GENERAL PUBLIC LICENSE
|
| 60 |
+
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
| 61 |
+
|
| 62 |
+
0. This License applies to any program or other work which contains
|
| 63 |
+
a notice placed by the copyright holder saying it may be distributed
|
| 64 |
+
under the terms of this General Public License. The "Program", below,
|
| 65 |
+
refers to any such program or work, and a "work based on the Program"
|
| 66 |
+
means either the Program or any derivative work under copyright law:
|
| 67 |
+
that is to say, a work containing the Program or a portion of it,
|
| 68 |
+
either verbatim or with modifications and/or translated into another
|
| 69 |
+
language. (Hereinafter, translation is included without limitation in
|
| 70 |
+
the term "modification".) Each licensee is addressed as "you".
|
| 71 |
+
|
| 72 |
+
Activities other than copying, distribution and modification are not
|
| 73 |
+
covered by this License; they are outside its scope. The act of
|
| 74 |
+
running the Program is not restricted, and the output from the Program
|
| 75 |
+
is covered only if its contents constitute a work based on the
|
| 76 |
+
Program (independent of having been made by running the Program).
|
| 77 |
+
Whether that is true depends on what the Program does.
|
| 78 |
+
|
| 79 |
+
1. You may copy and distribute verbatim copies of the Program's
|
| 80 |
+
source code as you receive it, in any medium, provided that you
|
| 81 |
+
conspicuously and appropriately publish on each copy an appropriate
|
| 82 |
+
copyright notice and disclaimer of warranty; keep intact all the
|
| 83 |
+
notices that refer to this License and to the absence of any warranty;
|
| 84 |
+
and give any other recipients of the Program a copy of this License
|
| 85 |
+
along with the Program.
|
| 86 |
+
|
| 87 |
+
You may charge a fee for the physical act of transferring a copy, and
|
| 88 |
+
you may at your option offer warranty protection in exchange for a fee.
|
| 89 |
+
|
| 90 |
+
2. You may modify your copy or copies of the Program or any portion
|
| 91 |
+
of it, thus forming a work based on the Program, and copy and
|
| 92 |
+
distribute such modifications or work under the terms of Section 1
|
| 93 |
+
above, provided that you also meet all of these conditions:
|
| 94 |
+
|
| 95 |
+
a) You must cause the modified files to carry prominent notices
|
| 96 |
+
stating that you changed the files and the date of any change.
|
| 97 |
+
|
| 98 |
+
b) You must cause any work that you distribute or publish, that in
|
| 99 |
+
whole or in part contains or is derived from the Program or any
|
| 100 |
+
part thereof, to be licensed as a whole at no charge to all third
|
| 101 |
+
parties under the terms of this License.
|
| 102 |
+
|
| 103 |
+
c) If the modified program normally reads commands interactively
|
| 104 |
+
when run, you must cause it, when started running for such
|
| 105 |
+
interactive use in the most ordinary way, to print or display an
|
| 106 |
+
announcement including an appropriate copyright notice and a
|
| 107 |
+
notice that there is no warranty (or else, saying that you provide
|
| 108 |
+
a warranty) and that users may redistribute the program under
|
| 109 |
+
these conditions, and telling the user how to view a copy of this
|
| 110 |
+
License. (Exception: if the Program itself is interactive but
|
| 111 |
+
does not normally print such an announcement, your work based on
|
| 112 |
+
the Program is not required to print an announcement.)
|
| 113 |
+
|
| 114 |
+
These requirements apply to the modified work as a whole. If
|
| 115 |
+
identifiable sections of that work are not derived from the Program,
|
| 116 |
+
and can be reasonably considered independent and separate works in
|
| 117 |
+
themselves, then this License, and its terms, do not apply to those
|
| 118 |
+
sections when you distribute them as separate works. But when you
|
| 119 |
+
distribute the same sections as part of a whole which is a work based
|
| 120 |
+
on the Program, the distribution of the whole must be on the terms of
|
| 121 |
+
this License, whose permissions for other licensees extend to the
|
| 122 |
+
entire whole, and thus to each and every part regardless of who wrote it.
|
| 123 |
+
|
| 124 |
+
Thus, it is not the intent of this section to claim rights or contest
|
| 125 |
+
your rights to work written entirely by you; rather, the intent is to
|
| 126 |
+
exercise the right to control the distribution of derivative or
|
| 127 |
+
collective works based on the Program.
|
| 128 |
+
|
| 129 |
+
In addition, mere aggregation of another work not based on the Program
|
| 130 |
+
with the Program (or with a work based on the Program) on a volume of
|
| 131 |
+
a storage or distribution medium does not bring the other work under
|
| 132 |
+
the scope of this License.
|
| 133 |
+
|
| 134 |
+
3. You may copy and distribute the Program (or a work based on it,
|
| 135 |
+
under Section 2) in object code or executable form under the terms of
|
| 136 |
+
Sections 1 and 2 above provided that you also do one of the following:
|
| 137 |
+
|
| 138 |
+
a) Accompany it with the complete corresponding machine-readable
|
| 139 |
+
source code, which must be distributed under the terms of Sections
|
| 140 |
+
1 and 2 above on a medium customarily used for software interchange; or,
|
| 141 |
+
|
| 142 |
+
b) Accompany it with a written offer, valid for at least three
|
| 143 |
+
years, to give any third party, for a charge no more than your
|
| 144 |
+
cost of physically performing source distribution, a complete
|
| 145 |
+
machine-readable copy of the corresponding source code, to be
|
| 146 |
+
distributed under the terms of Sections 1 and 2 above on a medium
|
| 147 |
+
customarily used for software interchange; or,
|
| 148 |
+
|
| 149 |
+
c) Accompany it with the information you received as to the offer
|
| 150 |
+
to distribute corresponding source code. (This alternative is
|
| 151 |
+
allowed only for noncommercial distribution and only if you
|
| 152 |
+
received the program in object code or executable form with such
|
| 153 |
+
an offer, in accord with Subsection b above.)
|
| 154 |
+
|
| 155 |
+
The source code for a work means the preferred form of the work for
|
| 156 |
+
making modifications to it. For an executable work, complete source
|
| 157 |
+
code means all the source code for all modules it contains, plus any
|
| 158 |
+
associated interface definition files, plus the scripts used to
|
| 159 |
+
control compilation and installation of the executable. However, as a
|
| 160 |
+
special exception, the source code distributed need not include
|
| 161 |
+
anything that is normally distributed (in either source or binary
|
| 162 |
+
form) with the major components (compiler, kernel, and so on) of the
|
| 163 |
+
operating system on which the executable runs, unless that component
|
| 164 |
+
itself accompanies the executable.
|
| 165 |
+
|
| 166 |
+
If distribution of executable or object code is made by offering
|
| 167 |
+
access to copy from a designated place, then offering equivalent
|
| 168 |
+
access to copy the source code from the same place counts as
|
| 169 |
+
distribution of the source code, even though third parties are not
|
| 170 |
+
compelled to copy the source along with the object code.
|
| 171 |
+
|
| 172 |
+
4. You may not copy, modify, sublicense, or distribute the Program
|
| 173 |
+
except as expressly provided under this License. Any attempt
|
| 174 |
+
otherwise to copy, modify, sublicense or distribute the Program is
|
| 175 |
+
void, and will automatically terminate your rights under this License.
|
| 176 |
+
However, parties who have received copies, or rights, from you under
|
| 177 |
+
this License will not have their licenses terminated so long as such
|
| 178 |
+
parties remain in full compliance.
|
| 179 |
+
|
| 180 |
+
5. You are not required to accept this License, since you have not
|
| 181 |
+
signed it. However, nothing else grants you permission to modify or
|
| 182 |
+
distribute the Program or its derivative works. These actions are
|
| 183 |
+
prohibited by law if you do not accept this License. Therefore, by
|
| 184 |
+
modifying or distributing the Program (or any work based on the
|
| 185 |
+
Program), you indicate your acceptance of this License to do so, and
|
| 186 |
+
all its terms and conditions for copying, distributing or modifying
|
| 187 |
+
the Program or works based on it.
|
| 188 |
+
|
| 189 |
+
6. Each time you redistribute the Program (or any work based on the
|
| 190 |
+
Program), the recipient automatically receives a license from the
|
| 191 |
+
original licensor to copy, distribute or modify the Program subject to
|
| 192 |
+
these terms and conditions. You may not impose any further
|
| 193 |
+
restrictions on the recipients' exercise of the rights granted herein.
|
| 194 |
+
You are not responsible for enforcing compliance by third parties to
|
| 195 |
+
this License.
|
| 196 |
+
|
| 197 |
+
7. If, as a consequence of a court judgment or allegation of patent
|
| 198 |
+
infringement or for any other reason (not limited to patent issues),
|
| 199 |
+
conditions are imposed on you (whether by court order, agreement or
|
| 200 |
+
otherwise) that contradict the conditions of this License, they do not
|
| 201 |
+
excuse you from the conditions of this License. If you cannot
|
| 202 |
+
distribute so as to satisfy simultaneously your obligations under this
|
| 203 |
+
License and any other pertinent obligations, then as a consequence you
|
| 204 |
+
may not distribute the Program at all. For example, if a patent
|
| 205 |
+
license would not permit royalty-free redistribution of the Program by
|
| 206 |
+
all those who receive copies directly or indirectly through you, then
|
| 207 |
+
the only way you could satisfy both it and this License would be to
|
| 208 |
+
refrain entirely from distribution of the Program.
|
| 209 |
+
|
| 210 |
+
If any portion of this section is held invalid or unenforceable under
|
| 211 |
+
any particular circumstance, the balance of the section is intended to
|
| 212 |
+
apply and the section as a whole is intended to apply in other
|
| 213 |
+
circumstances.
|
| 214 |
+
|
| 215 |
+
It is not the purpose of this section to induce you to infringe any
|
| 216 |
+
patents or other property right claims or to contest validity of any
|
| 217 |
+
such claims; this section has the sole purpose of protecting the
|
| 218 |
+
integrity of the free software distribution system, which is
|
| 219 |
+
implemented by public license practices. Many people have made
|
| 220 |
+
generous contributions to the wide range of software distributed
|
| 221 |
+
through that system in reliance on consistent application of that
|
| 222 |
+
system; it is up to the author/donor to decide if he or she is willing
|
| 223 |
+
to distribute software through any other system and a licensee cannot
|
| 224 |
+
impose that choice.
|
| 225 |
+
|
| 226 |
+
This section is intended to make thoroughly clear what is believed to
|
| 227 |
+
be a consequence of the rest of this License.
|
| 228 |
+
|
| 229 |
+
8. If the distribution and/or use of the Program is restricted in
|
| 230 |
+
certain countries either by patents or by copyrighted interfaces, the
|
| 231 |
+
original copyright holder who places the Program under this License
|
| 232 |
+
may add an explicit geographical distribution limitation excluding
|
| 233 |
+
those countries, so that distribution is permitted only in or among
|
| 234 |
+
countries not thus excluded. In such case, this License incorporates
|
| 235 |
+
the limitation as if written in the body of this License.
|
| 236 |
+
|
| 237 |
+
9. The Free Software Foundation may publish revised and/or new versions
|
| 238 |
+
of the General Public License from time to time. Such new versions will
|
| 239 |
+
be similar in spirit to the present version, but may differ in detail to
|
| 240 |
+
address new problems or concerns.
|
| 241 |
+
|
| 242 |
+
Each version is given a distinguishing version number. If the Program
|
| 243 |
+
specifies a version number of this License which applies to it and "any
|
| 244 |
+
later version", you have the option of following the terms and conditions
|
| 245 |
+
either of that version or of any later version published by the Free
|
| 246 |
+
Software Foundation. If the Program does not specify a version number of
|
| 247 |
+
this License, you may choose any version ever published by the Free Software
|
| 248 |
+
Foundation.
|
| 249 |
+
|
| 250 |
+
10. If you wish to incorporate parts of the Program into other free
|
| 251 |
+
programs whose distribution conditions are different, write to the author
|
| 252 |
+
to ask for permission. For software which is copyrighted by the Free
|
| 253 |
+
Software Foundation, write to the Free Software Foundation; we sometimes
|
| 254 |
+
make exceptions for this. Our decision will be guided by the two goals
|
| 255 |
+
of preserving the free status of all derivatives of our free software and
|
| 256 |
+
of promoting the sharing and reuse of software generally.
|
| 257 |
+
|
| 258 |
+
NO WARRANTY
|
| 259 |
+
|
| 260 |
+
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
| 261 |
+
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
| 262 |
+
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
| 263 |
+
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
|
| 264 |
+
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
| 265 |
+
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
| 266 |
+
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
|
| 267 |
+
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
|
| 268 |
+
REPAIR OR CORRECTION.
|
| 269 |
+
|
| 270 |
+
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
| 271 |
+
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
|
| 272 |
+
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
| 273 |
+
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
|
| 274 |
+
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
|
| 275 |
+
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
|
| 276 |
+
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
| 277 |
+
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
| 278 |
+
POSSIBILITY OF SUCH DAMAGES.
|
| 279 |
+
|
| 280 |
+
END OF TERMS AND CONDITIONS
|
| 281 |
+
|
| 282 |
+
How to Apply These Terms to Your New Programs
|
| 283 |
+
|
| 284 |
+
If you develop a new program, and you want it to be of the greatest
|
| 285 |
+
possible use to the public, the best way to achieve this is to make it
|
| 286 |
+
free software which everyone can redistribute and change under these terms.
|
| 287 |
+
|
| 288 |
+
To do so, attach the following notices to the program. It is safest
|
| 289 |
+
to attach them to the start of each source file to most effectively
|
| 290 |
+
convey the exclusion of warranty; and each file should have at least
|
| 291 |
+
the "copyright" line and a pointer to where the full notice is found.
|
| 292 |
+
|
| 293 |
+
{description}
|
| 294 |
+
Copyright (C) {year} {fullname}
|
| 295 |
+
|
| 296 |
+
This program is free software; you can redistribute it and/or modify
|
| 297 |
+
it under the terms of the GNU General Public License as published by
|
| 298 |
+
the Free Software Foundation; either version 2 of the License, or
|
| 299 |
+
(at your option) any later version.
|
| 300 |
+
|
| 301 |
+
This program is distributed in the hope that it will be useful,
|
| 302 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
| 303 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
| 304 |
+
GNU General Public License for more details.
|
| 305 |
+
|
| 306 |
+
You should have received a copy of the GNU General Public License along
|
| 307 |
+
with this program; if not, write to the Free Software Foundation, Inc.,
|
| 308 |
+
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
| 309 |
+
|
| 310 |
+
Also add information on how to contact you by electronic and paper mail.
|
| 311 |
+
|
| 312 |
+
If the program is interactive, make it output a short notice like this
|
| 313 |
+
when it starts in an interactive mode:
|
| 314 |
+
|
| 315 |
+
Gnomovision version 69, Copyright (C) year name of author
|
| 316 |
+
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
| 317 |
+
This is free software, and you are welcome to redistribute it
|
| 318 |
+
under certain conditions; type `show c' for details.
|
| 319 |
+
|
| 320 |
+
The hypothetical commands `show w' and `show c' should show the appropriate
|
| 321 |
+
parts of the General Public License. Of course, the commands you use may
|
| 322 |
+
be called something other than `show w' and `show c'; they could even be
|
| 323 |
+
mouse-clicks or menu items--whatever suits your program.
|
| 324 |
+
|
| 325 |
+
You should also get your employer (if you work as a programmer) or your
|
| 326 |
+
school, if any, to sign a "copyright disclaimer" for the program, if
|
| 327 |
+
necessary. Here is a sample; alter the names:
|
| 328 |
+
|
| 329 |
+
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
|
| 330 |
+
`Gnomovision' (which makes passes at compilers) written by James Hacker.
|
| 331 |
+
|
| 332 |
+
{signature of Ty Coon}, 1 April 1989
|
| 333 |
+
Ty Coon, President of Vice
|
| 334 |
+
|
| 335 |
+
This General Public License does not permit incorporating your program into
|
| 336 |
+
proprietary programs. If your program is a subroutine library, you may
|
| 337 |
+
consider it more useful to permit linking proprietary applications with the
|
| 338 |
+
library. If this is what you want to do, use the GNU Lesser General
|
| 339 |
+
Public License instead of this License.
|
ud-tools/udtools/MANIFEST.in
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
recursive-include tests *.*
|
ud-tools/udtools/README.md
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UD Tools
|
| 2 |
+
|
| 3 |
+
This package contains Python tools for [Universal Dependencies](https://universaldependencies.org/):
|
| 4 |
+
|
| 5 |
+
* The official UD/CoNLL-U validator
|
| 6 |
+
* The official UD parsing scorer from the CoNLL (2017, 2018) and IWPT (2020, 2021) shared tasks
|
| 7 |
+
|
| 8 |
+
## The official UD/CoNLL-U validator
|
| 9 |
+
|
| 10 |
+
Reads a CoNLL-U file and verifies that it complies with the UD specification. For more details on UD validation, visit
|
| 11 |
+
[the description](https://universaldependencies.org/contributing/validation.html) on the UD website.
|
| 12 |
+
|
| 13 |
+
The most up-to-date version of the validator always resides in the master branch of the
|
| 14 |
+
[tools](https://github.com/UniversalDependencies/tools) repository on GitHub. It is possible to run the script
|
| 15 |
+
`validate.py` from your local copy of the repository even without installing the `udtools` package via pip.
|
| 16 |
+
Nevertheless, you will need a few third-party modules the validator depends on. You can install them like this:
|
| 17 |
+
`pip install -r requirements.txt`.
|
| 18 |
+
|
| 19 |
+
If the root folder of the tools repository is in your system `PATH`, you do not have to be in that folder when
|
| 20 |
+
launching the script:
|
| 21 |
+
|
| 22 |
+
```
|
| 23 |
+
cat la_proiel-ud-train.conllu | python validate.py --lang la --max-err=0
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
You can run `python validate.py --help` for a list of available options.
|
| 27 |
+
|
| 28 |
+
### Invoking validation from your Python program
|
| 29 |
+
|
| 30 |
+
To use the validator from your Python code, first install `udtools` (possibly after creating and activating a virtual
|
| 31 |
+
environment). This should give you access to a fairly recent version of the validator but it will not necessarily be
|
| 32 |
+
the authoritative version, as it may lack some modifications of the language-specific data.
|
| 33 |
+
`pip install --upgrade udtools`
|
| 34 |
+
|
| 35 |
+
```python
|
| 36 |
+
from udtools import Validator
|
| 37 |
+
|
| 38 |
+
validator = Validator(lang='la')
|
| 39 |
+
state = validator.validate_files(['la_proiel-ud-train.conllu', 'la_proiel-ud-dev.conllu', 'la_proiel-ud-test.conllu'])
|
| 40 |
+
print(state)
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
The state is an object with various pieces of information collected during the validation run. Its string
|
| 44 |
+
representation is a summary of the warnings and errors found, as well as the string "*** PASSED ***" or
|
| 45 |
+
"*** FAILED ***". You can also use the state in a boolean context (condition), where “passed” evaluates as `True` and
|
| 46 |
+
“failed” as `False`. Note however that the default behavior of the validator is still to print errors and warnings to
|
| 47 |
+
STDERR as soon as they are detected. To suppress printing, add `output=None` to the arguments when constructing the
|
| 48 |
+
validator. (The default value of this argument is `sys.stderr`. You could also set it to `sys.stdout` or to a handle
|
| 49 |
+
of a file open for writing.)
|
| 50 |
+
|
| 51 |
+
```python
|
| 52 |
+
from udtools import Validator
|
| 53 |
+
|
| 54 |
+
validator = Validator(lang='la', output=None)
|
| 55 |
+
state = validator.validate_files(['la_proiel-ud-train.conllu', 'la_proiel-ud-dev.conllu', 'la_proiel-ud-test.conllu'])
|
| 56 |
+
if state:
|
| 57 |
+
print('Yay!')
|
| 58 |
+
else:
|
| 59 |
+
print('Oh no ☹')
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
Alternatively, you could simulate supplying the `--quiet` option as if it came from the command line:
|
| 63 |
+
|
| 64 |
+
```python
|
| 65 |
+
import sys
|
| 66 |
+
from udtools.argparser import parse_args_validator
|
| 67 |
+
from udtools import Validator
|
| 68 |
+
|
| 69 |
+
sys.argv = ['validate.py', '--lang=la', '--quiet']
|
| 70 |
+
args = parse_args_validator()
|
| 71 |
+
validator = Validator(lang='la', args=args)
|
| 72 |
+
state = validator.validate_files(['la_proiel-ud-train.conllu', 'la_proiel-ud-dev.conllu', 'la_proiel-ud-test.conllu'])
|
| 73 |
+
if state:
|
| 74 |
+
print('Yay!')
|
| 75 |
+
else:
|
| 76 |
+
print('Oh no ☹')
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
Instead of printing the errors to STDERR as soon as they are found, you can have them saved in the validation state
|
| 80 |
+
and later process them the way you prefer. Note that if you use the argparser approach from the previous example, the
|
| 81 |
+
number of incidents saved (per category) is limited by default. This is to save your memory if you do not need to keep
|
| 82 |
+
the errors (some treebanks have hundreds of thousands of errors and warnings). By setting `--max-store=0`, this limit
|
| 83 |
+
is turned off. However, the default limit is set in the argparser, so if you use the simpler approach with
|
| 84 |
+
`output=None` and you do not invoke the argparser for other reasons, no limit will be imposed.
|
| 85 |
+
|
| 86 |
+
```python
|
| 87 |
+
from udtools import Validator
|
| 88 |
+
|
| 89 |
+
validator = Validator(lang='la', output=None)
|
| 90 |
+
state = validator.validate_files(['la_proiel-ud-train.conllu', 'la_proiel-ud-dev.conllu', 'la_proiel-ud-test.conllu'])
|
| 91 |
+
# Take only errors, skip warnings.
|
| 92 |
+
all_errors = [x for x in state.error_tracker if x.is_error()]
|
| 93 |
+
all_errors.sort(key=lambda incident: incident.testid)
|
| 94 |
+
for error in all_errors:
|
| 95 |
+
print(error)
|
| 96 |
+
```
|
| 97 |
+
|
| 98 |
+
### Entry points
|
| 99 |
+
|
| 100 |
+
The validator has several other entry points in addition to `validate_files()`:
|
| 101 |
+
|
| 102 |
+
* `validate_file()` takes just one file name (path), reads that file and tests its validity. If the file name is '-',
|
| 103 |
+
it is interpreted as reading from STDIN. Note that `validate_files()` calls `validate_file()` for each file in turn,
|
| 104 |
+
then it also calls `validate_end()` to perform checks that can only be done after the whole treebank has been read.
|
| 105 |
+
If you call directly `validate_file()`, you should take care of calling `validate_end()` yourself.
|
| 106 |
+
* `validate_end()` takes just the state from the validation performed so far, and checks that the observations saved
|
| 107 |
+
in the state are not in conflict.
|
| 108 |
+
* `validate_file_handle()` takes the object associated with an open file (or `sys.stdin`). Otherwise it is analogous
|
| 109 |
+
to `validate_file()` (and is in fact called from `validate_file()`).
|
| 110 |
+
* `validate_sentence()` takes a list of CoNLL-U lines corresponding to one sentence, including the sentence-terminating
|
| 111 |
+
empty line. When called from `validate_file_handle()`, it will have at most one empty line and this will be the last
|
| 112 |
+
line in the list, as it is how the file reader detected the sentence end. However, the method is aware that other
|
| 113 |
+
callers could supply lists with empty lines in the middle, and it will report an error if this happens.
|
| 114 |
+
|
| 115 |
+
All the `validate_*()` methods mentioned above return a `State` object. All of them can optionally take a `State` from
|
| 116 |
+
previous runs as an argument (named `state`), in which case they will base their decisions on this state, and save
|
| 117 |
+
their observations in it, too.
|
| 118 |
+
|
| 119 |
+
The validator uses data files with specifications of feature values, lemmas of auxiliaries etc. for each language.
|
| 120 |
+
These files change more often than the validator code itself, so it is likely that your pip-installed `udtools` does
|
| 121 |
+
not have the most up-to-date version. Therefore, you may want to have a local copy of the tools repository, regularly
|
| 122 |
+
update it by calling `git pull`, and tell the validator where to load the data files from (instead of its installation
|
| 123 |
+
location):
|
| 124 |
+
|
| 125 |
+
```python
|
| 126 |
+
validator = Validator(lang='la', datapath='/my/copy/of/ud/tools/data')
|
| 127 |
+
```
|
| 128 |
+
|
| 129 |
+
### Printing incidents in JSON
|
| 130 |
+
|
| 131 |
+
Instead of prose error messages suitable for human users, you can print the error descriptions in JSON so it can be
|
| 132 |
+
easily read and processed by an external application.
|
| 133 |
+
|
| 134 |
+
```python
|
| 135 |
+
from udtools import Validator
|
| 136 |
+
|
| 137 |
+
validator = Validator(lang='la', output=None)
|
| 138 |
+
state = validator.validate_files(['la_proiel-ud-train.conllu', 'la_proiel-ud-dev.conllu', 'la_proiel-ud-test.conllu'])
|
| 139 |
+
incidents = sorted(state.error_tracker, key=lambda incident: incident.testid)
|
| 140 |
+
print('[')
|
| 141 |
+
print(',\n'.join([incident.json() for incident in incidents]))
|
| 142 |
+
print(']')
|
| 143 |
+
```
|
| 144 |
+
|
| 145 |
+
### Selecting only some tests
|
| 146 |
+
|
| 147 |
+
UD defines several
|
| 148 |
+
[levels of validity](https://universaldependencies.org/contributing/validation-rules.html#levels-of-validity)
|
| 149 |
+
of CoNLL-U files. By default, validity on the highest level 5 is required; this is the level that UD treebanks must
|
| 150 |
+
pass in order to be released as part of Universal Dependencies. It is possible to request a lower level of validity,
|
| 151 |
+
for example, only the backbone file structure can be checked, omitting any linguistic checks of the annotation
|
| 152 |
+
guidelines. When invoking `validate.py` from the command line, the numeric option `--level` (e.g., `--level 1`)
|
| 153 |
+
tells the validator to skip tests on levels 2 and above. The same argument can be given directly to the constructor
|
| 154 |
+
of the `Validator` class. The lowest level is not specific to individual languages, so we can give the generic
|
| 155 |
+
language "ud" instead.
|
| 156 |
+
|
| 157 |
+
```python
|
| 158 |
+
validator = Validator(lang='ud', level=1)
|
| 159 |
+
```
|
| 160 |
+
|
| 161 |
+
One may want to filter the tests along various other dimensions: errors only (skipping warnings); selected test classes
|
| 162 |
+
(FORMAT, MORPHO, SYNTAX, ENHANCED, METADATA etc.); individual test ids (e.g., `obl-should-be-nmod`). It is always
|
| 163 |
+
possible to do what we showed above, i.e., collecting all incidents, then processing them and showing only the selected
|
| 164 |
+
ones. However, this approach has its drawbacks: We waste time by running tests whose results we do not want to see;
|
| 165 |
+
for large treebanks it is not practical to postpone showing first results until the whole treebank is processed; and
|
| 166 |
+
it may be also quite heavy to keep all unnecessary incidents in memory.
|
| 167 |
+
|
| 168 |
+
You may try to get around this by implementing your own alternative to `validate_sentence()` and call individual tests
|
| 169 |
+
directly. There are some dangers though, which you should consider first:
|
| 170 |
+
|
| 171 |
+
* The tests are not documented at present, so you have to consult the source code. The relevant functions are methods
|
| 172 |
+
of `Validator` and their names start with `check_` (as opposed to `validate_`, which signals the better documented
|
| 173 |
+
entry points). Note that one `check_` method may generate multiple different incident types, whose ids are not
|
| 174 |
+
reflected in the name of the method; and a few incidents can even occur outside any `check_` method (e.g., directly
|
| 175 |
+
in a `validate_` method).
|
| 176 |
+
* The interface is far from stable. Names of methods may change at any time, as well as the types of incidents they
|
| 177 |
+
generate, the arguments they expect, their return values (if any) or side effects. Some checks only look at
|
| 178 |
+
individual cells in the CoNLL-U tabular format, others expect the fully built tree structure.
|
| 179 |
+
* There are dependencies among the tests. Some `check_` methods can be run safely only if other `check_` methods have
|
| 180 |
+
been run previously and did not encounter errors.
|
| 181 |
+
|
| 182 |
+
### Adding your own tests
|
| 183 |
+
|
| 184 |
+
You may want to add language-specific consistency tests beyond what the official validator can do (e.g., ensuring that
|
| 185 |
+
all personal pronouns have a non-empty value of the `Person` feature), or even treebank/project-specific tests (e.g.,
|
| 186 |
+
all tokens should have a valid `Ref` attribute in MISC). One way of doing this would be to derive your own validator
|
| 187 |
+
class based on `udtools.Validator`.
|
| 188 |
+
|
| 189 |
+
```python
|
| 190 |
+
from udtools import Validator
|
| 191 |
+
from udtools.incident import TestClass, Error
|
| 192 |
+
|
| 193 |
+
class MyValidator(Validator):
|
| 194 |
+
|
| 195 |
+
def validate_sentence(self, lines, state=None):
|
| 196 |
+
state = super().validate_sentence(lines, state)
|
| 197 |
+
self.check_my_own_stuff(state, lines)
|
| 198 |
+
return state
|
| 199 |
+
|
| 200 |
+
def check_my_own_stuff(self, state, lines):
|
| 201 |
+
for line in lines:
|
| 202 |
+
if re.match(r'40\t', line):
|
| 203 |
+
Error(
|
| 204 |
+
state=state, config=self.incfg,
|
| 205 |
+
level=1,
|
| 206 |
+
testclass=TestClass.FORMAT,
|
| 207 |
+
testid='id-40',
|
| 208 |
+
message="Node ID 40 is not allowed in this treebank."
|
| 209 |
+
).confirm()
|
| 210 |
+
|
| 211 |
+
validator = MyValidator(lang='la')
|
| 212 |
+
state = validator.validate_files(['la_proiel-ud-train.conllu', 'la_proiel-ud-dev.conllu', 'la_proiel-ud-test.conllu'])
|
| 213 |
+
print(state)
|
| 214 |
+
```
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
## The official UD parsing scorer
|
| 219 |
+
|
| 220 |
+
Reads two CoNLL-U files: gold standard (annotated manually) and system output (predicted by a parsing model). Both
|
| 221 |
+
files must be valid at least at level 2, and their underlying text must be compatible, i.e., it can differ in
|
| 222 |
+
whitespace but not in other characters. The scorer evaluates similarity of the system output to the gold standard
|
| 223 |
+
by computing several metrics that were defined in the UD parsing shared tasks (CoNLL 2017 & 2018, IWPT 2020 & 2021).
|
| 224 |
+
|
| 225 |
+
To load two files and evaluate their similarity without enhanced dependencies (i.e., in the style of the CoNLL shared
|
| 226 |
+
tasks), you can do the following.
|
| 227 |
+
|
| 228 |
+
```python
|
| 229 |
+
from udtools.udeval import load_conllu_file, evaluate, build_evaluation_table
|
| 230 |
+
|
| 231 |
+
gold_ud = load_conllu_file('gold.conllu')
|
| 232 |
+
system_ud = load_conllu_file('system.conllu')
|
| 233 |
+
evaluation = evaluate(gold_ud, system_ud)
|
| 234 |
+
results = build_evaluation_table(evaluation, verbose=True)
|
| 235 |
+
print(results)
|
| 236 |
+
```
|
| 237 |
+
|
| 238 |
+
To use the command line interface and arguments, you can use `parse_args_scorer()` as shown below. If you supply
|
| 239 |
+
`--help` as the only argument, you will get the description of the options available.
|
| 240 |
+
|
| 241 |
+
```python
|
| 242 |
+
from udtools.argparser import parse_args_scorer
|
| 243 |
+
from udtools.udeval import evaluate_wrapper, build_evaluation_table
|
| 244 |
+
|
| 245 |
+
args = parse_args_scorer()
|
| 246 |
+
evaluation = evaluate_wrapper(args)
|
| 247 |
+
results = build_evaluation_table(evaluation, args.verbose, args.counts, args.enhanced)
|
| 248 |
+
print(results)
|
| 249 |
+
```
|
ud-tools/udtools/pyproject.toml
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
|
| 2 |
+
requires = ["setuptools >= 77.0.3"]
|
| 3 |
+
build-backend = "setuptools.build_meta"
|
| 4 |
+
|
| 5 |
+
[project]
|
| 6 |
+
name = "udtools"
|
| 7 |
+
version = "0.2.7"
|
| 8 |
+
authors = [
|
| 9 |
+
{ name="Filip Ginter" },
|
| 10 |
+
{ name="Sampo Pyysalo" },
|
| 11 |
+
{ name="Daniel Zeman" },
|
| 12 |
+
{ name="Milan Straka" },
|
| 13 |
+
{ name="Martin Popel" },
|
| 14 |
+
{ name="Gosse Bouma" },
|
| 15 |
+
{ name="John Bauer" },
|
| 16 |
+
{ name="Arianna Masciolini" },
|
| 17 |
+
{ name="Ludovica Pannitto" },
|
| 18 |
+
]
|
| 19 |
+
maintainers = [
|
| 20 |
+
{ name="Daniel Zeman", email="zeman@ufal.mff.cuni.cz" },
|
| 21 |
+
]
|
| 22 |
+
description = "Python tools for Universal Dependencies"
|
| 23 |
+
readme = "README.md"
|
| 24 |
+
requires-python = ">=3.10"
|
| 25 |
+
dependencies = ["udapi>=0.5.0", "regex>=2020.09.27"]
|
| 26 |
+
classifiers = [
|
| 27 |
+
"Development Status :: 2 - Pre-Alpha",
|
| 28 |
+
"Intended Audience :: Science/Research",
|
| 29 |
+
"Programming Language :: Python :: 3",
|
| 30 |
+
"Operating System :: OS Independent",
|
| 31 |
+
]
|
| 32 |
+
license = "GPL-2.0-or-later"
|
| 33 |
+
license-files = ["LICEN[CS]E*"]
|
| 34 |
+
|
| 35 |
+
[project.urls]
|
| 36 |
+
Homepage = "https://universaldependencies.org/"
|
| 37 |
+
Issues = "https://github.com/UniversalDependencies/tools/issues"
|
| 38 |
+
|
| 39 |
+
[project.scripts]
|
| 40 |
+
udvalidate = "udtools.cli:main"
|
| 41 |
+
udeval = "udtools.cli:main_eval"
|
| 42 |
+
|
| 43 |
+
[tool.setuptools.packages.find]
|
| 44 |
+
where = ["src"]
|
| 45 |
+
|
| 46 |
+
[tool.setuptools.package-data]
|
| 47 |
+
udtools = ["data/*.json"]
|
| 48 |
+
|
ud-tools/udtools/src/udtools/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
udtools.
|
| 3 |
+
|
| 4 |
+
Python tools for Universal Dependencies.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
__version__ = "0.2.7"
|
| 8 |
+
__author__ = 'Daniel Zeman'
|
| 9 |
+
__credits__ = 'Universal Dependencies community'
|
| 10 |
+
|
| 11 |
+
from .validator import Validator
|
ud-tools/udtools/src/udtools/argparser.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Original code (2015) by Filip Ginter and Sampo Pyysalo.
|
| 2 |
+
# DZ 2018-11-04: Porting the validator to Python 3.
|
| 3 |
+
# DZ: Many subsequent changes. See the git history.
|
| 4 |
+
import sys
|
| 5 |
+
import argparse
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
#==============================================================================
|
| 9 |
+
# Argument processing for validate.py.
|
| 10 |
+
#==============================================================================
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def build_argparse_validator():
|
| 14 |
+
opt_parser = argparse.ArgumentParser(description="CoNLL-U validation script. Python 3 is needed to run it!")
|
| 15 |
+
|
| 16 |
+
io_group = opt_parser.add_argument_group("Input / output options")
|
| 17 |
+
io_group.add_argument('-q', '--quiet',
|
| 18 |
+
dest='quiet', action="store_true", default=False,
|
| 19 |
+
help="""Do not print anything (errors, warnings, summary).
|
| 20 |
+
Exit with 0 on pass, non-zero on fail.""")
|
| 21 |
+
io_group.add_argument('--no-warnings',
|
| 22 |
+
dest='no_warnings', action='store_true', default=False,
|
| 23 |
+
help="""Print only errors but no warnings.
|
| 24 |
+
The final summary will still include the number of warnings, although they were not printed.""")
|
| 25 |
+
io_group.add_argument('-e', '--exclude',
|
| 26 |
+
nargs='+',
|
| 27 |
+
help="""One or more ids of tests whose incidents should not be printed.
|
| 28 |
+
For example: --exclude missing-sent-id invalid-sent-id.
|
| 29 |
+
The tests will still be performed and resulting incidents counted in the final summary.""")
|
| 30 |
+
io_group.add_argument('-i', '--include-only',
|
| 31 |
+
nargs='+',
|
| 32 |
+
help="""One or more ids of tests whose incidents should be printed.
|
| 33 |
+
For example: --include-only missing-sent-id invalid-sent-id.
|
| 34 |
+
Any other incidents will not be printed.
|
| 35 |
+
The tests will still be performed and resulting incidents counted in the final summary.""")
|
| 36 |
+
io_group.add_argument('--max-err',
|
| 37 |
+
action='store', type=int, default=20,
|
| 38 |
+
help="""How many incidents to print per test class? 0 for all.
|
| 39 |
+
Default: %(default)d.""")
|
| 40 |
+
io_group.add_argument('input',
|
| 41 |
+
nargs='*',
|
| 42 |
+
help="""Input file name(s), or "-" or nothing for standard input.""")
|
| 43 |
+
|
| 44 |
+
test_group = opt_parser.add_argument_group("Test configuration options")
|
| 45 |
+
test_group.add_argument('--lang',
|
| 46 |
+
action='store', required=True, default=None,
|
| 47 |
+
help="""Which language are we checking (ISO 639 code)?
|
| 48 |
+
Determines the language-specific lists of features,
|
| 49 |
+
auxiliaries, relation subtypes etc. This is a required
|
| 50 |
+
argument. Use "ud" as the value if you only want to
|
| 51 |
+
test the universal part of the UD guidelines.""")
|
| 52 |
+
test_group.add_argument('--level',
|
| 53 |
+
action='store', type=int, default=5, dest="level",
|
| 54 |
+
help="""Level 1: Test only CoNLL-U backbone.
|
| 55 |
+
Level 2: UD format.
|
| 56 |
+
Level 3: UD contents.
|
| 57 |
+
Level 4: Language-specific labels.
|
| 58 |
+
Level 5 (default): Language-specific contents.""")
|
| 59 |
+
test_group.add_argument('--coref',
|
| 60 |
+
action='store_true', default=False, dest='check_coref',
|
| 61 |
+
help='Test coreference and entity-related annotation in MISC.')
|
| 62 |
+
return opt_parser
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def parse_args_validator(args=None):
|
| 67 |
+
"""
|
| 68 |
+
Creates an instance of the ArgumentParser and parses the command line
|
| 69 |
+
arguments.
|
| 70 |
+
|
| 71 |
+
Parameters
|
| 72 |
+
----------
|
| 73 |
+
args : list of strings, optional
|
| 74 |
+
If not supplied, the argument parser will read sys.argv instead.
|
| 75 |
+
Otherwise the caller can supply list such as ['--lang', 'en'].
|
| 76 |
+
|
| 77 |
+
Returns
|
| 78 |
+
-------
|
| 79 |
+
args : argparse.Namespace
|
| 80 |
+
Values of individual arguments can be accessed as object properties
|
| 81 |
+
(using the dot notation). It is possible to convert it to a dict by
|
| 82 |
+
calling vars(args).
|
| 83 |
+
"""
|
| 84 |
+
opt_parser = build_argparse_validator()
|
| 85 |
+
args = opt_parser.parse_args(args=args)
|
| 86 |
+
# Level of validation.
|
| 87 |
+
if args.level < 1:
|
| 88 |
+
print(f'Option --level must not be less than 1; changing from {args.level} to 1',
|
| 89 |
+
file=sys.stderr)
|
| 90 |
+
args.level = 1
|
| 91 |
+
# No language-specific tests for levels 1-3.
|
| 92 |
+
# Anyways, any Feature=Value pair should be allowed at level 3 (because it may be language-specific),
|
| 93 |
+
# and any word form or lemma can contain spaces (because language-specific guidelines may allow it).
|
| 94 |
+
# We can also test language 'ud' on level 4; then it will require that no language-specific features are present.
|
| 95 |
+
if args.level < 4:
|
| 96 |
+
args.lang = 'ud'
|
| 97 |
+
if args.input == []:
|
| 98 |
+
args.input.append('-')
|
| 99 |
+
return args
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
#==============================================================================
|
| 103 |
+
# Argument processing for eval.py.
|
| 104 |
+
#==============================================================================
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def parse_args_scorer(args=None):
|
| 109 |
+
"""
|
| 110 |
+
Creates an instance of the ArgumentParser and parses the command line
|
| 111 |
+
arguments.
|
| 112 |
+
|
| 113 |
+
Parameters
|
| 114 |
+
----------
|
| 115 |
+
args : list of strings, optional
|
| 116 |
+
If not supplied, the argument parser will read sys.argv instead.
|
| 117 |
+
Otherwise the caller can supply list such as ['--lang', 'en'].
|
| 118 |
+
|
| 119 |
+
Returns
|
| 120 |
+
-------
|
| 121 |
+
args : argparse.Namespace
|
| 122 |
+
Values of individual arguments can be accessed as object properties
|
| 123 |
+
(using the dot notation). It is possible to convert it to a dict by
|
| 124 |
+
calling vars(args).
|
| 125 |
+
"""
|
| 126 |
+
parser = argparse.ArgumentParser()
|
| 127 |
+
parser.add_argument('gold_file', type=str,
|
| 128 |
+
help='Name of the CoNLL-U file with the gold data.')
|
| 129 |
+
parser.add_argument('system_file', type=str,
|
| 130 |
+
help='Name of the CoNLL-U file with the predicted data.')
|
| 131 |
+
parser.add_argument('--verbose', '-v', default=False, action='store_true',
|
| 132 |
+
help='Print all metrics.')
|
| 133 |
+
parser.add_argument('--counts', '-c', default=False, action='store_true',
|
| 134 |
+
help='Print raw counts of correct/gold/system/aligned words instead of precision/recall/F1 for all metrics.')
|
| 135 |
+
parser.add_argument('--no-enhanced', dest='enhanced', action='store_false', default=True,
|
| 136 |
+
help='Turn off evaluation of enhanced dependencies.')
|
| 137 |
+
parser.add_argument('--enhancements', type=str, default='0',
|
| 138 |
+
help='Level of enhancements in the gold data (see guidelines) 0=all (default), 1=no gapping, 2=no shared parents, 3=no shared dependents, 4=no control, 5=no external arguments, 6=no lemma info, combinations: 12=both 1 and 2 apply, etc.')
|
| 139 |
+
parser.add_argument('--no-empty-nodes', default=False,
|
| 140 |
+
help='Empty nodes have been collapsed (needed to correctly evaluate enhanced/gapping). Raise exception if an empty node is encountered.')
|
| 141 |
+
parser.add_argument('--multiple-roots-okay', default=False, action='store_true',
|
| 142 |
+
help='A single sentence can have multiple nodes with HEAD=0.')
|
| 143 |
+
args = parser.parse_args(args=args)
|
| 144 |
+
return args
|
ud-tools/udtools/src/udtools/cli.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
# Original code (2015) by Filip Ginter and Sampo Pyysalo.
|
| 3 |
+
# DZ 2018-11-04: Porting the validator to Python 3.
|
| 4 |
+
# DZ: Many subsequent changes. See the git history.
|
| 5 |
+
# 2025-08-31: Refactoring by @AngledLuffa
|
| 6 |
+
# 2025-09: Refactoring by @harisont and @ellepannitto
|
| 7 |
+
import sys
|
| 8 |
+
from udtools.argparser import parse_args_validator, parse_args_scorer
|
| 9 |
+
from udtools.validator import Validator
|
| 10 |
+
from udtools.udeval import evaluate_wrapper, build_evaluation_table
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
#==============================================================================
|
| 15 |
+
# The main function.
|
| 16 |
+
#==============================================================================
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def main():
    """Entry point of the validator CLI.

    Parses the command-line arguments, runs the validator over the input
    files and prints a summary of warnings/errors to stderr (unless --quiet
    was given).

    Returns
    -------
    int
        0 if validation passed, 1 otherwise (suitable for sys.exit).
    """
    args = parse_args_validator()
    validator = Validator(lang=args.lang, level=args.level, max_store=10, args=args)
    state = validator.validate_files(args.input)
    if not args.quiet:
        # The string form of the state summarizes all collected incidents.
        print(str(state), file=sys.stderr)
    return 0 if state.passed() else 1
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def main_eval():
    """Entry point of the scorer CLI.

    Parses the command-line arguments, evaluates the system file against the
    gold file and prints the resulting metric table to stdout.

    Returns
    -------
    int
        Always 0 (evaluation errors raise exceptions instead).
    """
    options = parse_args_scorer()
    evaluation = evaluate_wrapper(options)
    table = build_evaluation_table(evaluation, options.verbose, options.counts, options.enhanced)
    print(table)
    return 0
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
if __name__ == "__main__":
    # Propagate the validator's status code so shell scripts can branch on it.
    sys.exit(main())
|
ud-tools/udtools/src/udtools/data.py
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path
|
| 2 |
+
# According to https://stackoverflow.com/questions/1832893/python-regex-matching-unicode-properties,
|
| 3 |
+
# the regex module has the same API as re but it can check Unicode character properties using \p{}
|
| 4 |
+
# as in Perl.
|
| 5 |
+
#import re
|
| 6 |
+
import regex as re
|
| 7 |
+
import json
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Data:
    """
    The Data class holds various dictionaries of tags, auxiliaries, regular
    expressions etc. needed for detailed testing, especially for language-
    specific constraints.
    """
    def __init__(self, datapath=None):
        """Load all language-specific validation data from JSON files.

        Parameters
        ----------
        datapath : str, optional
            Folder with the JSON data files. If not given, the folder is
            located relative to this module (repository checkout vs. an
            installed package).
        """
        if datapath:
            self.datapath = datapath
        else:
            # The folder where this module resides.
            THISDIR = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
            # If this module was imported directly from the root folder of the
            # tools repository, the data folder should be a first-level subfolder
            # there. Otherwise, the module is taken from installed udtools and
            # the data is a subfolder here.
            self.datapath = os.path.join(THISDIR, '..', '..', '..', 'data')
            if not os.path.exists(self.datapath):
                self.datapath = os.path.join(THISDIR, 'data')
        # Universal part of speech tags in the UPOS column. Just a set.
        # For consistency, they are also read from a file. But these tags do
        # not change, so they could be even hard-coded here.
        self.upos = set()
        # Morphological features in the FEATS column.
        # Key: language code; value: feature-value-UPOS data from feats.json.
        self.feats = {}
        # Universal dependency relation types (without subtypes) in the DEPREL
        # column. For consistency, they are also read from a file. but these
        # labels do not change, so they could be even hard-coded here.
        self.udeprel = set()
        # Dependency relation types in the DEPREL column.
        # Key: language code; value: deprel data from deprels.json.
        # Cached processed version: key: language code; value: set of deprels.
        self.deprel = {}
        self.cached_deprel_for_language = {}
        # Enhanced dependency relation types in the DEPS column.
        # Key: language code; value: edeprel data from edeprels.json.
        # Cached processed version: key: language code; value: set of edeprels.
        self.edeprel = {}
        self.cached_edeprel_for_language = {}
        # Auxiliary (and copula) lemmas in the LEMMA column.
        # Key: language code; value: auxiliary/copula data from data.json.
        # Cached processed versions: key: language code; value: list of lemmas.
        self.auxcop = {}
        self.cached_aux_for_language = {}
        self.cached_cop_for_language = {}
        # Tokens with spaces in the FORM and LEMMA columns.
        # Key: language code; value: data from tospace.json.
        self.tospace = {}
        # Load language-specific data from external JSON files.
        self.load()
        # For each of the language-specific lists, we can generate an
        # explanation for the user in case they use something that is not on
        # the list. The explanation will be printed only once but the explain
        # function may be called thousand times, so let us cache the output to
        # reduce the time waste a little.
        self._explanation_feats = {}
        self._explanation_deprel = {}
        self._explanation_edeprel = {}
        self._explanation_aux = {}
        self._explanation_cop = {}
        self._explanation_tospace = {}

    def get_feats_for_language(self, lcode):
        """
        Searches the previously loaded database of feature-value-UPOS combinations.
        Returns the data for a given language code, organized in dictionaries.
        Returns an empty dict if there are no data for the given language code.
        """
        ###!!! If lcode is 'ud', we should permit all universal feature-value pairs,
        ###!!! regardless of language-specific documentation.
        # Do not crash if the user asks for an unknown language.
        return self.feats.get(lcode, {})

    def get_deprel_for_language(self, lcode):
        """
        Searches the previously loaded database of dependency relation labels.
        Returns the set of permitted deprels for a given language code. Also
        saves the result in self so that next time it can be fetched quickly
        (once we loaded the data, we do not expect them to change).
        """
        if lcode in self.cached_deprel_for_language:
            return self.cached_deprel_for_language[lcode]
        deprelset = set()
        # If lcode is 'ud', we should permit all universal dependency relations,
        # regardless of language-specific documentation.
        if lcode == 'ud':
            # Copy the universal set so that a caller mutating the returned
            # set cannot corrupt self.udeprel.
            deprelset = set(self.udeprel)
        elif lcode in self.deprel:
            for r in self.deprel[lcode]:
                if self.deprel[lcode][r]['permitted'] > 0:
                    deprelset.add(r)
        self.cached_deprel_for_language[lcode] = deprelset
        return deprelset

    def get_edeprel_for_language(self, lcode):
        """
        Searches the previously loaded database of enhanced case markers.
        Returns the set of permitted edeprels for a given language code. Also
        saves the result in self so that next time it can be fetched quickly
        (once we loaded the data, we do not expect them to change).
        """
        if lcode in self.cached_edeprel_for_language:
            return self.cached_edeprel_for_language[lcode]
        basic_deprels = self.get_deprel_for_language(lcode)
        # 'ref' is always permitted in enhanced graphs.
        edeprelset = basic_deprels | {'ref'}
        # Any (sub)type of nsubj/csubj can get the :xsubj extension.
        for bdeprel in basic_deprels:
            if re.match(r"^[nc]subj(:|$)", bdeprel):
                edeprelset.add(bdeprel+':xsubj')
        if lcode in self.edeprel:
            for c in self.edeprel[lcode]:
                for deprel in self.edeprel[lcode][c]['extends']:
                    for bdeprel in basic_deprels:
                        # Literal prefix test; the original regex interpolated
                        # the deprel unescaped, which would misbehave if a
                        # deprel ever contained regex metacharacters.
                        if bdeprel == deprel or bdeprel.startswith(deprel + ':'):
                            edeprelset.add(bdeprel+':'+c)
        self.cached_edeprel_for_language[lcode] = edeprelset
        return edeprelset

    def get_auxcop_for_language(self, lcode):
        """
        Searches the previously loaded database of auxiliary/copula lemmas.
        Returns the AUX and COP lists for a given language code. Also saves
        the result in self so that next time it can be fetched quickly (once
        we loaded the data, we do not expect them to change).
        """
        if lcode in self.cached_aux_for_language and lcode in self.cached_cop_for_language:
            return self.cached_aux_for_language[lcode], self.cached_cop_for_language[lcode]
        # If any of the functions of the lemma is other than cop.PRON, it counts as an auxiliary.
        # If any of the functions of the lemma is cop.*, it counts as a copula.
        lemmalist = self.auxcop.get(lcode, {}).keys()
        auxlist = [x for x in lemmalist
                   if len([y for y in self.auxcop[lcode][x]['functions']
                           if y['function'] != 'cop.PRON']) > 0]
        coplist = [x for x in lemmalist
                   if len([y for y in self.auxcop[lcode][x]['functions']
                           if re.match(r"^cop\.", y['function'])]) > 0]
        self.cached_aux_for_language[lcode] = auxlist
        self.cached_cop_for_language[lcode] = coplist
        return auxlist, coplist

    def get_aux_for_language(self, lcode):
        """
        An entry point for get_auxcop_for_language() that returns only the aux
        list. It either takes the cached list (if available), or calls
        get_auxcop_for_language().
        """
        if lcode in self.cached_aux_for_language:
            return self.cached_aux_for_language[lcode]
        auxlist, coplist = self.get_auxcop_for_language(lcode)
        return auxlist

    def get_cop_for_language(self, lcode):
        """
        An entry point for get_auxcop_for_language() that returns only the cop
        list. It either takes the cached list (if available), or calls
        get_auxcop_for_language().
        """
        if lcode in self.cached_cop_for_language:
            return self.cached_cop_for_language[lcode]
        auxlist, coplist = self.get_auxcop_for_language(lcode)
        return coplist

    def get_tospace_for_language(self, lcode):
        """
        Searches the previously loaded database of regular expressions describing
        permitted tokens with spaces. Returns the expressions for a given language code.
        """
        # Do not crash if the user asks for an unknown language.
        return self.tospace.get(lcode)

    def explain_feats(self, lcode):
        """
        Returns explanation message for features of a particular language.
        To be called after language-specific features have been loaded.
        """
        if lcode in self._explanation_feats:
            return self._explanation_feats[lcode]
        featset = self.get_feats_for_language(lcode)
        # Prepare a global message about permitted features and values. We will add
        # it to the first error message about an unknown feature. Note that this
        # global information pertains to the default validation language and it
        # should not be used with code-switched segments in alternative languages.
        msg = ''
        if lcode not in self.feats:
            msg += f"No feature-value pairs have been permitted for language [{lcode}].\n"
            msg += "They can be permitted at the address below (if the language has an ISO code and is registered with UD):\n"
            msg += "https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_feature.pl\n"
        else:
            # Identify feature values that are permitted in the current language.
            for f in featset:
                for e in featset[f]['errors']:
                    msg += f"ERROR in _{lcode}/feat/{f}.md: {e}\n"
            res = set()
            for f in featset:
                if featset[f]['permitted'] > 0:
                    for v in featset[f]['uvalues']:
                        res.add(f+'='+v)
                    for v in featset[f]['lvalues']:
                        res.add(f+'='+v)
            sorted_documented_features = sorted(res)
            msg += f"The following {len(sorted_documented_features)} feature values are currently permitted in language [{lcode}]:\n"
            msg += ', '.join(sorted_documented_features) + "\n"
            msg += "If a language needs a feature that is not documented in the universal guidelines, the feature must\n"
            msg += "have a language-specific documentation page in a prescribed format.\n"
            msg += "See https://universaldependencies.org/contributing_language_specific.html for further guidelines.\n"
            msg += "All features including universal must be specifically turned on for each language in which they are used.\n"
            msg += "See https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_feature.pl for details.\n"
        self._explanation_feats[lcode] = msg
        return msg

    def explain_deprel(self, lcode):
        """
        Returns explanation message for deprels of a particular language.
        To be called after language-specific deprels have been loaded.
        """
        if lcode in self._explanation_deprel:
            return self._explanation_deprel[lcode]
        deprelset = self.get_deprel_for_language(lcode)
        # Prepare a global message about permitted relation labels. We will add
        # it to the first error message about an unknown relation. Note that this
        # global information pertains to the default validation language and it
        # should not be used with code-switched segments in alternative languages.
        msg = ''
        if len(deprelset) == 0:
            msg += f"No dependency relation types have been permitted for language [{lcode}].\n"
            msg += "They can be permitted at the address below (if the language has an ISO code and is registered with UD):\n"
            msg += "https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_deprel.pl\n"
        else:
            # Identify dependency relations that are permitted in the current language.
            # If there are errors in documentation, identify the erroneous doc file.
            # Note that data.deprel[lcode] may not exist even though we have a non-empty
            # set of relations, if lcode is 'ud'.
            if lcode in self.deprel:
                for r in self.deprel[lcode]:
                    file = re.sub(r':', r'-', r)
                    # 'aux.md' would collide with a reserved Windows file name.
                    if file == 'aux':
                        file = 'aux_'
                    for e in self.deprel[lcode][r]['errors']:
                        msg += f"ERROR in _{lcode}/dep/{file}.md: {e}\n"
            sorted_documented_relations = sorted(deprelset)
            msg += f"The following {len(sorted_documented_relations)} relations are currently permitted in language [{lcode}]:\n"
            msg += ', '.join(sorted_documented_relations) + "\n"
            msg += "If a language needs a relation subtype that is not documented in the universal guidelines, the relation\n"
            msg += "must have a language-specific documentation page in a prescribed format.\n"
            msg += "See https://universaldependencies.org/contributing_language_specific.html for further guidelines.\n"
            msg += "Documented dependency relations can be specifically turned on/off for each language in which they are used.\n"
            msg += "See https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_deprel.pl for details.\n"
        self._explanation_deprel[lcode] = msg
        return msg

    def explain_edeprel(self, lcode):
        """
        Returns explanation message for edeprels of a particular language.
        To be called after language-specific edeprels have been loaded.
        """
        if lcode in self._explanation_edeprel:
            return self._explanation_edeprel[lcode]
        edeprelset = self.get_edeprel_for_language(lcode)
        # Prepare a global message about permitted relation labels. We will add
        # it to the first error message about an unknown relation. Note that this
        # global information pertains to the default validation language and it
        # should not be used with code-switched segments in alternative languages.
        msg = ''
        if len(edeprelset) == 0:
            msg += f"No enhanced dependency relation types (case markers) have been permitted for language [{lcode}].\n"
            msg += "They can be permitted at the address below (if the language has an ISO code and is registered with UD):\n"
            msg += "https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_edeprel.pl\n"
        else:
            sorted_case_markers = sorted(edeprelset)
            msg += f"The following {len(sorted_case_markers)} enhanced relations are currently permitted in language [{lcode}]:\n"
            msg += ', '.join(sorted_case_markers) + "\n"
            msg += "See https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_edeprel.pl for details.\n"
        # Bug fix: this method previously stored the message in
        # self._explanation_deprel, so edeprel explanations were never cached
        # (and could clobber the basic-deprel explanation cache).
        self._explanation_edeprel[lcode] = msg
        return msg

    def explain_aux(self, lcode):
        """
        Returns explanation message for auxiliaries of a particular language.
        To be called after language-specific auxiliaries have been loaded.
        """
        if lcode in self._explanation_aux:
            return self._explanation_aux[lcode]
        auxdata = self.get_aux_for_language(lcode)
        # Prepare a global message about permitted auxiliary lemmas. We will add
        # it to the first error message about an unknown auxiliary. Note that this
        # global information pertains to the default validation language and it
        # should not be used with code-switched segments in alternative languages.
        msg = ''
        if len(auxdata) == 0:
            msg += f"No auxiliaries have been documented at the address below for language [{lcode}].\n"
            msg += f"https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_auxiliary.pl?lcode={lcode}\n"
        else:
            # Identify auxiliaries that are permitted in the current language.
            msg += f"The following {len(auxdata)} auxiliaries are currently documented in language [{lcode}]:\n"
            msg += ', '.join(auxdata) + "\n"
            msg += f"See https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_auxiliary.pl?lcode={lcode} for details.\n"
        self._explanation_aux[lcode] = msg
        return msg

    def explain_cop(self, lcode):
        """
        Returns explanation message for copulas of a particular language.
        To be called after language-specific copulas have been loaded.
        """
        if lcode in self._explanation_cop:
            return self._explanation_cop[lcode]
        copdata = self.get_cop_for_language(lcode)
        # Prepare a global message about permitted copula lemmas. We will add
        # it to the first error message about an unknown copula. Note that this
        # global information pertains to the default validation language and it
        # should not be used with code-switched segments in alternative languages.
        msg = ''
        if len(copdata) == 0:
            msg += f"No copulas have been documented at the address below for language [{lcode}].\n"
            msg += f"https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_auxiliary.pl?lcode={lcode}\n"
        else:
            # Identify auxiliaries that are permitted in the current language.
            msg += f"The following {len(copdata)} copulas are currently documented in language [{lcode}]:\n"
            msg += ', '.join(copdata) + "\n"
            msg += f"See https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_auxiliary.pl?lcode={lcode} for details.\n"
        self._explanation_cop[lcode] = msg
        return msg

    def explain_tospace(self, lcode):
        """
        Returns explanation message for tokens with spaces of a particular language.
        To be called after language-specific tokens with spaces have been loaded.
        """
        if lcode in self._explanation_tospace:
            return self._explanation_tospace[lcode]
        # Prepare a global message about permitted features and values. We will add
        # it to the first error message about an unknown token with space. Note that
        # this global information pertains to the default validation language and it
        # should not be used with code-switched segments in alternative languages.
        msg = ''
        if lcode not in self.tospace:
            msg += f"No tokens with spaces have been permitted for language [{lcode}].\n"
            msg += "They can be permitted at the address below (if the language has an ISO code and is registered with UD):\n"
            msg += "https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_token_with_space.pl\n"
        else:
            msg += f"Only tokens and lemmas matching the following regular expression are currently permitted to contain spaces in language [{lcode}]:\n"
            msg += self.tospace[lcode][0]
            msg += "\nOthers can be permitted at the address below (if the language has an ISO code and is registered with UD):\n"
            msg += "https://quest.ms.mff.cuni.cz/udvalidator/cgi-bin/unidep/langspec/specify_token_with_space.pl\n"
        self._explanation_tospace[lcode] = msg
        return msg

    def load(self):
        """
        Loads the external validation data such as permitted feature-value
        combinations, and stores them in self. The source JSON files are
        supposed to be in the data subfolder of the folder where the script
        lives.
        """
        with open(os.path.join(self.datapath, 'upos.json'), 'r', encoding='utf-8') as f:
            contents = json.load(f)
            upos_list = contents['upos']
            self.upos = set(upos_list)
        with open(os.path.join(self.datapath, 'feats.json'), 'r', encoding='utf-8') as f:
            contents = json.load(f)
            self.feats = contents['features']
        with open(os.path.join(self.datapath, 'udeprels.json'), 'r', encoding='utf-8') as f:
            contents = json.load(f)
            udeprel_list = contents['udeprels']
            self.udeprel = set(udeprel_list)
        with open(os.path.join(self.datapath, 'deprels.json'), 'r', encoding='utf-8') as f:
            contents = json.load(f)
            self.deprel = contents['deprels']
        with open(os.path.join(self.datapath, 'edeprels.json'), 'r', encoding='utf-8') as f:
            contents = json.load(f)
            self.edeprel = contents['edeprels']
        with open(os.path.join(self.datapath, 'data.json'), 'r', encoding='utf-8') as f:
            contents = json.load(f)
            self.auxcop = contents['auxiliaries']
        with open(os.path.join(self.datapath, 'tospace.json'), 'r', encoding='utf-8') as f:
            contents = json.load(f)
            # There is one or more regular expressions for each language in the file.
            # If there are multiple expressions, combine them in one and compile it.
            self.tospace = {}
            for l in contents['expressions']:
                combination = '('+'|'.join(sorted(list(contents['expressions'][l])))+')'
                compilation = re.compile(combination)
                self.tospace[l] = (combination, compilation)
|
ud-tools/udtools/src/udtools/incident.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from enum import Enum
|
| 3 |
+
from json import JSONEncoder
|
| 4 |
+
|
| 5 |
+
jenc = JSONEncoder()
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TestClass(Enum):
    """Thematic area to which a validation incident belongs.

    The numeric values define a fixed ordering used when sorting incidents.
    """
    INTERNAL = 0
    UNICODE = 1
    FORMAT = 2
    MORPHO = 3
    SYNTAX = 4
    ENHANCED = 5
    COREF = 6
    METADATA = 7

    def __str__(self):
        """Render just the member name, e.g. 'FORMAT'."""
        return self.name

    def __lt__(self, other):
        """Order members by their numeric value (enables sorting)."""
        return self.value < other.value
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class IncidentType(Enum):
    """Severity of a validation incident.

    WARNING sorts before ERROR (the numeric values define the ordering).
    """
    ERROR = 1
    WARNING = 0

    def __str__(self):
        """Render just the member name, e.g. 'ERROR'."""
        return self.name

    def __lt__(self, other):
        """Order members by their numeric value (WARNING < ERROR)."""
        return self.value < other.value
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@dataclass(order=True)
class Reference:
    """
    Points to a position in the source file. Each incident (error or warning)
    has always a main anchor (reference) that is reported with the error.
    However, some errors may also relate to additional positions in the file,
    which can be then given as a list of Reference objects. As a minimum,
    a reference indicates the line number. If there were multiple input files,
    it should also indicate the file name, as line numbers reset when new file
    starts. Optionally, references can also indicate the sentence and node ids
    (if pointing to a node) to facilitate locating the target. Finally, the
    reference can include a comment which explain its relation to the incident.
    """
    filename: str
    lineno: int
    sentid: str = ''
    nodeid: str = ''
    comment: str = ''

    def json(self):
        """
        Returns the reference description in JSON format so it can be passed to
        external applications easily.
        """
        jsonlist = []
        jsonlist.append(f'"filename": {jenc.encode(str(self.filename))}')
        # lineno is deliberately serialized as a string for compatibility
        # with existing consumers of this output.
        jsonlist.append(f'"lineno": "{str(self.lineno)}"')
        jsonlist.append(f'"sentid": {jenc.encode(str(self.sentid))}')
        # Bug fix: nodeid used to be interpolated into bare quotes without
        # JSON escaping, which produced invalid JSON whenever the id
        # contained a quote or backslash. Encode it like the other fields.
        jsonlist.append(f'"nodeid": {jenc.encode(str(self.nodeid))}')
        jsonlist.append(f'"comment": {jenc.encode(self.comment)}')
        return '{' + ', '.join(jsonlist) + '}'
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class Incident:
|
| 74 |
+
"""
|
| 75 |
+
Instances of this class describe individual errors or warnings in the input
|
| 76 |
+
file.
|
| 77 |
+
"""
|
| 78 |
+
# We can modify the class-level defaults before a batch of similar tests.
|
| 79 |
+
# Then we do not have to repeat the shared parameters for each test.
|
| 80 |
+
default_level = 1
|
| 81 |
+
default_testclass = TestClass.FORMAT
|
| 82 |
+
default_testid = 'generic-error'
|
| 83 |
+
default_message = 'No error description provided.'
|
| 84 |
+
default_lineno = None
|
| 85 |
+
def __init__(self, state, config, level=None, testclass=None, testid=None, message=None, lineno=None, nodeid=None, explanation='', references=[]):
|
| 86 |
+
self.state = state
|
| 87 |
+
self.config = config
|
| 88 |
+
|
| 89 |
+
# Validation level to which the incident belongs. Integer 1-5.
|
| 90 |
+
self.level = self.default_level if level == None else level
|
| 91 |
+
# Thematic area to which the incident belongs: Format, Meta, Morpho,
|
| 92 |
+
# Syntax, Enhanced, Coref, Warning.
|
| 93 |
+
self.testclass = self.default_testclass if testclass == None else testclass
|
| 94 |
+
# Identifier of the test that lead to the incident. Short string.
|
| 95 |
+
self.testid = self.default_testid if testid == None else testid
|
| 96 |
+
# Verbose description of the error for the user. It does not have to be
|
| 97 |
+
# identical for all errors with the same testid because it can contain
|
| 98 |
+
# instance-specific data (e.g. the word form).
|
| 99 |
+
self.message = self.default_message if message == None else message
|
| 100 |
+
# Additional more verbose information. To be printed with the first
|
| 101 |
+
# incident of a given type.
|
| 102 |
+
self.explanation = explanation
|
| 103 |
+
# File name. The default is the file from which we are reading right
|
| 104 |
+
# now ('-' if reading from STDIN).
|
| 105 |
+
self.filename = state.get_current_file_name()
|
| 106 |
+
# Line number. The default is the most recently read line as recorded
|
| 107 |
+
# in the state; but in most cases we need to get the number
|
| 108 |
+
# during instantiation, as the most recently read line is the last line
|
| 109 |
+
# of the sentence, and the error was found on one of the words of the
|
| 110 |
+
# sentence.
|
| 111 |
+
self.lineno = lineno if lineno != None else self.default_lineno if self.default_lineno != None else state.current_line
|
| 112 |
+
if self.lineno < 0:
|
| 113 |
+
self.lineno = state.sentence_line
|
| 114 |
+
# Current (most recently read) sentence id.
|
| 115 |
+
self.sentid = state.sentence_id
|
| 116 |
+
# ID of the node on which the error occurred (if it pertains to one node).
|
| 117 |
+
self.nodeid = nodeid
|
| 118 |
+
# Additional references to nodes or other input lines, if needed.
|
| 119 |
+
# List of Reference objects. Note that the main anchor is not included
|
| 120 |
+
# in the list; it is described directly in filename, lineno, sentid and
|
| 121 |
+
# nodeid parameters.
|
| 122 |
+
self.references = references
|
| 123 |
+
|
| 124 |
+
def json(self):
|
| 125 |
+
"""
|
| 126 |
+
Returns the incident description in JSON format so it can be passed to
|
| 127 |
+
external applications easily.
|
| 128 |
+
"""
|
| 129 |
+
jsonlist = []
|
| 130 |
+
jsonlist.append(f'"level": "{self.level}"')
|
| 131 |
+
jsonlist.append(f'"type": "{str(self.get_type())}"')
|
| 132 |
+
jsonlist.append(f'"testclass": "{str(self.testclass)}"')
|
| 133 |
+
jsonlist.append(f'"testid": "{str(self.testid)}"')
|
| 134 |
+
jsonlist.append(f'"filename": {jenc.encode(str(self.filename))}')
|
| 135 |
+
jsonlist.append(f'"lineno": "{str(self.lineno)}"')
|
| 136 |
+
jsonlist.append(f'"sentid": {jenc.encode(str(self.sentid))}')
|
| 137 |
+
jsonlist.append(f'"nodeid": "{str(self.nodeid)}"')
|
| 138 |
+
jsonlist.append(f'"message": {jenc.encode(str(self.message))}')
|
| 139 |
+
jsonlist.append(f'"explanation": {jenc.encode(str(self.explanation))}')
|
| 140 |
+
refjson = '[' + ', '.join([x.json() for x in self.references]) + ']'
|
| 141 |
+
jsonlist.append(f'"references": {refjson}')
|
| 142 |
+
return '{' + ', '.join(jsonlist) + '}'
|
| 143 |
+
|
| 144 |
+
def _count_me(self):
|
| 145 |
+
self.state.error_counter[self.get_type()][self.testclass] += 1
|
| 146 |
+
# Return 0 if we are not over max_err.
|
| 147 |
+
# Return 1 if we just crossed max_err (meaning we may want to print an explanation).
|
| 148 |
+
# Return 2 if we exceeded max_err by more than 1.
|
| 149 |
+
if 'max_err' in self.config and self.config['max_err'] > 0 and self.state.error_counter[self.get_type()][self.testclass] > self.config['max_err']:
|
| 150 |
+
if self.state.error_counter[self.get_type()][self.testclass] == self.config['max_err'] + 1:
|
| 151 |
+
return 1
|
| 152 |
+
else:
|
| 153 |
+
return 2
|
| 154 |
+
else:
|
| 155 |
+
return 0
|
| 156 |
+
|
| 157 |
+
def _store_me(self):
|
| 158 |
+
# self.state.error_tracker is a list of incidents.
|
| 159 |
+
if 'max_store' in self.config and self.config['max_store'] > 0 and len(self.state.error_tracker) >= self.config['max_store']:
|
| 160 |
+
return # we cannot store more incidents
|
| 161 |
+
self.state.error_tracker.append(self)
|
| 162 |
+
|
| 163 |
+
def __str__(self):
|
| 164 |
+
# If we are here, the error message should really be printed.
|
| 165 |
+
# Address of the incident.
|
| 166 |
+
address = f'Line {self.lineno} Sent {self.sentid}'
|
| 167 |
+
# Insert file name if there are several input files.
|
| 168 |
+
if 'report_filename' in self.config and self.config['report_filename']:
|
| 169 |
+
address = f'File {self.filename} ' + address
|
| 170 |
+
# Classification of the incident.
|
| 171 |
+
levelclassid = f'L{self.level} {self.testclass_to_report()} {self.testid}'
|
| 172 |
+
# Message (+ explanation, if this is the first error of its kind).
|
| 173 |
+
message = self.message
|
| 174 |
+
if self.explanation and self.explanation not in self.state.explanation_printed:
|
| 175 |
+
message += "\n\n" + self.explanation + "\n"
|
| 176 |
+
self.state.explanation_printed.add(self.explanation)
|
| 177 |
+
return f'[{address}]: [{levelclassid}] {message}'
|
| 178 |
+
|
| 179 |
+
def __lt__(self, other):
|
| 180 |
+
return self.lineno < other.lineno
|
| 181 |
+
|
| 182 |
+
def confirm(self):
|
| 183 |
+
"""
|
| 184 |
+
An Incident object is typically created at the time we know the incident
|
| 185 |
+
(error or warning) really occurred. However, sometimes it is useful to
|
| 186 |
+
prepare the object when we observe one necessary condition, and then
|
| 187 |
+
wait whether we also encounter the other necessary conditions. Once we
|
| 188 |
+
know that all conditions have been met, we should call this method. It
|
| 189 |
+
will take care of registering the incident, reporting it, adjusting
|
| 190 |
+
error counters etc. In the typical situation, one calls .confirm()
|
| 191 |
+
immediately after one constructs the Incident object.
|
| 192 |
+
"""
|
| 193 |
+
# Even if we should be quiet, at least count the error.
|
| 194 |
+
too_many = self._count_me()
|
| 195 |
+
self._store_me()
|
| 196 |
+
# Check if this incident should be printed, and if so, print it.
|
| 197 |
+
if not self.config['output'] or 'quiet' in self.config and self.config['quiet']:
|
| 198 |
+
return
|
| 199 |
+
if 'no_warnings' in self.config and self.config['no_warnings'] and self.is_warning():
|
| 200 |
+
return
|
| 201 |
+
if 'exclude' in self.config and self.testid in self.config['exclude']:
|
| 202 |
+
return
|
| 203 |
+
if 'include_only' in self.config and self.testid not in self.config['include_only']:
|
| 204 |
+
return
|
| 205 |
+
# Suppress error messages of a type of which we have seen too many.
|
| 206 |
+
if too_many > 0:
|
| 207 |
+
if too_many == 1:
|
| 208 |
+
print(f'...suppressing further messages regarding {str(self.get_type())}/{str(self.testclass)}', file=self.config['output'])
|
| 209 |
+
return # suppressed
|
| 210 |
+
print(str(self), file=self.config['output'])
|
| 211 |
+
|
| 212 |
+
def get_type(self):
|
| 213 |
+
""" This method must be overridden in derived classes. """
|
| 214 |
+
raise NotImplementedError()
|
| 215 |
+
|
| 216 |
+
def is_error(self):
|
| 217 |
+
return self.get_type() == IncidentType.ERROR
|
| 218 |
+
|
| 219 |
+
def is_warning(self):
|
| 220 |
+
return self.get_type() == IncidentType.WARNING
|
| 221 |
+
|
| 222 |
+
def testclass_to_report(self):
|
| 223 |
+
""" This method must be overridden in derived classes. """
|
| 224 |
+
raise NotImplementedError()
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
class Error(Incident):
|
| 229 |
+
def get_type(self):
|
| 230 |
+
return IncidentType.ERROR
|
| 231 |
+
def testclass_to_report(self):
|
| 232 |
+
return str(self.testclass)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
class Warning(Incident):
|
| 237 |
+
def get_type(self):
|
| 238 |
+
return IncidentType.WARNING
|
| 239 |
+
def testclass_to_report(self):
|
| 240 |
+
return 'WARNING'
|
ud-tools/udtools/src/udtools/level1.py
ADDED
|
@@ -0,0 +1,623 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
# Original code (2015) by Filip Ginter and Sampo Pyysalo.
|
| 3 |
+
# DZ 2018-11-04: Porting the validator to Python 3.
|
| 4 |
+
# DZ: Many subsequent changes. See the git history.
|
| 5 |
+
import unicodedata
|
| 6 |
+
# Allow using this module from the root folder of tools even if it is not
|
| 7 |
+
# installed as a package: use the relative path validator/src/validator for
|
| 8 |
+
# submodules. If the path is not available, try the standard qualification,
|
| 9 |
+
# assuming that the user has installed udtools from PyPI and then called
|
| 10 |
+
# from udtools import Validator.
|
| 11 |
+
try:
|
| 12 |
+
import udtools.src.udtools.utils as utils
|
| 13 |
+
from udtools.src.udtools.incident import Incident, Error, TestClass
|
| 14 |
+
except ModuleNotFoundError:
|
| 15 |
+
import udtools.utils as utils
|
| 16 |
+
from udtools.incident import Incident, Error, TestClass
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Constants for the column indices
|
| 21 |
+
COLCOUNT=10
|
| 22 |
+
ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC=range(COLCOUNT)
|
| 23 |
+
COLNAMES='ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC'.split(',')
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class Level1:
|
| 28 |
+
#==============================================================================
|
| 29 |
+
# Level 1 tests. Only CoNLL-U backbone. Values can be empty or non-UD.
|
| 30 |
+
#==============================================================================
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def check_sentence_lines(self, state):
|
| 34 |
+
"""
|
| 35 |
+
Low-level tests of a block of input lines that should represent one
|
| 36 |
+
sentence. If we are validating a file or treebank, the block was
|
| 37 |
+
probably obtained by reading lines from the file until the next empty
|
| 38 |
+
line. But it is also possible that the caller is an annotation tool,
|
| 39 |
+
which wants to validate one sentence in isolation.
|
| 40 |
+
|
| 41 |
+
Parameters
|
| 42 |
+
----------
|
| 43 |
+
state : udtools.state.State
|
| 44 |
+
The state of the validation run.
|
| 45 |
+
|
| 46 |
+
Reads from state
|
| 47 |
+
----------------
|
| 48 |
+
current_lines : list(str)
|
| 49 |
+
List of lines in the sentence (comments and tokens), including
|
| 50 |
+
final empty line. The lines are not expected to include the final
|
| 51 |
+
newline character.
|
| 52 |
+
First we expect an optional block (zero or more lines) of comments,
|
| 53 |
+
i.e., lines starting with '#'. Then we expect a non-empty block
|
| 54 |
+
(one or more lines) of nodes, empty nodes, and multiword tokens.
|
| 55 |
+
Finally, we expect exactly one empty line.
|
| 56 |
+
current_line : int
|
| 57 |
+
The number of the most recently read line from the input file
|
| 58 |
+
(1-based).
|
| 59 |
+
|
| 60 |
+
Writes to state
|
| 61 |
+
----------------
|
| 62 |
+
comment_start_line : int
|
| 63 |
+
The line number (relative to input file, 1-based) of the first line
|
| 64 |
+
in the current sentence, including comments if any.
|
| 65 |
+
sentence_line : int
|
| 66 |
+
The line number (relative to input file, 1-based) of the first
|
| 67 |
+
node/token line in the current sentence.
|
| 68 |
+
|
| 69 |
+
Incidents
|
| 70 |
+
---------
|
| 71 |
+
misplaced-comment
|
| 72 |
+
pseudo-empty-line
|
| 73 |
+
extra-empty-line
|
| 74 |
+
empty-sentence
|
| 75 |
+
invalid-line
|
| 76 |
+
missing-empty-line
|
| 77 |
+
+ those issued by check_unicode_normalization()
|
| 78 |
+
|
| 79 |
+
Returns
|
| 80 |
+
-------
|
| 81 |
+
ok : bool
|
| 82 |
+
Is it OK to run subsequent checks? It can be OK even after some
|
| 83 |
+
less severe errors.
|
| 84 |
+
"""
|
| 85 |
+
Incident.default_level = 1
|
| 86 |
+
Incident.default_testclass = TestClass.FORMAT
|
| 87 |
+
# When we arrive here, state.current_line points to the last line of the
|
| 88 |
+
# sentence, that is, the terminating empty line (if the input is valid).
|
| 89 |
+
lines = state.current_lines
|
| 90 |
+
n_lines = len(lines)
|
| 91 |
+
state.comment_start_line = state.current_line - n_lines + 1
|
| 92 |
+
state.sentence_line = state.comment_start_line # temporarily, until we find the first token
|
| 93 |
+
seen_non_comment = False # once we see non-comment, no further comments allowed
|
| 94 |
+
seen_token_node = False # at least one such line per sentence required
|
| 95 |
+
last_line_is_empty = False
|
| 96 |
+
ok = True # is it ok to run subsequent tests? It can be ok even after some less severe errors.
|
| 97 |
+
for i in range(n_lines):
|
| 98 |
+
lineno = state.comment_start_line + i
|
| 99 |
+
line = lines[i]
|
| 100 |
+
self.check_unicode_normalization(state, line, lineno)
|
| 101 |
+
# Comment lines.
|
| 102 |
+
if line and line[0] == '#':
|
| 103 |
+
# We will really validate sentence ids later. But now we want to remember
|
| 104 |
+
# everything that looks like a sentence id and use it in the error messages.
|
| 105 |
+
# Line numbers themselves may not be sufficient if we are reading multiple
|
| 106 |
+
# files from a pipe.
|
| 107 |
+
match = utils.crex.sentid.fullmatch(line)
|
| 108 |
+
if match:
|
| 109 |
+
state.sentence_id = match.group(1)
|
| 110 |
+
if seen_non_comment:
|
| 111 |
+
Error(
|
| 112 |
+
state=state, config=self.incfg, lineno=lineno,
|
| 113 |
+
testid='misplaced-comment',
|
| 114 |
+
message='Spurious comment line. Comments are only allowed before a sentence.'
|
| 115 |
+
).confirm()
|
| 116 |
+
ok = False
|
| 117 |
+
else:
|
| 118 |
+
if not seen_non_comment:
|
| 119 |
+
state.sentence_line = state.comment_start_line + i
|
| 120 |
+
seen_non_comment = True
|
| 121 |
+
# Token/node lines.
|
| 122 |
+
if line and line[0].isdigit():
|
| 123 |
+
seen_token_node = True
|
| 124 |
+
# Empty line (end of sentence).
|
| 125 |
+
elif not line or utils.is_whitespace(line):
|
| 126 |
+
# Lines consisting of space/tab characters are non-empty and invalid,
|
| 127 |
+
# so we will report an error but otherwise we will treat them as empty
|
| 128 |
+
# lines to prevent confusing subsequent errors.
|
| 129 |
+
if utils.is_whitespace(line):
|
| 130 |
+
Error(
|
| 131 |
+
state=state, config=self.incfg, lineno=lineno,
|
| 132 |
+
testid='pseudo-empty-line',
|
| 133 |
+
message='Spurious line that appears empty but is not; there are whitespace characters.'
|
| 134 |
+
).confirm()
|
| 135 |
+
# If the input lines were read from the input stream, there
|
| 136 |
+
# will be at most one empty line and it will be the last line
|
| 137 |
+
# (because it triggered returning a sentence). However, the
|
| 138 |
+
# list of lines may come from other sources (any user can
|
| 139 |
+
# ask for validation of their list of lines) and then we may
|
| 140 |
+
# encounter empty lines anywhere.
|
| 141 |
+
if i != n_lines-1:
|
| 142 |
+
Error(
|
| 143 |
+
state=state, config=self.incfg, lineno=lineno,
|
| 144 |
+
testid='extra-empty-line',
|
| 145 |
+
message='Spurious empty line that is not the last line of a sentence.'
|
| 146 |
+
).confirm()
|
| 147 |
+
ok = False
|
| 148 |
+
else:
|
| 149 |
+
last_line_is_empty = True
|
| 150 |
+
if not seen_token_node:
|
| 151 |
+
Error(
|
| 152 |
+
state=state, config=self.incfg, lineno=lineno,
|
| 153 |
+
testid='empty-sentence',
|
| 154 |
+
message='Sentence must not be empty. Only one empty line is expected after every sentence.'
|
| 155 |
+
).confirm()
|
| 156 |
+
ok = False
|
| 157 |
+
# A line which is neither a comment nor a token/word, nor empty. That's bad!
|
| 158 |
+
else:
|
| 159 |
+
Error(
|
| 160 |
+
state=state, config=self.incfg, lineno=lineno,
|
| 161 |
+
testid='invalid-line',
|
| 162 |
+
message=f"Spurious line: '{line}'. All non-empty lines should start with a digit or the # character."
|
| 163 |
+
).confirm()
|
| 164 |
+
ok = False
|
| 165 |
+
# If the last line is not empty (e.g. because the file ended prematurely),
|
| 166 |
+
# it is an error.
|
| 167 |
+
if not last_line_is_empty:
|
| 168 |
+
Error(
|
| 169 |
+
state=state, config=self.incfg, lineno=state.current_line,
|
| 170 |
+
testid='missing-empty-line',
|
| 171 |
+
message='Missing empty line after the sentence.'
|
| 172 |
+
).confirm()
|
| 173 |
+
ok = seen_token_node
|
| 174 |
+
return ok
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def check_sentence_columns(self, state):
|
| 178 |
+
"""
|
| 179 |
+
Low-level tests of the token/node lines of one sentence. The lines
|
| 180 |
+
should have been already checked by check_sentence_lines() and all
|
| 181 |
+
should start with a digit. We will split them to columns (cells),
|
| 182 |
+
check that there is the expected number of columns and that they are
|
| 183 |
+
not empty.
|
| 184 |
+
|
| 185 |
+
Parameters
|
| 186 |
+
----------
|
| 187 |
+
state : udtools.state.State
|
| 188 |
+
The state of the validation run.
|
| 189 |
+
|
| 190 |
+
Reads from state
|
| 191 |
+
----------------
|
| 192 |
+
current_lines : list(str)
|
| 193 |
+
List of lines in the sentence (comments and tokens), including
|
| 194 |
+
final empty line. The lines are not expected to include the final
|
| 195 |
+
newline character.
|
| 196 |
+
comment_start_line : int
|
| 197 |
+
The line number (relative to input file, 1-based) of the first line
|
| 198 |
+
in the current sentence, including comments if any.
|
| 199 |
+
sentence_line : int
|
| 200 |
+
The line number (relative to input file, 1-based) of the first
|
| 201 |
+
node/token line in the current sentence.
|
| 202 |
+
|
| 203 |
+
Writes to state
|
| 204 |
+
----------------
|
| 205 |
+
current_token_node_table : list(list(str))
|
| 206 |
+
The list of multiword token lines / regular node lines / empty node
|
| 207 |
+
lines, each split to fields (columns).
|
| 208 |
+
|
| 209 |
+
Incidents
|
| 210 |
+
---------
|
| 211 |
+
number-of-columns
|
| 212 |
+
+ those issued by check_whitespace()
|
| 213 |
+
|
| 214 |
+
Returns
|
| 215 |
+
-------
|
| 216 |
+
ok : bool
|
| 217 |
+
Is it OK to run subsequent checks? It can be OK even after some
|
| 218 |
+
less severe errors.
|
| 219 |
+
"""
|
| 220 |
+
Incident.default_level = 1
|
| 221 |
+
Incident.default_testclass = TestClass.FORMAT
|
| 222 |
+
n_comment_lines = state.sentence_line-state.comment_start_line
|
| 223 |
+
n_lines = len(state.current_lines)
|
| 224 |
+
# Normally we should exclude the last line because it is the empty line
|
| 225 |
+
# terminating the sentence. But if the empty line is missing (which is
|
| 226 |
+
# an error that we reported elsewhere), we must keep the last line.
|
| 227 |
+
range_end = n_lines-1 if (not state.current_lines[-1] or utils.is_whitespace(state.current_lines[-1])) else n_lines
|
| 228 |
+
token_lines = state.current_lines[n_comment_lines:range_end]
|
| 229 |
+
n_token_lines = len(token_lines)
|
| 230 |
+
token_lines_fields = [] # List of token/word lines of the current sentence, converted from string to list of fields.
|
| 231 |
+
ok = True # is it ok to run subsequent tests? It can be ok even after some less severe errors.
|
| 232 |
+
for i in range(n_token_lines):
|
| 233 |
+
lineno = state.sentence_line + i
|
| 234 |
+
line = token_lines[i]
|
| 235 |
+
cols = line.split("\t")
|
| 236 |
+
token_lines_fields.append(cols)
|
| 237 |
+
# If there is an unexpected number of columns, do not test their contents.
|
| 238 |
+
# Maybe the contents belongs to a different column. And we could see
|
| 239 |
+
# an exception if a column value is missing.
|
| 240 |
+
if len(cols) == COLCOUNT:
|
| 241 |
+
# Low-level tests, mostly universal constraints on whitespace in fields, also format of the ID field.
|
| 242 |
+
self.check_whitespace(state, cols, lineno)
|
| 243 |
+
else:
|
| 244 |
+
Error(
|
| 245 |
+
state=state, config=self.incfg, lineno=lineno,
|
| 246 |
+
testid='number-of-columns',
|
| 247 |
+
message=f'The line has {len(cols)} columns but {COLCOUNT} are expected.'
|
| 248 |
+
).confirm()
|
| 249 |
+
ok = False
|
| 250 |
+
state.current_token_node_table = token_lines_fields
|
| 251 |
+
return ok
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
#------------------------------------------------------------------------------
|
| 256 |
+
# Level 1 tests applicable to a single line independently of the others.
|
| 257 |
+
#------------------------------------------------------------------------------
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def check_unicode_normalization(self, state, text, lineno):
|
| 262 |
+
"""
|
| 263 |
+
Tests that letters composed of multiple Unicode characters (such as a base
|
| 264 |
+
letter plus combining diacritics) conform to NFC normalization (canonical
|
| 265 |
+
decomposition followed by canonical composition).
|
| 266 |
+
|
| 267 |
+
Parameters
|
| 268 |
+
----------
|
| 269 |
+
text : str
|
| 270 |
+
The input line to be tested. If the line consists of TAB-separated
|
| 271 |
+
fields (token line), errors reports will specify the field where the
|
| 272 |
+
error occurred. Otherwise (comment line), the error report will not be
|
| 273 |
+
localized.
|
| 274 |
+
|
| 275 |
+
Incidents
|
| 276 |
+
---------
|
| 277 |
+
unicode-normalization
|
| 278 |
+
"""
|
| 279 |
+
normalized_text = unicodedata.normalize('NFC', text)
|
| 280 |
+
if text != normalized_text:
|
| 281 |
+
# Find the first unmatched character and include it in the report.
|
| 282 |
+
firsti = -1
|
| 283 |
+
firstj = -1
|
| 284 |
+
inpfirst = ''
|
| 285 |
+
inpsecond = ''
|
| 286 |
+
nfcfirst = ''
|
| 287 |
+
tcols = text.split("\t")
|
| 288 |
+
ncols = normalized_text.split("\t")
|
| 289 |
+
for i in range(len(tcols)):
|
| 290 |
+
for j in range(len(tcols[i])):
|
| 291 |
+
if tcols[i][j] != ncols[i][j]:
|
| 292 |
+
firsti = i
|
| 293 |
+
firstj = j
|
| 294 |
+
inpfirst = unicodedata.name(tcols[i][j])
|
| 295 |
+
nfcfirst = unicodedata.name(ncols[i][j])
|
| 296 |
+
if j+1 < len(tcols[i]):
|
| 297 |
+
inpsecond = unicodedata.name(tcols[i][j+1])
|
| 298 |
+
break
|
| 299 |
+
if firsti >= 0:
|
| 300 |
+
break
|
| 301 |
+
if len(tcols) > 1:
|
| 302 |
+
testmessage = f"Unicode not normalized: {COLNAMES[firsti]}.character[{firstj}] is {inpfirst}, should be {nfcfirst}."
|
| 303 |
+
else:
|
| 304 |
+
testmessage = f"Unicode not normalized: character[{firstj}] is {inpfirst}, should be {nfcfirst}."
|
| 305 |
+
explanation_second = f" In this case, your next character is {inpsecond}." if inpsecond else ''
|
| 306 |
+
Error(
|
| 307 |
+
state=state, config=self.incfg, lineno=lineno,
|
| 308 |
+
level=1,
|
| 309 |
+
testclass=TestClass.UNICODE,
|
| 310 |
+
testid='unicode-normalization',
|
| 311 |
+
message=testmessage,
|
| 312 |
+
explanation=f"This error usually does not mean that {inpfirst} is an invalid character. Usually it means that this is a base character followed by combining diacritics, and you should replace them by a single combined character.{explanation_second} You can fix normalization errors using the normalize_unicode.pl script from the tools repository."
|
| 313 |
+
).confirm()
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def check_whitespace(self, state, cols, lineno):
|
| 318 |
+
"""
|
| 319 |
+
Checks that columns are not empty and do not contain whitespace characters
|
| 320 |
+
except for patterns that could be allowed at level 4. Applies to all types
|
| 321 |
+
of TAB-containing lines: nodes / words, mwt ranges, empty nodes.
|
| 322 |
+
|
| 323 |
+
Parameters
|
| 324 |
+
----------
|
| 325 |
+
cols : list
|
| 326 |
+
The values of the columns on the current node / token line.
|
| 327 |
+
|
| 328 |
+
Incidents
|
| 329 |
+
---------
|
| 330 |
+
invalid-whitespace-mwt
|
| 331 |
+
invalid-whitespace
|
| 332 |
+
empty-column
|
| 333 |
+
leading-whitespace
|
| 334 |
+
trailing-whitespace
|
| 335 |
+
repeated-whitespace
|
| 336 |
+
"""
|
| 337 |
+
Incident.default_level = 1
|
| 338 |
+
Incident.default_testclass = TestClass.FORMAT
|
| 339 |
+
Incident.default_lineno = lineno
|
| 340 |
+
# Some whitespace may be permitted in FORM, LEMMA and MISC but not elsewhere.
|
| 341 |
+
# Multi-word tokens may have whitespaces in MISC but not in FORM or LEMMA.
|
| 342 |
+
# If it contains a space, it does not make sense to treat it as a MWT.
|
| 343 |
+
ismwt = utils.is_multiword_token(cols)
|
| 344 |
+
for col_idx in range(COLCOUNT):
|
| 345 |
+
if col_idx >= len(cols):
|
| 346 |
+
break # this has been already reported in next_sentence()
|
| 347 |
+
if ismwt and col_idx in (FORM, LEMMA) and utils.crex.ws.search(cols[col_idx]):
|
| 348 |
+
Error(
|
| 349 |
+
state=state, config=self.incfg,
|
| 350 |
+
testid='invalid-whitespace-mwt',
|
| 351 |
+
message=f"White space not allowed in multi-word token '{cols[col_idx]}'. If it contains a space, it is not a single surface token."
|
| 352 |
+
).confirm()
|
| 353 |
+
# These columns must not have whitespace.
|
| 354 |
+
elif col_idx in (ID, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS) and utils.crex.ws.search(cols[col_idx]):
|
| 355 |
+
Error(
|
| 356 |
+
state=state, config=self.incfg,
|
| 357 |
+
testid='invalid-whitespace',
|
| 358 |
+
message=f"White space not allowed in column {COLNAMES[col_idx]}: '{cols[col_idx]}'."
|
| 359 |
+
).confirm()
|
| 360 |
+
# Only perform the following tests if we have not found and reported a space above.
|
| 361 |
+
else:
|
| 362 |
+
# Must never be empty
|
| 363 |
+
if not cols[col_idx]:
|
| 364 |
+
Error(
|
| 365 |
+
state=state, config=self.incfg,
|
| 366 |
+
testid='empty-column',
|
| 367 |
+
message=f"Empty value in column {COLNAMES[col_idx]}: '{cols[col_idx]}'."
|
| 368 |
+
).confirm()
|
| 369 |
+
else:
|
| 370 |
+
# Must never have leading/trailing/repeated whitespace.
|
| 371 |
+
# This will be only reported for columns that allow whitespace in general.
|
| 372 |
+
if cols[col_idx][0].isspace():
|
| 373 |
+
Error(
|
| 374 |
+
state=state, config=self.incfg,
|
| 375 |
+
testid='leading-whitespace',
|
| 376 |
+
message=f"Leading whitespace not allowed in column {COLNAMES[col_idx]}: '{cols[col_idx]}'."
|
| 377 |
+
).confirm()
|
| 378 |
+
if cols[col_idx][-1].isspace():
|
| 379 |
+
Error(
|
| 380 |
+
state=state, config=self.incfg,
|
| 381 |
+
testid='trailing-whitespace',
|
| 382 |
+
message=f"Trailing whitespace not allowed in column {COLNAMES[col_idx]}: '{cols[col_idx]}'."
|
| 383 |
+
).confirm()
|
| 384 |
+
# Must never contain two consecutive whitespace characters
|
| 385 |
+
if utils.crex.ws2.search(cols[col_idx]):
|
| 386 |
+
Error(
|
| 387 |
+
state=state, config=self.incfg,
|
| 388 |
+
testid='repeated-whitespace',
|
| 389 |
+
message=f"Two or more consecutive whitespace characters not allowed in column {COLNAMES[col_idx]}: '{cols[col_idx]}'."
|
| 390 |
+
).confirm()
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
#------------------------------------------------------------------------------
|
| 395 |
+
# Level 1 tests applicable to the whole sentence.
|
| 396 |
+
#------------------------------------------------------------------------------
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def check_id_sequence(self, state):
    """
    Validates that the ID sequence is correctly formed.
    Besides reporting the errors, it also returns False to the caller so it can
    avoid building a tree from corrupt IDs.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.

    Reads from state
    ----------------
    current_token_node_table : list(list(str))
        The list of multiword token lines / regular node lines / empty node
        lines, each split to fields (columns).
    sentence_line : int
        The line number (relative to input file, 1-based) of the first
        node/token line in the current sentence.

    Incidents
    ---------
    invalid-word-id
    invalid-word-interval
    misplaced-word-interval
    misplaced-empty-node
    word-id-sequence
    reversed-word-interval
    word-interval-out

    Returns
    -------
    ok : bool
        Is it OK to run subsequent checks? It can be OK even after some
        less severe errors.
    """
    ok = True
    Incident.default_level = 1
    Incident.default_testclass = TestClass.FORMAT
    # words ..... integer IDs of regular word lines, in the order encountered.
    # tokens .... (begin, end, lineno) intervals; single words get a default
    #             (id, id, lineno) interval unless covered by a preceding MWT.
    words=[]
    tokens=[]
    # current_word_id is the ID of the last regular word seen (0 before the
    # first word); next_empty_id is the expected minor number of the next
    # empty node, i.e. we expect the ID current_word_id.next_empty_id.
    current_word_id, next_empty_id = 0, 1
    for i in range(len(state.current_token_node_table)):
        lineno = state.sentence_line + i
        cols = state.current_token_node_table[i]
        # Check for the format of the ID value. (ID must not be empty.)
        if not (utils.is_word(cols) or utils.is_empty_node(cols) or utils.is_multiword_token(cols)):
            Error(
                state=state, config=self.incfg, lineno=lineno,
                testid='invalid-word-id',
                message=f"Unexpected ID format '{cols[ID]}'."
            ).confirm()
            ok = False
            continue
        if not utils.is_empty_node(cols):
            next_empty_id = 1 # reset sequence
        if utils.is_word(cols):
            t_id = int(cols[ID])
            current_word_id = t_id
            words.append(t_id)
            # Not covered by the previous interval?
            if not (tokens and tokens[-1][0] <= t_id and tokens[-1][1] >= t_id):
                tokens.append((t_id, t_id, lineno)) # nope - let's make a default interval for it
        elif utils.is_multiword_token(cols):
            match = utils.crex.mwtid.fullmatch(cols[ID]) # Check the interval against the regex
            if not match: # This should not happen. The function utils.is_multiword_token() would then not return True.
                Error(
                    state=state, config=self.incfg, lineno=lineno,
                    testid='invalid-word-interval',
                    message=f"Spurious word interval definition: '{cols[ID]}'."
                ).confirm()
                ok = False
                continue
            beg, end = int(match.group(1)), int(match.group(2))
            # The interval must start right after the last word seen so far
            # (or at 1 if no word has been seen yet).
            if not ((not words and beg >= 1) or (words and beg >= words[-1] + 1)):
                Error(
                    state=state, config=self.incfg, lineno=lineno,
                    testid='misplaced-word-interval',
                    message='Multiword range not before its first word.'
                ).confirm()
                ok = False
                continue
            tokens.append((beg, end, lineno))
        elif utils.is_empty_node(cols):
            # NOTE: the generator's 'i' lives in its own scope and does not
            # clobber the loop variable 'i' above (Python 3 genexpr scoping).
            word_id, empty_id = (int(i) for i in utils.parse_empty_node_id(cols))
            if word_id != current_word_id or empty_id != next_empty_id:
                Error(
                    state=state, config=self.incfg, lineno=lineno,
                    testid='misplaced-empty-node',
                    message=f'Empty node id {cols[ID]}, expected {current_word_id}.{next_empty_id}'
                ).confirm()
                ok = False
            next_empty_id += 1
            # Interaction of multiword tokens and empty nodes if there is an empty
            # node between the first word of a multiword token and the previous word:
            # This sequence is correct: 4 4.1 5-6 5 6
            # This sequence is wrong: 4 5-6 4.1 5 6
            if word_id == current_word_id and tokens and word_id < tokens[-1][0]:
                Error(
                    state=state, config=self.incfg, lineno=lineno,
                    testid='misplaced-empty-node',
                    message=f"Empty node id {cols[ID]} must occur before multiword token {tokens[-1][0]}-{tokens[-1][1]}."
                ).confirm()
                ok = False
    # Now let's do some basic sanity checks on the sequences.
    # Expected sequence of word IDs is 1, 2, ...
    expstrseq = ','.join(str(x) for x in range(1, len(words) + 1))
    wrdstrseq = ','.join(str(x) for x in words)
    if wrdstrseq != expstrseq:
        # lineno=-1: the error belongs to the sentence as a whole, not to one line.
        Error(
            state=state, config=self.incfg, lineno=-1,
            testid='word-id-sequence',
            message=f"Words do not form a sequence. Got '{wrdstrseq}'. Expected '{expstrseq}'."
        ).confirm()
        ok = False
    # Check elementary sanity of word intervals.
    # Remember that these are not just multi-word tokens. Here we have intervals even for single-word tokens (b=e)!
    for (b, e, lineno) in tokens:
        if e < b: # end before beginning
            Error(
                state=state, config=self.incfg, lineno=lineno,
                testid='reversed-word-interval',
                message=f'Spurious token interval {b}-{e}'
            ).confirm()
            ok = False
            continue
        if b < 1 or e > len(words): # out of range
            Error(
                state=state, config=self.incfg, lineno=lineno,
                testid='word-interval-out',
                message=f'Spurious token interval {b}-{e} (out of range)'
            ).confirm()
            ok = False
            continue
    return ok
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
def check_token_range_overlaps(self, state):
    """
    Verifies that no two multiword-token ranges in the sentence share a word.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.

    Reads from state
    ----------------
    current_token_node_table : list(list(str))
        The list of multiword token lines / regular node lines / empty node
        lines, each split to fields (columns).
    sentence_line : int
        The line number (relative to input file, 1-based) of the first
        node/token line in the current sentence.

    Incidents
    ---------
    invalid-word-interval
    overlapping-word-intervals

    Returns
    -------
    ok : bool
        Is it OK to run subsequent checks? It can be OK even after some
        less severe errors.
    """
    ok = True
    Incident.default_level = 1
    Incident.default_testclass = TestClass.FORMAT
    # Word IDs already claimed by some multiword-token range.
    covered = set()
    for offset, cols in enumerate(state.current_token_node_table):
        lineno = state.sentence_line + offset
        if not utils.is_multiword_token(cols):
            continue
        interval_match = utils.crex.mwtid.fullmatch(cols[ID])
        if interval_match is None:
            # Should be unreachable: utils.is_multiword_token() would not
            # have returned True if the ID did not match the regex.
            Error(
                state=state, config=self.incfg, lineno=lineno,
                testid='invalid-word-interval',
                message=f"Spurious word interval definition: '{cols[ID]}'."
            ).confirm()
            continue
        first, last = (int(g) for g in interval_match.groups())
        # Reversed intervals (first > last) were already reported in
        # check_id_sequence(), so they are not re-tested here.
        range_words = set(range(first, last + 1))
        if covered & range_words:
            Error(
                state=state, config=self.incfg, lineno=lineno,
                testid='overlapping-word-intervals',
                message=f'Range overlaps with others: {cols[ID]}'
            ).confirm()
            ok = False
        covered |= range_words
    return ok
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
#------------------------------------------------------------------------------
|
| 600 |
+
# Level 1 tests applicable to the whole input file.
|
| 601 |
+
#------------------------------------------------------------------------------
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
def check_newlines(self, state, inp):
    """
    Verifies that the input consistently used Unix-style newlines (bare LF,
    not the Windows CR LF). Must be run on the input file handle only after
    the whole input has been read, because Python accumulates the observed
    terminators in the handle's `newlines` attribute.

    Incidents
    ---------
    non-unix-newline
    """
    terminators = inp.newlines
    # `newlines` is None before anything was read; afterwards it is a str
    # (single terminator style) or a tuple of str (mixed styles).
    if not terminators:
        return
    if terminators == '\n':
        return
    Error(
        state=state, config=self.incfg,
        level=1,
        testclass=TestClass.FORMAT,
        lineno=state.current_line,
        testid='non-unix-newline',
        message='Only the unix-style LF line terminator is allowed.'
    ).confirm()
|
ud-tools/udtools/src/udtools/level2.py
ADDED
|
@@ -0,0 +1,1288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
# Original code (2015) by Filip Ginter and Sampo Pyysalo.
|
| 3 |
+
# DZ 2018-11-04: Porting the validator to Python 3.
|
| 4 |
+
# DZ: Many subsequent changes. See the git history.
|
| 5 |
+
# According to https://stackoverflow.com/questions/1832893/python-regex-matching-unicode-properties,
|
| 6 |
+
# the regex module has the same API as re but it can check Unicode character properties using \p{}
|
| 7 |
+
# as in Perl.
|
| 8 |
+
#import re
|
| 9 |
+
import regex as re
|
| 10 |
+
# Allow using this module from the root folder of tools even if it is not
|
| 11 |
+
# installed as a package: use the relative path validator/src/validator for
|
| 12 |
+
# submodules. If the path is not available, try the standard qualification,
|
| 13 |
+
# assuming that the user has installed udtools from PyPI and then called
|
| 14 |
+
# from udtools import Validator.
|
| 15 |
+
try:
|
| 16 |
+
import udtools.src.udtools.utils as utils
|
| 17 |
+
from udtools.src.udtools.incident import Incident, Error, TestClass, Reference
|
| 18 |
+
from udtools.src.udtools.level1 import Level1
|
| 19 |
+
except ModuleNotFoundError:
|
| 20 |
+
import udtools.utils as utils
|
| 21 |
+
from udtools.incident import Incident, Error, TestClass, Reference
|
| 22 |
+
from udtools.level1 import Level1
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Constants for the column indices
# of the ten CoNLL-U columns. COLNAMES provides the human-readable column
# names (parallel to the indices) for use in error messages.
COLCOUNT=10
ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC=range(COLCOUNT)
COLNAMES='ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC'.split(',')
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class Level2(Level1):
|
| 34 |
+
#==============================================================================
|
| 35 |
+
# Level 2 tests. Tree structure, universal tags and deprels. Note that any
|
| 36 |
+
# well-formed Feature=Value pair is allowed (because it could be language-
|
| 37 |
+
# specific) and any word form or lemma can contain spaces (because language-
|
| 38 |
+
# specific guidelines may permit it).
|
| 39 |
+
#==============================================================================
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
#------------------------------------------------------------------------------
|
| 44 |
+
# Level 2 tests applicable to a single line independently of the others.
|
| 45 |
+
#------------------------------------------------------------------------------
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def check_mwt_empty_vals(self, state, cols, line):
    """
    Verifies that a multi-word token line has '_' in every column other than
    ID, FORM and MISC. CoNLL-U as such would tolerate other values there,
    but the UD guidelines require '_', hence a level 2 test.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    cols : list
        The values of the columns on the current node / token line.
    line : int
        Number of the line where the node occurs in the file.

    Incidents
    ---------
    mwt-nonempty-field
    """
    assert utils.is_multiword_token(cols), 'internal error'
    # range(LEMMA, MISC) skips ID and FORM at the front and MISC at the end.
    for col_idx in range(LEMMA, MISC):
        value = cols[col_idx]
        # The single tolerated exception: Typo=Yes may appear in FEATS of a MWT.
        if col_idx == FEATS and value == 'Typo=Yes':
            continue
        if value == '_':
            continue
        Error(
            state=state, config=self.incfg,
            lineno=line,
            level=2,
            testclass=TestClass.FORMAT,
            testid='mwt-nonempty-field',
            message=f"A multi-word token line must have '_' in the column {COLNAMES[col_idx]}. Now: '{cols[col_idx]}'."
        ).confirm()
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def check_empty_node_empty_vals(self, state, cols, line):
    """
    Verifies that an empty node line carries '_' in both HEAD and DEPREL.
    The UD guidelines require this although generic CoNLL-U would not,
    hence a level 2 test.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    cols : list
        The values of the columns on the current node / token line.
    line : int
        Number of the line where the node occurs in the file.

    Incidents
    ---------
    empty-node-nonempty-field
    """
    assert utils.is_empty_node(cols), 'internal error'
    for col_idx in (HEAD, DEPREL):
        if cols[col_idx] == '_':
            continue
        Error(
            state=state, config=self.incfg,
            lineno=line,
            level=2,
            testclass=TestClass.FORMAT,
            testid='empty-node-nonempty-field',
            message=f"An empty node must have '_' in the column {COLNAMES[col_idx]}. Now: '{cols[col_idx]}'."
        ).confirm()
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def check_upos(self, state, cols, line):
    """
    Verifies that UPOS holds one of the 17 universal part-of-speech tags.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    cols : list
        The values of the columns on the current node / token line.
    line : int
        Number of the line where the node occurs in the file.

    Incidents
    ---------
    unknown-upos
    """
    tag = cols[UPOS]
    # Empty nodes are allowed to leave UPOS unspecified.
    if utils.is_empty_node(cols) and tag == '_':
        return
    # The tag list loaded from JSON should already satisfy the general
    # character constraints, but the regex is kept as a safety net.
    tag_is_known = bool(utils.crex.upos.fullmatch(tag)) and tag in self.data.upos
    if not tag_is_known:
        Error(
            state=state, config=self.incfg,
            lineno=line,
            level=2,
            testclass=TestClass.MORPHO,
            testid='unknown-upos',
            message=f"Unknown UPOS tag: '{cols[UPOS]}'."
        ).confirm()
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def check_feats_format(self, state, cols, line):
    """
    Checks general constraints on feature-value format: Permitted characters in
    feature name and value, features must be sorted alphabetically, features
    cannot be repeated etc.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    cols : list
        The values of the columns on the current node / token line.
    line : int
        Number of the line where the node occurs in the file.

    Incidents
    ---------
    unsorted-features
    invalid-feature
    repeated-feature-value
    unsorted-feature-values
    invalid-feature-value
    repeated-feature

    Returns
    -------
    safe : bool
        There were no errors or the errors are not so severe that we should
        refrain from loading the sentence into Udapi.
    """
    Incident.default_lineno = line
    Incident.default_level = 2
    Incident.default_testclass = TestClass.MORPHO
    feats = cols[FEATS]
    if feats == '_':
        return True
    utils.features_present(state, line)
    feat_list = feats.split('|')
    # Sortedness is checked case-insensitively (both sides lowercased).
    if [f.lower() for f in feat_list] != sorted(f.lower() for f in feat_list):
        Error(
            state=state, config=self.incfg,
            testid='unsorted-features',
            message=f"Morphological features must be sorted: '{feats}'."
        ).confirm()
    attr_set = set() # I'll gather the set of features here to check later that none is repeated.
    # Subsequent higher-level tests could fail if a feature is not in the
    # Feature=Value format. If that happens, we will return False and the caller
    # can skip the more fragile tests.
    safe = True
    for f in feat_list:
        match = utils.crex.featval.fullmatch(f)
        if match is None:
            Error(
                state=state, config=self.incfg,
                testid='invalid-feature',
                message=f"Spurious morphological feature: '{f}'. Should be of the form Feature=Value and must start with [A-Z] and only contain [A-Za-z0-9]."
            ).confirm()
            attr_set.add(f) # to prevent misleading error "Repeated features are disallowed"
            safe = False
        else:
            # Check that the values are sorted as well
            # (a feature may have multiple comma-separated values).
            attr = match.group(1)
            attr_set.add(attr)
            values = match.group(2).split(',')
            if len(values) != len(set(values)):
                Error(
                    state=state, config=self.incfg,
                    testid='repeated-feature-value',
                    message=f"Repeated feature values are disallowed: '{feats}'"
                ).confirm()
            if [v.lower() for v in values] != sorted(v.lower() for v in values):
                Error(
                    state=state, config=self.incfg,
                    testid='unsorted-feature-values',
                    message=f"If a feature has multiple values, these must be sorted: '{f}'"
                ).confirm()
            for v in values:
                if not utils.crex.val.fullmatch(v):
                    Error(
                        state=state, config=self.incfg,
                        testid='invalid-feature-value',
                        message=f"Spurious value '{v}' in '{f}'. Must start with [A-Z0-9] and only contain [A-Za-z0-9]."
                    ).confirm()
    # Level 2 tests character properties and canonical order but not that the f-v pair is known.
    # A repeated feature name collapses in attr_set, making it shorter than feat_list.
    if len(attr_set) != len(feat_list):
        Error(
            state=state, config=self.incfg,
            testid='repeated-feature',
            message=f"Repeated features are disallowed: '{feats}'."
        ).confirm()
    return safe
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def check_deprel_format(self, state, cols, line):
    """
    Checks general constraints on valid characters in DEPREL. Furthermore,
    if the general character format is OK, checks that the main relation
    type (udeprel) is defined in UD. Subtypes, if any, are ignored. This is
    a level 2 test and it does not consult language-specific lists. It will
    not report an error even if a main deprel is forbidden in a language.
    This method checks the DEPREL column but not DEPS.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    cols : list
        The values of the columns on the current node / token line.
    line : int
        Number of the line where the node occurs in the file.

    Incidents
    ---------
    invalid-deprel
    unknown-udeprel
    """
    Incident.default_level = 2
    Incident.default_lineno = line
    # Multiword token lines carry no dependency relation.
    if utils.is_multiword_token(cols):
        return
    # Empty nodes must have '_' in DEPREL but that has been already checked
    # in check_empty_node_empty_vals().
    if utils.is_empty_node(cols):
        return
    if not utils.crex.deprel.fullmatch(cols[DEPREL]):
        Error(
            state=state, config=self.incfg,
            testclass=TestClass.SYNTAX,
            testid='invalid-deprel',
            message=f"Invalid DEPREL value '{cols[DEPREL]}'. Only lowercase English letters or a colon are expected."
        ).confirm()
    else:
        # At this level, ignore the language-specific lists and use
        # language 'ud' instead.
        deprelset = self.data.get_deprel_for_language('ud')
        # Test only the universal part if testing at universal level.
        deprel = utils.lspec2ud(cols[DEPREL])
        if deprel not in deprelset:
            Error(
                state=state, config=self.incfg,
                testclass=TestClass.SYNTAX,
                testid='unknown-udeprel',
                message=f"Unknown main DEPREL type: '{deprel}'."
            ).confirm()
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def check_deps_format(self, state, cols, line):
    """
    Checks that DEPS is correctly formatted and that there are no
    self-loops in DEPS (longer cycles are allowed in enhanced graphs but
    self-loops are not).

    For each relation in DEPS, it also checks the general constraints on
    valid characters in DEPS. If the general character format is OK, checks
    that the main relation type of each relation in DEPS is on the list of
    main deprel types defined in UD. If there is a subtype, it is ignored.
    This is a level 2 test and it does not consult language-specific lists.
    It will not report an error even if a main deprel is forbidden in the
    language.

    This function must be run on raw DEPS before it is fed into Udapi because
    it checks the order of relations, which is not guaranteed to be preserved
    in Udapi. On the other hand, we assume that it is run after
    check_id_references() and only if DEPS is parsable and the head indices
    in it are OK.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    cols : list
        The values of the columns on the current node / token line.
    line : int
        Number of the line where the node occurs in the file.

    Incidents
    ---------
    unsorted-deps
    unsorted-deps-2
    repeated-deps
    deps-self-loop
    invalid-edeprel
    unknown-eudeprel
    """
    Incident.default_level = 2
    Incident.default_lineno = line
    if utils.is_multiword_token(cols):
        return
    if cols[DEPS] == '_':
        return
    # Remember whether there is at least one difference between the basic
    # tree and the enhanced graph in the entire dataset. (DEPS is known to
    # be non-'_' here thanks to the early return above.)
    if cols[DEPS] != cols[HEAD]+':'+cols[DEPREL]:
        state.seen_enhancement = line
    # We should have called check_id_references() before (and only come
    # here if that check succeeded); since utils.deps_list() is called
    # there, it should be now guaranteed that the contents of DEPS is
    # parsable.
    edeps = utils.deps_list(cols)
    heads = [utils.nodeid2tuple(h) for h, d in edeps]
    if heads != sorted(heads):
        Error(
            state=state, config=self.incfg,
            testclass=TestClass.FORMAT,
            testid='unsorted-deps',
            message=f"DEPS not sorted by head index: '{cols[DEPS]}'."
        ).confirm()
    else:
        # Heads are sorted; now verify that relations sharing a head are
        # sorted by relation type and not repeated.
        lasth = None
        lastd = None
        for h, d in edeps:
            if h == lasth:
                if d < lastd:
                    Error(
                        state=state, config=self.incfg,
                        testclass=TestClass.FORMAT,
                        testid='unsorted-deps-2',
                        message=f"DEPS pointing to head '{h}' not sorted by relation type: '{cols[DEPS]}'."
                    ).confirm()
                elif d == lastd:
                    Error(
                        state=state, config=self.incfg,
                        testclass=TestClass.FORMAT,
                        testid='repeated-deps',
                        message=f"DEPS contain multiple instances of the same relation '{h}:{d}'."
                    ).confirm()
            lasth = h
            lastd = d
    id_ = utils.nodeid2tuple(cols[ID])
    if id_ in heads:
        Error(
            state=state, config=self.incfg,
            testclass=TestClass.ENHANCED,
            testid='deps-self-loop',
            message=f"Self-loop in DEPS for '{cols[ID]}'"
        ).confirm()
    # At this level, ignore the language-specific lists and use language
    # 'ud' instead. Copy the set before extending it with 'ref' so that we
    # do not mutate data that get_deprel_for_language() may cache and share.
    deprelset = set(self.data.get_deprel_for_language('ud'))
    deprelset.add('ref')
    for head, deprel in edeps:
        if not utils.crex.edeprel.fullmatch(deprel):
            Error(
                state=state, config=self.incfg,
                testclass=TestClass.ENHANCED,
                testid='invalid-edeprel',
                message=f"Invalid enhanced relation type: '{cols[DEPS]}'."
            ).confirm()
        else:
            # Test only the universal part if testing at universal level.
            udeprel = utils.lspec2ud(deprel)
            if udeprel not in deprelset:
                Error(
                    state=state, config=self.incfg,
                    testclass=TestClass.ENHANCED,
                    testid='unknown-eudeprel',
                    message=f"Unknown main relation type '{udeprel}' in '{head}:{deprel}'."
                ).confirm()
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def check_misc(self, state, cols, line):
    """
    In general, the MISC column can contain almost anything. However, if there
    is a vertical bar character, it is interpreted as the separator of two
    MISC attributes, which may or may not have the form of attribute=value pair.
    In general it is not forbidden that the same attribute appears several times
    with different values, but this should not happen for selected attributes
    that are described in the UD documentation.

    This function must be run on raw MISC before it is fed into Udapi because
    Udapi is not prepared for some of the less recommended usages of MISC.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    cols : list
        The values of the columns on the current node / token line.
    line : int
        Number of the line where the node occurs in the file.

    Incidents
    ---------
    empty-misc
    empty-misc-key
    misc-extra-space
    misc-attr-typo
    repeated-misc
    """
    Incident.default_lineno = line
    Incident.default_level = 2
    Incident.default_testclass = TestClass.FORMAT
    if cols[MISC] == '_':
        return
    # Split into attributes; each attribute is [name] or [name, value].
    misc = [ma.split('=', 1) for ma in cols[MISC].split('|')]
    # Occurrence counter for the selected attributes that must be unique.
    mamap = {}
    for ma in misc:
        if ma[0] == '':
            if len(ma) == 1:
                Warning(
                    state=state, config=self.incfg,
                    testid='empty-misc',
                    message="Empty attribute in MISC; possible misinterpreted vertical bar?"
                ).confirm()
            else:
                Warning(
                    state=state, config=self.incfg,
                    testid='empty-misc-key',
                    message=f"Empty MISC attribute name in '{ma[0]}={ma[1]}'."
                ).confirm()
        # We do not warn about MISC items that do not contain '='.
        # But the remaining error messages below assume that ma[1] exists.
        if len(ma) == 1:
            ma.append('')
        if re.match(r"^\s", ma[0]):
            Warning(
                state=state, config=self.incfg,
                testid='misc-extra-space',
                message=f"MISC attribute name starts with space in '{ma[0]}={ma[1]}'."
            ).confirm()
        elif re.search(r"\s$", ma[0]):
            Warning(
                state=state, config=self.incfg,
                testid='misc-extra-space',
                message=f"MISC attribute name ends with space in '{ma[0]}={ma[1]}'."
            ).confirm()
        elif re.match(r"^\s", ma[1]):
            Warning(
                state=state, config=self.incfg,
                testid='misc-extra-space',
                message=f"MISC attribute value starts with space in '{ma[0]}={ma[1]}'."
            ).confirm()
        elif re.search(r"\s$", ma[1]):
            Warning(
                state=state, config=self.incfg,
                testid='misc-extra-space',
                message=f"MISC attribute value ends with space in '{ma[0]}={ma[1]}'."
            ).confirm()
        # Count occurrences of the documented attributes (case-sensitive);
        # warn about likely typos (wrong case or stray spaces) otherwise.
        if re.match(r"^(SpaceAfter|Lang|Translit|LTranslit|Gloss|LId|LDeriv|Ref)$", ma[0]):
            mamap[ma[0]] = mamap.get(ma[0], 0) + 1
        elif re.match(r"^\s*(spaceafter|lang|translit|ltranslit|gloss|lid|lderiv|ref)\s*$", ma[0], re.IGNORECASE):
            Warning(
                state=state, config=self.incfg,
                testid='misc-attr-typo',
                message=f"Possible typo (case or spaces) in MISC attribute '{ma[0]}={ma[1]}'."
            ).confirm()
    for a, count in mamap.items():
        if count > 1:
            Error(
                state=state, config=self.incfg,
                testclass=TestClass.FORMAT, # this one is real error
                testid='repeated-misc',
                message=f"MISC attribute '{a}' not supposed to occur twice"
            ).confirm()
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
#------------------------------------------------------------------------------
|
| 515 |
+
# Level 2 tests applicable to the whole sentence.
|
| 516 |
+
#------------------------------------------------------------------------------
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def check_id_references(self, state):
    """
    Verifies that HEAD and DEPS reference existing IDs. If this function does
    not return True, most of the other tests should be skipped for the current
    sentence (in particular anything that considers the tree structure).

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.

    Reads from state
    ----------------
    current_token_node_table : list(list(str))
        The list of multiword token lines / regular node lines / empty node
        lines, each split to fields (columns).
    sentence_line : int
        The line number (relative to input file, 1-based) of the first
        node/token line in the current sentence.

    Incidents
    ---------
    invalid-head
    unknown-head
    invalid-deps
    invalid-ehead
    unknown-ehead

    Returns
    -------
    ok : bool
        Is it OK to run subsequent checks? It can be OK even after some
        less severe errors.
    """
    ok = True
    Incident.default_level = 2
    Incident.default_testclass = TestClass.FORMAT
    # IDs that may legally be referenced: regular words and empty nodes
    # (multiword token ranges are never reference targets).
    ids = {cols[ID] for cols in state.current_token_node_table
           if utils.is_word(cols) or utils.is_empty_node(cols)}
    for offset, cols in enumerate(state.current_token_node_table):
        lineno = state.sentence_line + offset
        if utils.is_multiword_token(cols):
            continue
        # Test the basic HEAD only for non-empty nodes.
        # We have checked elsewhere that it is empty for empty nodes.
        if not utils.is_empty_node(cols):
            if utils.crex.head.fullmatch(cols[HEAD]) is None:
                Error(
                    state=state, config=self.incfg, lineno=lineno,
                    testid='invalid-head',
                    message=f"Invalid HEAD: '{cols[HEAD]}'."
                ).confirm()
                ok = False
            if cols[HEAD] != '0' and cols[HEAD] not in ids:
                Error(
                    state=state, config=self.incfg, lineno=lineno,
                    testclass=TestClass.SYNTAX,
                    testid='unknown-head',
                    message=f"Undefined HEAD (no such ID): '{cols[HEAD]}'."
                ).confirm()
                ok = False
        try:
            edeps = utils.deps_list(cols)
        except ValueError:
            # Similar errors have probably been reported earlier.
            Error(
                state=state, config=self.incfg, lineno=lineno,
                testid='invalid-deps',
                message=f"Failed to parse DEPS: '{cols[DEPS]}'."
            ).confirm()
            ok = False
            continue
        for ehead, edeprel in edeps:
            if utils.crex.ehead.fullmatch(ehead) is None:
                Error(
                    state=state, config=self.incfg, lineno=lineno,
                    testid='invalid-ehead',
                    message=f"Invalid enhanced head reference: '{ehead}'."
                ).confirm()
                ok = False
            if ehead != '0' and ehead not in ids:
                Error(
                    state=state, config=self.incfg, lineno=lineno,
                    testclass=TestClass.ENHANCED,
                    testid='unknown-ehead',
                    message=f"Undefined enhanced head reference (no such ID): '{ehead}'."
                ).confirm()
                ok = False
    return ok
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
def check_tree(self, state):
    """
    Takes the list of non-comment lines (line = list of columns) describing
    a sentence and verifies that the basic dependencies form a single rooted
    tree: no HEAD self-loops, exactly one child of the artificial root 0,
    and every word reachable from the root (i.e., no cycles).

    We will assume that this function is called only if both ID and HEAD
    values have been found valid for all tree nodes, including the sequence
    of IDs and the references from HEAD to existing IDs.

    This function originally served to build a data structure that would
    describe the tree and make it accessible during subsequent tests. Now we
    use the Udapi data structures instead but we still have to call this
    function first because it will survive and report ill-formed input. In
    such a case, the Udapi data structure will not be built and Udapi-based
    tests will be skipped.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.

    Reads from state
    ----------------
    current_token_node_table : list(list(str))
        The list of multiword token lines / regular node lines / empty node
        lines, each split to fields (columns).
    sentence_line : int
        The line number (relative to input file, 1-based) of the first
        node/token line in the current sentence.

    Incidents
    ---------
    head-self-loop
    multiple-roots
    non-tree

    Returns
    -------
    ok : bool
        Is it OK to run subsequent checks? It can be OK even after some
        less severe errors.
    """
    Incident.default_level = 2
    Incident.default_testclass = TestClass.SYNTAX
    children = {} # int(node id) -> set of children
    n_words = 0
    for i in range(len(state.current_token_node_table)):
        lineno = state.sentence_line + i
        cols = state.current_token_node_table[i]
        if not utils.is_word(cols):
            continue
        n_words += 1
        # ID and HEAD values have been validated before and this function would
        # not be called if they were not OK. So we can now safely convert them
        # to integers.
        id_ = int(cols[ID])
        head = int(cols[HEAD])
        if head == id_:
            Error(
                state=state, config=self.incfg, lineno=lineno,
                testid='head-self-loop',
                message=f'HEAD == ID for {cols[ID]}'
            ).confirm()
            return False
        # Incrementally build the set of children of every node.
        children.setdefault(head, set()).add(id_)
    word_ids = list(range(1, n_words+1))
    # Check that there is just one node with the root relation.
    children_0 = sorted(children.get(0, []))
    if len(children_0) > 1:
        # NOTE(review): 'i' here is a word ID, not an index into the token
        # table, so 'sentence_line + i' may point at the wrong line when the
        # sentence contains multiword token or empty node lines — confirm.
        references = [Reference(filename=state.current_file_name,
                                lineno=state.sentence_line + i,
                                sentid=state.sentence_id,
                                nodeid=i)
                      for i in children_0]
        Error(
            state=state, config=self.incfg, lineno=state.sentence_line,
            testid='multiple-roots',
            message=f"Multiple root words: {children_0}",
            references=references
        ).confirm()
        return False
    # Return False if there are any cycles. Otherwise we could not later ask
    # Udapi to build a data structure representing the tree.
    # Presence of cycles is equivalent to presence of unreachable nodes.
    projection = set()
    node_id = 0
    nodes = list((node_id,))
    while nodes:
        node_id = nodes.pop()
        children_id = sorted(children.get(node_id, []))
        for child in children_id:
            if child in projection:
                continue # skip cycles
            projection.add(child)
            nodes.append(child)
    unreachable = set(word_ids) - projection
    if unreachable:
        str_unreachable = ','.join(str(w) for w in sorted(unreachable))
        Error(
            state=state, config=self.incfg, lineno=state.sentence_line,
            testid='non-tree',
            message=f'Non-tree structure. Words {str_unreachable} are not reachable from the root 0.'
        ).confirm()
        return False
    return True
|
| 723 |
+
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
|
| 727 |
+
def check_deps_all_or_none(self, state):
    """
    Checks that enhanced dependencies are present if they were present in
    another sentence, and absent if they were absent in another sentence.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.

    Reads from state
    ----------------
    current_token_node_table : list(list(str))
        The list of multiword token lines / regular node lines / empty node
        lines, each split to fields (columns).
    sentence_line : int
        The line number (relative to input file, 1-based) of the first
        node/token line in the current sentence.

    Incidents
    ---------
    edeps-only-sometimes
    """
    # Enhanced deps are optional. A sentence counts as enhanced as soon as
    # it has an empty node or any non-empty DEPS value (MWT lines ignored).
    egraph_exists = any(
        utils.is_empty_node(cols) or cols[DEPS] != '_'
        for cols in state.current_token_node_table
        if not utils.is_multiword_token(cols)
    )
    # We are currently testing the existence of enhanced graphs separately for each sentence.
    # However, we should not allow that one sentence has a connected egraph and another
    # has no enhanced dependencies. Such inconsistency could come as a nasty surprise
    # to the users.
    Incident.default_lineno = state.sentence_line
    Incident.default_level = 2
    Incident.default_testclass = TestClass.ENHANCED
    if egraph_exists:
        if not state.seen_enhanced_graph:
            state.seen_enhanced_graph = state.sentence_line
        if state.seen_tree_without_enhanced_graph:
            Error(
                state=state, config=self.incfg,
                testid='edeps-only-sometimes',
                message=f"Enhanced graph must be empty because we saw empty DEPS on line {state.seen_tree_without_enhanced_graph}"
            ).confirm()
    else:
        if not state.seen_tree_without_enhanced_graph:
            state.seen_tree_without_enhanced_graph = state.sentence_line
        if state.seen_enhanced_graph:
            Error(
                state=state, config=self.incfg,
                testid='edeps-only-sometimes',
                message=f"Enhanced graph cannot be empty because we saw non-empty DEPS on line {state.seen_enhanced_graph}"
            ).confirm()
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
|
| 785 |
+
def check_egraph_connected(self, state, nodes):
    """
    Takes the list of nodes (including empty nodes). If there are enhanced
    dependencies in DEPS, builds the enhanced graph and checks that it is
    rooted and connected.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    nodes : list of udapi.core.node.Node objects
        List of nodes in the sentence, including empty nodes, sorted by word
        order.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    unconnected-egraph
    """
    # Enhanced deps are optional: connectivity is only required if there is
    # any trace of enhanced annotation (an empty node or a non-empty DEPS).
    egraph_exists = False
    egraph = {'0': {'children': set()}}
    nodeids = set()
    for node in nodes:
        parents = [dep['parent'] for dep in node.deps]
        if node.is_empty() or len(parents) > 0:
            egraph_exists = True
        ord_str = str(node.ord)
        nodeids.add(ord_str)
        # The graph may already contain a record for the current node if one of
        # the previous nodes is its child. If it doesn't, we will create it now.
        egraph.setdefault(ord_str, {}).setdefault('children', set())
        # Incrementally build the set of children of every node.
        for parent in parents:
            parent_str = str(parent.ord)
            egraph.setdefault(parent_str, {})
            egraph[parent_str].setdefault('children', set()).add(ord_str)
    # If there is no trace of enhanced annotation, there are no requirements
    # on the enhanced graph.
    if not egraph_exists:
        return
    # Check that the graph is rooted and connected. The UD guidelines do not
    # license unconnected graphs. Projection of the technical root (ord '0')
    # must contain all nodes. Depth-first traversal from the root:
    reachable = set()
    stack = ['0']
    while stack:
        current = stack.pop()
        for child in egraph[current]['children']:
            if child not in reachable: # skip cycles
                reachable.add(child)
                stack.append(child)
    unreachable = nodeids - reachable
    if unreachable:
        sur = sorted(unreachable)
        Error(
            state=state, config=self.incfg,
            lineno=state.current_node_linenos[sur[0]],
            level=2,
            testclass=TestClass.ENHANCED,
            testid='unconnected-egraph',
            message=f"Enhanced graph is not connected. Nodes {sur} are not reachable from any root"
        ).confirm()
    return None
|
| 854 |
+
|
| 855 |
+
|
| 856 |
+
|
| 857 |
+
#------------------------------------------------------------------------------
|
| 858 |
+
# Level 2 tests of sentence metadata.
|
| 859 |
+
#------------------------------------------------------------------------------
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
|
| 863 |
+
def check_sent_id(self, state):
    """
    Checks that sentence id exists, is well-formed and unique.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.

    Reads from state
    ----------------
    current_lines : list(str)
        List of lines in the sentence (comments and tokens), including
        final empty line. The lines are not expected to include the final
        newline character.
        First we expect an optional block (zero or more lines) of comments,
        i.e., lines starting with '#'. Then we expect a non-empty block
        (one or more lines) of nodes, empty nodes, and multiword tokens.
        Finally, we expect exactly one empty line.
    comment_start_line : int
        The line number (relative to input file, 1-based) of the first line
        in the current sentence, including comments if any.
    sentence_line : int
        The line number (relative to input file, 1-based) of the first
        node/token line in the current sentence.
    known_sent_ids : set
        Sentence ids already seen in this treebank.

    Writes to state
    ----------------
    known_sent_ids : set
        Sentence ids already seen in this treebank.

    Incidents
    ---------
    invalid-sent-id
    missing-sent-id
    multiple-sent-id
    non-unique-sent-id
    slash-in-sent-id
    """
    Incident.default_level = 2
    Incident.default_testclass = TestClass.METADATA
    Incident.default_lineno = -1 # use the first line after the comments
    n_comment_lines = state.sentence_line - state.comment_start_line
    comments = state.current_lines[:n_comment_lines]
    matched = []
    for comment in comments:
        m = utils.crex.sentid.fullmatch(comment)
        if m:
            matched.append(m)
        elif comment.startswith(('# sent_id', '#sent_id')):
            # Looks like an attempted sent_id line but does not parse.
            Error(
                state=state, config=self.incfg,
                testid='invalid-sent-id',
                message=f"Spurious sent_id line: '{comment}' should look like '# sent_id = xxxxx' where xxxxx is not whitespace. Forward slash reserved for special purposes."
            ).confirm()
    if not matched:
        Error(
            state=state, config=self.incfg,
            testid='missing-sent-id',
            message='Missing the sent_id attribute.'
        ).confirm()
    elif len(matched) > 1:
        Error(
            state=state, config=self.incfg,
            testid='multiple-sent-id',
            message='Multiple sent_id attributes.'
        ).confirm()
    else:
        # Uniqueness of sentence ids should be tested treebank-wide, not just file-wide.
        # For that to happen, all three files should be tested at once.
        sid = matched[0].group(1)
        if sid in state.known_sent_ids:
            Error(
                state=state, config=self.incfg,
                testid='non-unique-sent-id',
                message=f"Non-unique sent_id attribute '{sid}'."
            ).confirm()
        slashes = sid.count('/')
        if slashes > 1 or (slashes == 1 and self.lang != 'ud'):
            Error(
                state=state, config=self.incfg,
                testid='slash-in-sent-id',
                message=f"The forward slash is reserved for special use in parallel treebanks: '{sid}'"
            ).confirm()
        state.known_sent_ids.add(sid)
|
| 950 |
+
|
| 951 |
+
|
| 952 |
+
|
| 953 |
+
def check_parallel_id(self, state):
    """
    The parallel_id sentence-level comment is used after sent_id of
    sentences that are parallel translations of sentences in other
    treebanks. Like sent_id, it must be well-formed and unique. Unlike
    sent_id, it is optional. Sentences that do not have it are not
    parallel.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.

    Reads from state
    ----------------
    current_lines : list(str)
        List of lines in the sentence (comments and tokens), including
        final empty line. The lines are not expected to include the final
        newline character.
        First we expect an optional block (zero or more lines) of comments,
        i.e., lines starting with '#'. Then we expect a non-empty block
        (one or more lines) of nodes, empty nodes, and multiword tokens.
        Finally, we expect exactly one empty line.
    comment_start_line : int
        The line number (relative to input file, 1-based) of the first line
        in the current sentence, including comments if any.
    sentence_line : int
        The line number (relative to input file, 1-based) of the first
        node/token line in the current sentence.
    known_parallel_ids : set
        Parallel sentence ids already seen in this treebank.
    parallel_id_lastalt : dict
    parallel_id_lastpart : dict

    Writes to state
    ----------------
    known_parallel_ids : set
        Parallel sentence ids already seen in this treebank.
    parallel_id_lastalt : dict
    parallel_id_lastpart : dict

    Incidents
    ---------
    invalid-parallel-id
    multiple-parallel-id
    non-unique-parallel-id
    parallel-id-alt
    parallel-id-part
    """
    Incident.default_level = 2
    Incident.default_testclass = TestClass.METADATA
    Incident.default_lineno = -1 # use the first line after the comments
    n_comment_lines = state.sentence_line - state.comment_start_line
    comments = state.current_lines[0:n_comment_lines]
    matched = []
    for c in comments:
        match = utils.crex.parallelid.fullmatch(c)
        if match:
            matched.append(match)
        else:
            if c.startswith('# parallel_id') or c.startswith('#parallel_id'):
                Error(
                    state=state, config=self.incfg,
                    testid='invalid-parallel-id',
                    message=f"Spurious parallel_id line: '{c}' should look like '# parallel_id = corpus/sentence' where corpus is [a-z]+ and sentence is [-0-9a-z]. Optionally, '/alt[1-9][0-9]*' and/or 'part[1-9][0-9]*' may follow."
                ).confirm()
    if len(matched) > 1:
        Error(
            state=state, config=self.incfg,
            testid='multiple-parallel-id',
            message='Multiple parallel_id attributes.'
        ).confirm()
    elif matched:
        # Uniqueness of parallel ids should be tested treebank-wide, not just file-wide.
        # For that to happen, all three files should be tested at once.
        pid = matched[0].group(1)
        if pid in state.known_parallel_ids:
            Error(
                state=state, config=self.incfg,
                testid='non-unique-parallel-id',
                message=f"Non-unique parallel_id attribute '{pid}'."
            ).confirm()
        else:
            # Additional tests when pid has altN or partN.
            # Do them only if the whole pid is unique.
            sid = matched[0].group(2) + '/' + matched[0].group(3)
            alt = None
            part = None
            altpart = matched[0].group(4)
            if altpart:
                apmatch = re.fullmatch(r"(?:alt([0-9]+))?(?:part([0-9]+))?", altpart)
                if apmatch:
                    # Convert to int so that consecutive numbering can be checked.
                    alt = int(apmatch.group(1)) if apmatch.group(1) else None
                    part = int(apmatch.group(2)) if apmatch.group(2) else None
            if sid in state.parallel_id_lastalt:
                # Either all instances of sid have 'alt', or none of them do.
                if (state.parallel_id_lastalt[sid] is None) != (alt is None):
                    Error(
                        state=state, config=self.incfg,
                        testid='parallel-id-alt',
                        message=f"Some instances of parallel sentence '{sid}' have the 'alt' suffix while others do not."
                    ).confirm()
                elif alt is not None and alt != state.parallel_id_lastalt[sid] + 1:
                    Error(
                        state=state, config=self.incfg,
                        testid='parallel-id-alt',
                        message=f"The alt suffix of parallel sentence '{sid}' should be {state.parallel_id_lastalt[sid]}+1 but it is {alt}."
                    ).confirm()
            elif alt is not None and alt != 1:
                # First occurrence of sid: numbering must start at 1.
                Error(
                    state=state, config=self.incfg,
                    testid='parallel-id-alt',
                    message=f"The alt suffix of parallel sentence '{sid}' should be 1 but it is {alt}."
                ).confirm()
            state.parallel_id_lastalt[sid] = alt
            if sid in state.parallel_id_lastpart:
                # Either all instances of sid have 'part', or none of them do.
                if (state.parallel_id_lastpart[sid] is None) != (part is None):
                    Error(
                        state=state, config=self.incfg,
                        testid='parallel-id-part',
                        message=f"Some instances of parallel sentence '{sid}' have the 'part' suffix while others do not."
                    ).confirm()
                elif part is not None and part != state.parallel_id_lastpart[sid] + 1:
                    Error(
                        state=state, config=self.incfg,
                        testid='parallel-id-part',
                        message=f"The part suffix of parallel sentence '{sid}' should be {state.parallel_id_lastpart[sid]}+1 but it is {part}."
                    ).confirm()
            elif part is not None and part != 1:
                # First occurrence of sid: numbering must start at 1.
                Error(
                    state=state, config=self.incfg,
                    testid='parallel-id-part',
                    message=f"The part suffix of parallel sentence '{sid}' should be 1 but it is {part}."
                ).confirm()
            state.parallel_id_lastpart[sid] = part
            state.known_parallel_ids.add(pid)
|
| 1092 |
+
|
| 1093 |
+
|
| 1094 |
+
|
| 1095 |
+
    def check_text_meta(self, state):
        """
        Checks metadata other than sentence id, that is, document breaks, paragraph
        breaks and sentence text (which is also compared to the sequence of the
        forms of individual tokens, and the spaces vs. SpaceAfter=No in MISC).

        Parameters
        ----------
        state : udtools.state.State
            The state of the validation run.

        Reads from state
        ----------------
        current_lines : list(str)
            List of lines in the sentence (comments and tokens), including
            final empty line. The lines are not expected to include the final
            newline character.
            First we expect an optional block (zero or more lines) of comments,
            i.e., lines starting with '#'. Then we expect a non-empty block
            (one or more lines) of nodes, empty nodes, and multiword tokens.
            Finally, we expect exactly one empty line.
        comment_start_line : int
            The line number (relative to input file, 1-based) of the first line
            in the current sentence, including comments if any.
        current_token_node_table : list(list(str))
            The list of multiword token lines / regular node lines / empty node
            lines, each split to fields (columns).
        sentence_line : int
            The line number (relative to input file, 1-based) of the first
            node/token line in the current sentence.
        spaceafterno_in_effect : bool
            Whether the last token of the previous sentence had SpaceAfter=No.

        Writes to state
        ----------------
        spaceafterno_in_effect : bool
            Set according to whether the last word of this sentence carries
            SpaceAfter=No (prohibited at the end of a paragraph or document).

        Incidents
        ---------
        multiple-newdoc
        multiple-newpar
        spaceafter-newdocpar
        missing-text
        multiple-text
        text-trailing-whitespace
        nospaceafter-yes
        spaceafter-value
        spaceafter-empty-node
        spaceafter-mwt-node
        text-form-mismatch
        missing-spaceafter
        text-extra-chars
        """
        Incident.default_level = 2
        Incident.default_testclass = TestClass.METADATA
        Incident.default_lineno = -1 # use the first line after the comments
        # Comments occupy the lines between comment_start_line and sentence_line.
        n_comment_lines = state.sentence_line-state.comment_start_line
        comments = state.current_lines[0:n_comment_lines]
        newdoc_matched = []
        newpar_matched = []
        text_matched = []
        for c in comments:
            newdoc_match = utils.crex.newdoc.fullmatch(c)
            if newdoc_match:
                newdoc_matched.append(newdoc_match)
            newpar_match = utils.crex.newpar.fullmatch(c)
            if newpar_match:
                newpar_matched.append(newpar_match)
            text_match = utils.crex.text.fullmatch(c)
            if text_match:
                text_matched.append(text_match)
        if len(newdoc_matched) > 1:
            Error(
                state=state, config=self.incfg,
                testid='multiple-newdoc',
                message='Multiple newdoc attributes.'
            ).confirm()
        if len(newpar_matched) > 1:
            Error(
                state=state, config=self.incfg,
                testid='multiple-newpar',
                message='Multiple newpar attributes.'
            ).confirm()
        if (newdoc_matched or newpar_matched) and state.spaceafterno_in_effect:
            Error(
                state=state, config=self.incfg,
                testid='spaceafter-newdocpar',
                message='New document or paragraph starts when the last token of the previous sentence says SpaceAfter=No.'
            ).confirm()
        if not text_matched:
            Error(
                state=state, config=self.incfg,
                testid='missing-text',
                message='Missing the text attribute.'
            ).confirm()
        elif len(text_matched) > 1:
            Error(
                state=state, config=self.incfg,
                testid='multiple-text',
                message='Multiple text attributes.'
            ).confirm()
        else:
            stext = text_matched[0].group(1)
            # NOTE(review): assumes the matched text value is non-empty —
            # presumably guaranteed by the utils.crex.text pattern; confirm,
            # otherwise stext[-1] raises IndexError on '# text =' with no value.
            if stext[-1].isspace():
                Error(
                    state=state, config=self.incfg,
                    testid='text-trailing-whitespace',
                    message='The text attribute must not end with whitespace.'
                ).confirm()
            # Validate the text against the SpaceAfter attribute in MISC.
            skip_words = set()
            mismatch_reported = 0 # do not report multiple mismatches in the same sentence; they usually have the same cause
            for iline in range(len(state.current_token_node_table)):
                cols = state.current_token_node_table[iline]
                if 'NoSpaceAfter=Yes' in cols[MISC]: # I leave this without the split("|") to catch all
                    Error(
                        state=state, config=self.incfg,
                        testid='nospaceafter-yes',
                        message="'NoSpaceAfter=Yes' should be replaced with 'SpaceAfter=No'."
                    ).confirm()
                if len([x for x in cols[MISC].split('|') if re.match(r"^SpaceAfter=", x) and x != 'SpaceAfter=No']) > 0:
                    Error(
                        state=state, config=self.incfg,
                        lineno=state.sentence_line+iline,
                        testid='spaceafter-value',
                        message="Unexpected value of the 'SpaceAfter' attribute in MISC. Did you mean 'SpacesAfter'?"
                    ).confirm()
                if utils.is_empty_node(cols):
                    if 'SpaceAfter=No' in cols[MISC]: # I leave this without the split("|") to catch all
                        Error(
                            state=state, config=self.incfg,
                            lineno=state.sentence_line+iline,
                            testid='spaceafter-empty-node',
                            message="'SpaceAfter=No' cannot occur with empty nodes."
                        ).confirm()
                    # Empty nodes do not surface in the text; skip them.
                    continue
                elif utils.is_multiword_token(cols):
                    beg, end = cols[ID].split('-')
                    begi, endi = int(beg), int(end)
                    # If we see a multi-word token, add its words to an ignore-set – these will be skipped, and also checked for absence of SpaceAfter=No.
                    for i in range(begi, endi+1):
                        skip_words.add(str(i))
                elif cols[ID] in skip_words:
                    if 'SpaceAfter=No' in cols[MISC]:
                        Error(
                            state=state, config=self.incfg,
                            lineno=state.sentence_line+iline,
                            testid='spaceafter-mwt-node',
                            message="'SpaceAfter=No' cannot occur with words that are part of a multi-word token."
                        ).confirm()
                    continue
                else:
                    # Err, I guess we have nothing to do here. :)
                    pass
                # So now we have either a multi-word token or a word which is also a token in its entirety.
                if not stext.startswith(cols[FORM]):
                    if not mismatch_reported:
                        extra_message = ''
                        if len(stext) >= 1 and stext[0].isspace():
                            extra_message = ' (perhaps extra SpaceAfter=No at previous token?)'
                        Error(
                            state=state, config=self.incfg,
                            lineno=state.sentence_line+iline,
                            testid='text-form-mismatch',
                            message=f"Mismatch between the text attribute and the FORM field. Form[{cols[ID]}] is '{cols[FORM]}' but text is '{stext[:len(cols[FORM])+20]}...'"+extra_message
                        ).confirm()
                        mismatch_reported = 1
                else:
                    stext = stext[len(cols[FORM]):] # eat the form
                    # Remember if SpaceAfter=No applies to the last word of the sentence.
                    # This is not prohibited in general but it is prohibited at the end of a paragraph or document.
                    if 'SpaceAfter=No' in cols[MISC].split("|"):
                        state.spaceafterno_in_effect = True
                    else:
                        state.spaceafterno_in_effect = False
                    if (stext) and not stext[0].isspace():
                        Error(
                            state=state, config=self.incfg,
                            lineno=state.sentence_line+iline,
                            testid='missing-spaceafter',
                            message=f"'SpaceAfter=No' is missing in the MISC field of node {cols[ID]} because the text is '{utils.shorten(cols[FORM]+stext)}'."
                        ).confirm()
                    stext = stext.lstrip()
            if stext:
                Error(
                    state=state, config=self.incfg,
                    testid='text-extra-chars',
                    message=f"Extra characters at the end of the text attribute, not accounted for in the FORM fields: '{stext}'"
                ).confirm()
|
ud-tools/udtools/src/udtools/level3.py
ADDED
|
@@ -0,0 +1,1157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
# Original code (2015) by Filip Ginter and Sampo Pyysalo.
|
| 3 |
+
# DZ 2018-11-04: Porting the validator to Python 3.
|
| 4 |
+
# DZ: Many subsequent changes. See the git history.
|
| 5 |
+
# According to https://stackoverflow.com/questions/1832893/python-regex-matching-unicode-properties,
|
| 6 |
+
# the regex module has the same API as re but it can check Unicode character properties using \p{}
|
| 7 |
+
# as in Perl.
|
| 8 |
+
#import re
|
| 9 |
+
import regex as re
|
| 10 |
+
# Allow using this module from the root folder of tools even if it is not
|
| 11 |
+
# installed as a package: use the relative path validator/src/validator for
|
| 12 |
+
# submodules. If the path is not available, try the standard qualification,
|
| 13 |
+
# assuming that the user has installed udtools from PyPI and then called
|
| 14 |
+
# from udtools import Validator.
|
| 15 |
+
try:
|
| 16 |
+
import udtools.src.udtools.utils as utils
|
| 17 |
+
from udtools.src.udtools.incident import Incident, Error, Warning, TestClass
|
| 18 |
+
from udtools.src.udtools.level2 import Level2
|
| 19 |
+
except ModuleNotFoundError:
|
| 20 |
+
import udtools.utils as utils
|
| 21 |
+
from udtools.incident import Incident, Error, Warning, TestClass
|
| 22 |
+
from udtools.level2 import Level2
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class Level3(Level2):
|
| 27 |
+
#==============================================================================
|
| 28 |
+
# Level 3 tests. Annotation content vs. the guidelines (only universal tests).
|
| 29 |
+
#==============================================================================
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def check_required_feature(self, state, feats, required_feature, required_value, incident):
|
| 33 |
+
"""
|
| 34 |
+
In general, the annotation of morphological features is optional, although
|
| 35 |
+
highly encouraged. However, if the treebank does have features, then certain
|
| 36 |
+
features become required. This function will check the presence of a feature
|
| 37 |
+
and if it is missing, an error will be reported only if at least one feature
|
| 38 |
+
has been already encountered. Otherwise the error will be remembered and it
|
| 39 |
+
may be reported afterwards if any feature is encountered later.
|
| 40 |
+
|
| 41 |
+
Parameters
|
| 42 |
+
----------
|
| 43 |
+
feats : udapi.core.dualdict.DualDict object
|
| 44 |
+
The feature-value set to be tested whether they contain the required one.
|
| 45 |
+
required_feature : str
|
| 46 |
+
The name of the required feature.
|
| 47 |
+
required_value : str
|
| 48 |
+
The required value of the feature. Multivalues are not supported (they
|
| 49 |
+
are just a string value containing one or more commas). If
|
| 50 |
+
required_value is None or an empty string, it means that we require any
|
| 51 |
+
non-empty value of required_feature.
|
| 52 |
+
incident : Incident object
|
| 53 |
+
The message that should be printed if the error is confirmed.
|
| 54 |
+
"""
|
| 55 |
+
ok = True
|
| 56 |
+
if required_value:
|
| 57 |
+
if feats[required_feature] != required_value:
|
| 58 |
+
ok = False
|
| 59 |
+
else:
|
| 60 |
+
if feats[required_feature] == '':
|
| 61 |
+
ok = False
|
| 62 |
+
if not ok:
|
| 63 |
+
if state.seen_morpho_feature:
|
| 64 |
+
incident.confirm()
|
| 65 |
+
else:
|
| 66 |
+
if not incident.testid in state.delayed_feature_errors:
|
| 67 |
+
state.delayed_feature_errors[incident.testid] = {'occurrences': []}
|
| 68 |
+
state.delayed_feature_errors[incident.testid]['occurrences'].append({'incident': incident})
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def check_expected_features(self, state, node):
|
| 72 |
+
"""
|
| 73 |
+
Certain features are expected to occur with certain UPOS or certain values
|
| 74 |
+
of other features. This function issues warnings instead of errors, as
|
| 75 |
+
features are in general optional and language-specific. Even the warnings
|
| 76 |
+
are issued only if the treebank has features. Note that the expectations
|
| 77 |
+
tested here are considered (more or less) universal. Checking that a given
|
| 78 |
+
feature-value pair is compatible with a particular UPOS is done using
|
| 79 |
+
language-specific lists at level 4.
|
| 80 |
+
|
| 81 |
+
Parameters
|
| 82 |
+
----------
|
| 83 |
+
state : udtools.state.State
|
| 84 |
+
The state of the validation run.
|
| 85 |
+
node : udapi.core.node.Node object
|
| 86 |
+
The tree node to be tested.
|
| 87 |
+
|
| 88 |
+
Reads from state
|
| 89 |
+
----------------
|
| 90 |
+
current_node_linenos : dict(str: int)
|
| 91 |
+
Mapping from node ids (including empty nodes) to line numbers in
|
| 92 |
+
the input file.
|
| 93 |
+
|
| 94 |
+
Incidents
|
| 95 |
+
---------
|
| 96 |
+
pron-det-without-prontype
|
| 97 |
+
verbform-fin-without-mood
|
| 98 |
+
"""
|
| 99 |
+
Incident.default_lineno = state.current_node_linenos[str(node.ord)]
|
| 100 |
+
Incident.default_level = 3
|
| 101 |
+
Incident.default_testclass = TestClass.MORPHO
|
| 102 |
+
if node.upos in ['PRON', 'DET']:
|
| 103 |
+
self.check_required_feature(state, node.feats, 'PronType', None, Warning(
|
| 104 |
+
state=state, config=self.incfg,
|
| 105 |
+
testid='pron-det-without-prontype',
|
| 106 |
+
message=f"The word '{utils.formtl(node)}' is tagged '{node.upos}' but it lacks the 'PronType' feature"
|
| 107 |
+
))
|
| 108 |
+
# See https://github.com/UniversalDependencies/docs/issues/1155 for
|
| 109 |
+
# complaints about this warning.
|
| 110 |
+
if node.feats['VerbForm'] == 'Fin' and node.feats['Mood'] == '':
|
| 111 |
+
Warning(
|
| 112 |
+
state=state, config=self.incfg,
|
| 113 |
+
testid='verbform-fin-without-mood',
|
| 114 |
+
message=f"Finite verb '{utils.formtl(node)}' lacks the 'Mood' feature"
|
| 115 |
+
).confirm()
|
| 116 |
+
# We have to exclude AUX from the following test because they could be
|
| 117 |
+
# nonverbal and Mood could be their lexical feature
|
| 118 |
+
# (see https://github.com/UniversalDependencies/docs/issues/1147).
|
| 119 |
+
# Update: Lithuanian seems to need Mood=Nec with participles. Turning the test off.
|
| 120 |
+
#elif node.feats['Mood'] != '' and node.feats['VerbForm'] != 'Fin' and not (node.upos == 'AUX' and node.feats['VerbForm'] == ''):
|
| 121 |
+
# Warning(
|
| 122 |
+
# state=state, config=self.incfg,
|
| 123 |
+
# testid='mood-without-verbform-fin',
|
| 124 |
+
# message=f"Non-empty 'Mood' feature at a word that is not finite verb ('{utils.formtl(node)}')"
|
| 125 |
+
# ).confirm()
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def check_zero_root(self, state, node):
|
| 130 |
+
"""
|
| 131 |
+
Checks that DEPREL is "root" iff HEAD is 0.
|
| 132 |
+
|
| 133 |
+
Parameters
|
| 134 |
+
----------
|
| 135 |
+
state : udtools.state.State
|
| 136 |
+
The state of the validation run.
|
| 137 |
+
node : udapi.core.node.Node object
|
| 138 |
+
The node whose incoming relation will be validated. This function
|
| 139 |
+
operates on both regular and empty nodes. Make sure to call it for
|
| 140 |
+
empty nodes, too!
|
| 141 |
+
|
| 142 |
+
Reads from state
|
| 143 |
+
----------------
|
| 144 |
+
current_node_linenos : dict(str: int)
|
| 145 |
+
Mapping from node ids (including empty nodes) to line numbers in
|
| 146 |
+
the input file.
|
| 147 |
+
|
| 148 |
+
Incidents
|
| 149 |
+
---------
|
| 150 |
+
0-is-not-root
|
| 151 |
+
root-is-not-0
|
| 152 |
+
enhanced-0-is-not-root
|
| 153 |
+
enhanced-root-is-not-0
|
| 154 |
+
"""
|
| 155 |
+
Incident.default_lineno = state.current_node_linenos[str(node.ord)]
|
| 156 |
+
Incident.default_level = 3
|
| 157 |
+
Incident.default_testclass = TestClass.SYNTAX
|
| 158 |
+
if not node.is_empty():
|
| 159 |
+
if node.parent.ord == 0 and node.udeprel != 'root':
|
| 160 |
+
Error(
|
| 161 |
+
state=state, config=self.incfg,
|
| 162 |
+
testid='0-is-not-root',
|
| 163 |
+
message="DEPREL must be 'root' if HEAD is 0."
|
| 164 |
+
).confirm()
|
| 165 |
+
if node.parent.ord != 0 and node.udeprel == 'root':
|
| 166 |
+
Error(
|
| 167 |
+
state=state, config=self.incfg,
|
| 168 |
+
testid='root-is-not-0',
|
| 169 |
+
message="DEPREL cannot be 'root' if HEAD is not 0."
|
| 170 |
+
).confirm()
|
| 171 |
+
# In the enhanced graph, test both regular and empty roots.
|
| 172 |
+
for edep in node.deps:
|
| 173 |
+
if edep['parent'].ord == 0 and utils.lspec2ud(edep['deprel']) != 'root':
|
| 174 |
+
Error(
|
| 175 |
+
state=state, config=self.incfg,
|
| 176 |
+
testclass=TestClass.ENHANCED,
|
| 177 |
+
testid='enhanced-0-is-not-root',
|
| 178 |
+
message="Enhanced relation type must be 'root' if head is 0."
|
| 179 |
+
).confirm()
|
| 180 |
+
if edep['parent'].ord != 0 and utils.lspec2ud(edep['deprel']) == 'root':
|
| 181 |
+
Error(
|
| 182 |
+
state=state, config=self.incfg,
|
| 183 |
+
testclass=TestClass.ENHANCED,
|
| 184 |
+
testid='enhanced-root-is-not-0',
|
| 185 |
+
message="Enhanced relation type cannot be 'root' if head is not 0."
|
| 186 |
+
).confirm()
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
    def check_upos_vs_deprel(self, state, node):
        """
        For certain relations checks that the dependent word belongs to an expected
        part-of-speech category. Occasionally we may have to check the children of
        the node, too.

        Parameters
        ----------
        state : udtools.state.State
            The state of the validation run.
        node : udapi.core.node.Node object
            The tree node to be tested.

        Reads from state
        ----------------
        current_node_linenos : dict(str: int)
            Mapping from node ids (including empty nodes) to line numbers in
            the input file.

        Incidents
        ---------
        fixed-without-extpos
        rel-upos-det
        rel-upos-nummod
        rel-upos-advmod
        rel-upos-expl
        rel-upos-aux
        rel-upos-cop
        rel-upos-case
        rel-upos-mark
        rel-upos-cc
        rel-upos-punct
        upos-rel-punct
        rel-upos-fixed
        """
        Incident.default_lineno = state.current_node_linenos[str(node.ord)]
        Incident.default_level = 3
        Incident.default_testclass = TestClass.SYNTAX
        # Occasionally a word may be marked by the feature ExtPos as acting as
        # a part of speech different from its usual one (which is given in UPOS).
        # Typical examples are words that head fixed multiword expressions (the
        # whole expression acts like a word of that alien part of speech), but
        # ExtPos may be used also on single words whose external POS is altered.
        upos = node.upos
        # Nodes with a fixed child may need ExtPos to signal the part of speech of
        # the whole fixed expression.
        if node.feats['ExtPos']:
            # ExtPos overrides UPOS for all the deprel-vs-POS tests below.
            upos = node.feats['ExtPos']
        # This is a level 3 test, we will check only the universal part of the relation.
        deprel = node.udeprel
        # Universal deprels of the node's direct children (subtypes stripped).
        childrels = set([x.udeprel for x in node.children])
        # It is recommended that the head of a fixed expression always has ExtPos,
        # even if it does not need it to pass the tests in this function.
        if 'fixed' in childrels and not node.feats['ExtPos']:
            fixed_forms = [node.form] + [x.form for x in node.children if x.udeprel == 'fixed']
            str_fixed_forms = ' '.join(fixed_forms)
            Warning(
                state=state, config=self.incfg,
                testid='fixed-without-extpos',
                message=f"Fixed expression '{str_fixed_forms}' does not have the 'ExtPos' feature"
            ).confirm()
        # Certain relations are reserved for nominals and cannot be used for verbs.
        # Nevertheless, they can appear with adjectives or adpositions if they are promoted due to ellipsis.
        # Unfortunately, we cannot enforce this test because a word can be cited
        # rather than used, and then it can take a nominal function even if it is
        # a verb, as in this Upper Sorbian sentence where infinitives are appositions:
        # [hsb] Z werba danci "rejować" móže substantiw nastać danco "reja", adjektiw danca "rejowanski" a adwerb dance "rejowansce", ale tež z substantiwa martelo "hamor" móže nastać werb marteli "klepać z hamorom", adjektiw martela "hamorowy" a adwerb martele "z hamorom".
        # Determiner can alternate with a pronoun.
        if deprel == 'det' and not re.match(r"^(DET|PRON)", upos):
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-det',
                message=f"'det' should be 'DET' or 'PRON' but it is '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        # Nummod is for "number phrases" only. This could be interpreted as NUM only,
        # but some languages treat some cardinal numbers as NOUNs, and in
        # https://github.com/UniversalDependencies/docs/issues/596,
        # we concluded that the validator will tolerate them.
        if deprel == 'nummod' and not re.match(r"^(NUM|NOUN|SYM)$", upos):
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-nummod',
                message=f"'nummod' should be 'NUM' but it is '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        # Advmod is for adverbs, perhaps particles but not for prepositional phrases or clauses.
        # Nevertheless, we should allow adjectives because they can be used as adverbs in some languages.
        # https://github.com/UniversalDependencies/docs/issues/617#issuecomment-488261396
        # Bohdan reports that some DET can modify adjectives in a way similar to ADV.
        # I am not sure whether advmod is the best relation for them but the alternative
        # det is not much better, so maybe we should not enforce it. Adding DET to the tolerated UPOS tags.
        if deprel == 'advmod' and not re.match(r"^(ADV|ADJ|CCONJ|DET|PART|SYM)", upos) and not 'goeswith' in childrels:
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-advmod',
                message=f"'advmod' should be 'ADV' but it is '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        # Known expletives are pronouns. Determiners and particles are probably acceptable, too.
        if deprel == 'expl' and not re.match(r"^(PRON|DET|PART)$", upos):
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-expl',
                message=f"'expl' should normally be 'PRON' but it is '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        # Auxiliary verb/particle must be AUX.
        if deprel == 'aux' and not re.match(r"^(AUX)", upos):
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-aux',
                message=f"'aux' should be 'AUX' but it is '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        # Copula is an auxiliary verb/particle (AUX) or a pronoun (PRON|DET).
        if deprel == 'cop' and not re.match(r"^(AUX|PRON|DET|SYM)", upos):
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-cop',
                message=f"'cop' should be 'AUX' or 'PRON'/'DET' but it is '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        # Case is normally an adposition, maybe particle.
        # However, there are also secondary adpositions and they may have the original POS tag:
        # NOUN: [cs] pomocí, prostřednictvím
        # VERB: [en] including
        # Interjection can also act as case marker for vocative, as in Sanskrit: भोः भगवन् / bhoḥ bhagavan / oh sir.
        # Note the inverted logic here: a positive match means a disallowed UPOS.
        if deprel == 'case' and re.match(r"^(PROPN|ADJ|PRON|DET|NUM|AUX)", upos):
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-case',
                message=f"'case' should not be '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        # Mark is normally a conjunction or adposition, maybe particle but definitely not a pronoun.
        ###!!! February 2022: Temporarily allow mark+VERB ("regarding"). In the future, it should be banned again
        ###!!! by default (and case+VERB too), but there should be a language-specific list of exceptions.
        ###!!! In 2024 I wanted to re-enable the test because people could use the
        ###!!! newly approved ExtPos feature to signal that "regarding" is acting
        ###!!! as a function word, but Amir was opposed to the idea that ExtPos would
        ###!!! now be required also for single-word expressions.
        if deprel == 'mark' and re.match(r"^(NOUN|PROPN|ADJ|PRON|DET|NUM|AUX|INTJ)", upos):
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-mark',
                message=f"'mark' should not be '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        # Cc is a conjunction, possibly an adverb or particle.
        if deprel == 'cc' and re.match(r"^(NOUN|PROPN|ADJ|PRON|DET|NUM|VERB|AUX|INTJ)", upos):
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-cc',
                message=f"'cc' should not be '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        # Punctuation and the punct relation must go hand in hand.
        if deprel == 'punct' and upos != 'PUNCT':
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-punct',
                message=f"'punct' must be 'PUNCT' but it is '{upos}' ('{utils.formtl(node)}')"
            ).confirm()
        if upos == 'PUNCT' and not re.match(r"^(punct|root)", deprel):
            Error(
                state=state, config=self.incfg,
                testid='upos-rel-punct',
                message=f"'PUNCT' must be 'punct' but it is '{node.deprel}' ('{utils.formtl(node)}')"
            ).confirm()
        # Proper nouns should not participate in fixed expressions (as head or child).
        if upos == 'PROPN' and (deprel == 'fixed' or 'fixed' in childrels):
            Error(
                state=state, config=self.incfg,
                testid='rel-upos-fixed',
                message=f"'fixed' should not be used for proper nouns ('{utils.formtl(node)}')."
            ).confirm()
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def check_flat_foreign(self, state, node):
    """
    flat:foreign is an optional subtype of flat. It connects two words in a
    code-switched segment of foreign words when the annotators did not want
    to provide the analysis according to the source language. If flat:foreign
    is used, both the parent and the child should have the Foreign=Yes
    feature and their UPOS tag should be X.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    flat-foreign-upos-feats
    """
    Incident.default_level = 3
    Incident.default_testclass = TestClass.MORPHO
    if node.deprel != 'flat:foreign':
        return
    parent = node.parent
    # The child must be tagged X and its feature set must consist of exactly
    # Foreign=Yes (no other features).
    child_ok = node.upos == 'X' and str(node.feats) == 'Foreign=Yes'
    if not child_ok:
        Warning(
            state=state, config=self.incfg,
            lineno=state.current_node_linenos[str(node.ord)],
            nodeid=node.ord,
            testid='flat-foreign-upos-feats',
            message="The child of a flat:foreign relation should have UPOS X and Foreign=Yes (but no other features)."
        ).confirm()
    # The same requirement holds for the parent of the relation.
    parent_ok = parent.upos == 'X' and str(parent.feats) == 'Foreign=Yes'
    if not parent_ok:
        Warning(
            state=state, config=self.incfg,
            lineno=state.current_node_linenos[str(parent.ord)],
            nodeid=parent.ord,
            testid='flat-foreign-upos-feats',
            message="The parent of a flat:foreign relation should have UPOS X and Foreign=Yes (but no other features)."
        ).confirm()
def check_left_to_right_relations(self, state, node):
    """
    Certain UD relations must always go left-to-right (in the logical order,
    meaning that the parent precedes the child, disregarding that some
    languages have right-to-left writing systems).
    Currently only the basic dependencies are checked here; the same rule
    should eventually be tested for the enhanced dependencies as well.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    right-to-left-conj
    right-to-left-fixed
    right-to-left-flat
    right-to-left-goeswith
    right-to-left-appos
    """
    # According to the v2 guidelines, apposition should also be left-headed,
    # although the definition of apposition may need to be improved.
    if node.udeprel not in ('conj', 'fixed', 'flat', 'goeswith', 'appos'):
        return
    if node.ord < node.parent.ord:
        # The relation type is encoded in the test id so that exceptions for
        # legacy treebanks can be managed per relation:
        # for conj, flat and fixed the requirement predates UD 2.2;
        # for appos and goeswith it was introduced before UD 2.4.
        # The designation "right-to-left" is confusing in languages with
        # right-to-left writing systems; the message is kept more neutral.
        Error(
            state=state, config=self.incfg,
            lineno=state.current_node_linenos[str(node.ord)],
            nodeid=node.ord,
            level=3,
            testclass=TestClass.SYNTAX,
            testid=f"right-to-left-{node.udeprel}",
            message=f"Parent of relation '{node.deprel}' must precede the child in the word order."
        ).confirm()
def check_single_subject(self, state, node):
    """
    No predicate should have more than one subject.
    An xcomp dependent normally has no subject, but in some languages the
    requirement may be weaker: it could have an overt subject if it is
    correferential with a particular argument of the matrix verb. Hence we do
    not check zero subjects of xcomp dependents at present.
    Furthermore, in some situations we must allow multiple subjects: if a
    clause acts as a nonverbal predicate of another clause, the predicate of
    the inner clause governs two subjects (one for each clause). As of UD
    2.10, the inner predicate governs both subjects even if there is a
    copula, and the outer subjects should be attached as [nc]subj:outer.
    See https://universaldependencies.org/changes.html#multiple-subjects
    and https://github.com/UniversalDependencies/tools/issues/34.
    Because :outer is a subtype and thus optional, a treebank may instead
    mark an outer subject with Subject=Outer in MISC; that MISC attribute is
    just a directive for the validator and no parser is expected to predict
    it.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    too-many-subjects
    """

    def _is_inner_subject(child):
        """
        Tell whether the child's deprel is nsubj or csubj without the :outer
        subtype. Alternatively to the :outer subtype, the node may carry
        Subject=Outer in MISC, which also makes it an outer subject.
        """
        if not re.search(r'subj', child.udeprel):
            return False
        if re.match(r'^[nc]subj:outer$', child.deprel):
            return False
        return child.misc['Subject'] != 'Outer'

    subjects = [c for c in node.children if _is_inner_subject(c)]
    subject_ids = [c.ord for c in subjects]
    subject_forms = [utils.formtl(c) for c in subjects]
    subject_references = utils.create_references(subjects, state, 'Subject')
    if len(subjects) > 1:
        Error(
            state=state, config=self.incfg,
            lineno=state.current_node_linenos[str(node.ord)],
            nodeid=node.ord,
            level=3,
            testclass=TestClass.SYNTAX,
            testid='too-many-subjects',
            message=f"Multiple subjects {str(subject_ids)} ({str(subject_forms)[1:-1]}) under the predicate '{utils.formtl(node)}' not subtyped as ':outer'.",
            explanation="Outer subjects are allowed if a clause acts as the predicate of another clause.",
            references=subject_references
        ).confirm()
def check_single_object(self, state, node):
    """
    No predicate should have more than one direct object (the number of
    indirect objects is unlimited). Theoretically, ccomp should be
    understood as a clausal equivalent of a direct object, but since there
    is no indirect clausal equivalent, an additional ccomp is tolerated at
    present.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    too-many-objects
    """
    objects = [c for c in node.children if c.udeprel == 'obj']
    object_ids = [c.ord for c in objects]
    object_forms = [utils.formtl(c) for c in objects]
    object_references = utils.create_references(objects, state, 'Object')
    if len(objects) > 1:
        Error(
            state=state, config=self.incfg,
            lineno=state.current_node_linenos[str(node.ord)],
            nodeid=node.ord,
            level=3,
            testclass=TestClass.SYNTAX,
            testid='too-many-objects',
            message=f"Multiple direct objects {str(object_ids)} ({str(object_forms)[1:-1]}) under the predicate '{utils.formtl(node)}'.",
            references=object_references
        ).confirm()
def check_nmod_obl(self, state, node):
    """
    The difference between nmod and obl is that the former modifies a
    nominal while the latter modifies a predicate of a clause. Typically
    the parent of nmod is NOUN, PROPN or PRON; the parent of obl is usually
    a VERB, sometimes ADJ or ADV. However, nominals can also be predicates
    and then they may take obl dependents:
        I am the leader of the group (nmod)
        I am the leader on Mondays (obl)
    This function tries to detect at least some cases where the nominal is
    not a predicate and thus cannot take obl dependents.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    obl-should-be-nmod
    """
    if node.udeprel != 'obl' or node.parent.upos not in ('NOUN', 'PROPN', 'PRON'):
        return
    # If the parent itself bears one of these deprels, we know that it is
    # just a nominal and not a predicate. This reveals some erroneous
    # obliques but not all: some non-predicative nominals will not be
    # recognized, and even for the predicative ones, some dependents might
    # be better analyzed as nmod.
    if node.parent.udeprel in ('nsubj', 'obj', 'iobj', 'obl', 'vocative', 'dislocated', 'expl', 'nmod'):
        # For the moment (2025-09-20), I am making this a warning only.
        # But I suppose that it will become an error in the future.
        Error(
            state=state, config=self.incfg,
            lineno=state.current_node_linenos[str(node.ord)],
            nodeid=node.ord,
            level=3,
            testclass=TestClass.SYNTAX,
            testid='obl-should-be-nmod',
            message=f"The parent (node [{node.parent.ord}] '{utils.formtl(node.parent)}') is a nominal (and not a predicate), hence the relation should be 'nmod', not 'obl'.",
            references=utils.create_references([node.parent], state, 'Parent')
        ).confirm()
def check_orphan(self, state, node):
    """
    The orphan relation attaches an unpromoted orphan to the promoted
    orphan in gapping constructions. A common error is that the promoted
    orphan gets the orphan relation too. The parent of orphan is typically
    attached via a conj relation, although some other relations are
    plausible too.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    orphan-parent
    """
    # This is a level 3 test: only the universal part of the relation is checked.
    if node.udeprel != 'orphan':
        return
    # advcl is included because gapping (or something very similar) can also
    # occur in subordinate clauses: "He buys companies like my mother [does]
    # vegetables." In theory a similar pattern could occur with reparandum.
    # It also occurs with acl, e.g. in Latvian:
    #   viņš ēd tos ābolus, ko pirms tam [ēda] tārpi
    #   ('he eats the same apples, which were [eaten] by worms before that')
    # Other clausal heads (ccomp, csubj) may be eligible as well, e.g. in
    # Latvian (see also issue 635 2019-09-19):
    #   atjēdzos, ka bez angļu valodas nekur [netikšu]
    #   '[I] realised, that [I will get] nowhere without English'
    # 2023-04-14: Reclassified as warning only. Due to promotion, the parent
    # of orphan may receive many other relations; see issue 635 for details
    # and a Latin example.
    if not re.match(r"^(conj|parataxis|root|csubj|ccomp|advcl|acl|reparandum)$", node.parent.udeprel):
        Warning(
            state=state, config=self.incfg,
            lineno=state.current_node_linenos[str(node.ord)],
            nodeid=node.ord,
            level=3,
            testclass=TestClass.SYNTAX,
            testid='orphan-parent',
            message=f"The parent of 'orphan' should normally be 'conj' but it is '{node.parent.udeprel}'.",
            references=utils.create_references([node.parent], state, 'Parent')
        ).confirm()
def check_functional_leaves(self, state, node):
    """
    Most of the time, function-word nodes should be leaves. This function
    checks for known exceptions and reports the other cases.
    (https://universaldependencies.org/u/overview/syntax.html#function-word-modifiers)

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    leaf-mark-case
    leaf-aux-cop
    leaf-det
    leaf-clf
    leaf-cc
    leaf-fixed
    leaf-goeswith
    leaf-punct
    """
    # This is a level 3 test: only the universal part of the relation is checked.
    pdeprel = node.udeprel
    if pdeprel not in ('case', 'mark', 'cc', 'aux', 'cop', 'det', 'clf', 'fixed', 'goeswith', 'punct'):
        return
    idparent = node.ord
    pfeats = node.feats
    for child in node.children:
        idchild = child.ord
        Incident.default_lineno = state.current_node_linenos[str(idchild)]
        Incident.default_level = 3
        Incident.default_testclass = TestClass.SYNTAX
        cdeprel = child.udeprel
        cupos = child.upos
        cfeats = child.feats

        def _report(testid):
            # All leaf-* incidents share one message template, filled with the
            # current parent/child pair.
            Error(
                state=state, config=self.incfg,
                nodeid=node.ord,
                testid=testid,
                message=f"'{pdeprel}' not expected to have children ({idparent}:{node.form}:{pdeprel} --> {idchild}:{child.form}:{cdeprel})"
            ).confirm()

        # The guidelines explicitly say that negation can modify any function
        # word (see the URL in the docstring). Negation cannot be recognized
        # by deprel alone; the POS tag and the Polarity feature are needed too.
        if pdeprel != 'punct' and cdeprel == 'advmod' and re.match(r"^(PART|ADV)$", cupos) and cfeats['Polarity'] == 'Neg':
            continue
        # Punctuation should not depend on function words if it can be
        # projectively attached to a content word, but sometimes it cannot.
        # Czech example: "Budou - li však zbývat , ukončíme" — the hyphen must
        # depend on a nonprojectively attached aux/mark because any other
        # attachment would itself be nonprojective. Hence: if the parent of a
        # punctuation node is attached nonprojectively, punctuation may hang
        # on it to avoid its own nonprojectivity.
        if node.is_nonprojective() and cdeprel == 'punct':
            continue
        # Auxiliaries, conjunctions and case markers tolerate a few special
        # types of modifiers.
        # Punctuation is temporarily allowed under functional nodes because a
        # functional node may be in quotes or brackets ("must"); ideally only
        # the bracket situation would be allowed.
        # Per the guidelines, mark can take a limited set of adverbial/oblique
        # dependents; there are objections against forbidding the same for
        # case (https://github.com/UniversalDependencies/docs/issues/618), so
        # the validator is equally benevolent to 'case' for the time being
        # (forcing higher attachment would lose information irreversibly).
        # Coordinating conjunctions usually depend on a non-first conjunct,
        # but in paired conjunctions ("both-and", "either-or") the first part
        # attaches to the first conjunct. Since mark, case, aux and cop can be
        # coordinated, 'cc' children are allowed under them — but not under
        # another 'cc'. (Still, 'cc' can have a 'conj' dependent: in "and/or",
        # "or" depends on "and" as 'conj'.)
        if re.match(r"^(mark|case)$", pdeprel) and not re.match(r"^(advmod|obl|goeswith|fixed|reparandum|conj|cc|punct)$", cdeprel):
            _report('leaf-mark-case')
        if re.match(r"^(aux|cop)$", pdeprel) and not re.match(r"^(goeswith|fixed|reparandum|conj|cc|punct)$", cdeprel):
            _report('leaf-aux-cop')
        # Classifiers must be allowed under demonstrative determiners per the
        # clf guidelines. Several constructions require relaxing the rule for
        # children of det dependents even if not mentioned directly in the
        # universal guidelines:
        # https://universaldependencies.org/workgroups/newdoc/children_of_determiners.html
        # - Latvian compound determiners (PART + head PRON, not fixed) need a
        #   'compound' exception (docs issue 1059, comment 2413484624).
        # - Hebrew demonstratives take their own determiners ("the men the
        #   these" = "these men"), parallel to adjectival modification
        #   (docs issue 1059, comment 2400694043).
        # - Classical Armenian may repeat a case marker at both a noun and its
        #   demonstrative; 'case' under det is allowed everywhere for now
        #   (docs issue 1059, comment 2441260051). ###!!!
        # - Spoken data: fillers ("euh", INTJ/discourse) and parentheticals
        #   (parataxis) attach "to the most relevant nearby unit", which may
        #   be a determiner (docs issue 1059, comment 2438448236).
        # - Possessive determiners are referential and can take appos,
        #   acl:relcl, even nmod; the test is relaxed when the determiner has
        #   Poss=Yes. Certain multiword det expressions should be flat:redup
        #   rather than fixed, hence the 'flat' exception.
        if re.match(r"^(det)$", pdeprel) and not re.match(r"^(det|case|advmod|obl|clf|goeswith|fixed|flat|compound|reparandum|discourse|parataxis|conj|cc|punct)$", cdeprel) and not (pfeats['Poss'] == 'Yes' and re.match(r"^(appos|acl|nmod)$", cdeprel)):
            _report('leaf-det')
        if re.match(r"^(clf)$", pdeprel) and not re.match(r"^(advmod|obl|goeswith|fixed|reparandum|conj|cc|punct)$", cdeprel):
            _report('leaf-clf')
        if re.match(r"^(cc)$", pdeprel) and not re.match(r"^(goeswith|fixed|reparandum|conj|punct)$", cdeprel):
            _report('leaf-cc')
        # Fixed expressions should not be nested (no chains of fixed
        # relations). As functional elements they should not have other
        # dependents either, except possibly conj. A punct child is allowed,
        # at least temporarily, because of fixed expressions with a hyphen in
        # the middle (e.g. Russian "вперед-назад"); keeping them as one token
        # would be better, but retokenization is not always practical.
        elif pdeprel == 'fixed' and not re.match(r"^(goeswith|reparandum|conj|punct)$", cdeprel):
            _report('leaf-fixed')
        # Goeswith cannot have any children, not even another goeswith.
        elif pdeprel == 'goeswith':
            _report('leaf-goeswith')
        # Punctuation may exceptionally have punct children (an exclamation
        # mark in brackets or quotes) but nothing else.
        elif pdeprel == 'punct' and cdeprel != 'punct':
            _report('leaf-punct')
def check_fixed_span(self, state, node):
    """
    Like goeswith, the fixed relation should not in general skip words that
    are not part of the fixed expression. Unlike goeswith, an intervening
    punctuation symbol is allowed. The rule that fixed expressions cannot be
    discontiguous has been challenged with examples from Swedish and Coptic
    (https://github.com/UniversalDependencies/docs/issues/623), so the test
    was turned off 2019-04-13 and re-activated 2023-09-03 as just a warning.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    fixed-gap
    """
    fixed_children = [c for c in node.children if c.udeprel == 'fixed']
    if not fixed_children:
        return
    members = sorted([node] + fixed_children)
    span = [n for n in node.root.descendants if node.ord <= n.ord <= fixed_children[-1].ord]
    # All nodes between the head and its last fixed child should be either
    # part of the expression or punctuation.
    gap_nodes = [n for n in span if n.udeprel != 'punct' and n not in members]
    if gap_nodes:
        member_ords = [n.ord for n in members]
        expression = ' '.join([(n.form if n in members else '*') for n in span])
        Warning(
            state=state, config=self.incfg,
            lineno=state.current_node_linenos[str(node.ord)],
            nodeid=node.ord,
            level=3,
            testclass=TestClass.SYNTAX,
            testid='fixed-gap',
            message=f"Gaps in fixed expression {str(member_ords)} '{expression}'"
        ).confirm()
def check_goeswith_span(self, state, node):
    """
    The relation 'goeswith' connects word parts that are separated by
    whitespace but should be one word instead. The relation is assumed to go
    left-to-right (checked elsewhere); here we check that the nodes really
    were separated by whitespace and that any node in the middle is also
    attached via 'goeswith'. The node whose goeswith children we test is
    given by the parameter.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    goeswith-gap
    goeswith-nospace
    goeswith-missing-typo
    """
    Incident.default_lineno = state.current_node_linenos[str(node.ord)]
    Incident.default_level = 3
    Incident.default_testclass = TestClass.SYNTAX
    gwchildren = [c for c in node.children if c.udeprel == 'goeswith']
    if not gwchildren:
        return
    gwlist = sorted([node] + gwchildren)
    gwrange = [n for n in node.root.descendants if node.ord <= n.ord <= gwchildren[-1].ord]
    # All nodes between the head and its last goeswith child must be
    # goeswith too.
    if gwlist != gwrange:
        gwordlist = [n.ord for n in gwlist]
        gwordrange = [n.ord for n in gwrange]
        Error(
            state=state, config=self.incfg,
            nodeid=node.ord,
            testid='goeswith-gap',
            message=f"Gaps in goeswith group {str(gwordlist)} != {str(gwordrange)}."
        ).confirm()
    # Every non-last member of the goeswith group must be followed by
    # whitespace.
    missing_space = [m for m in gwlist[:-1] if m.misc['SpaceAfter'] == 'No']
    if missing_space:
        Error(
            state=state, config=self.incfg,
            nodeid=node.ord,
            testid='goeswith-nospace',
            message="'goeswith' cannot connect nodes that are not separated by whitespace."
        ).confirm()
    # Not about the span of the interrupted word, but since we already know
    # we are at the head of a goeswith word, do it here too: every goeswith
    # parent should have Typo=Yes — unless the treebank has no features at
    # all, which check_required_feature accounts for.
    incident = Error(
        state=state, config=self.incfg,
        nodeid=node.ord,
        testclass=TestClass.MORPHO,
        testid='goeswith-missing-typo',
        message="Since the treebank has morphological features, 'Typo=Yes' must be used with 'goeswith' heads."
    )
    self.check_required_feature(state, node.feats, 'Typo', 'Yes', incident)
def check_goeswith_morphology_and_edeps(self, state, node):
    """
    If a node has the 'goeswith' incoming relation, it is a non-first part of
    a mistakenly interrupted word. The lemma, upos tag and morphological
    features of the word should be annotated at the first part, not here, and
    the enhanced graph must not add any extra dependencies to this node.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    goeswith-lemma
    goeswith-upos
    goeswith-feats
    goeswith-edeps
    """
    Incident.default_lineno = state.current_node_linenos[str(node.ord)]
    Incident.default_level = 3
    Incident.default_testclass = TestClass.MORPHO
    # Only non-first parts of interrupted words are subject to these tests.
    if node.udeprel != 'goeswith':
        return
    # The three morphological columns must be deferred to the first part.
    morpho_checks = (
        (node.lemma != '_', 'goeswith-lemma',
         "The lemma of a 'goeswith'-connected word must be annotated only at the first part."),
        (node.upos != 'X', 'goeswith-upos',
         "The UPOS tag of a 'goeswith'-connected word must be annotated only at the first part; the other parts must be tagged 'X'."),
        (str(node.feats) != '_', 'goeswith-feats',
         "The morphological features of a 'goeswith'-connected word must be annotated only at the first part."),
    )
    for failed, testid, message in morpho_checks:
        if failed:
            Error(
                state=state, config=self.incfg,
                nodeid=node.ord,
                testid=testid,
                message=message
            ).confirm()
    # In the enhanced graph, the only permitted incoming edge is a copy of the
    # basic 'goeswith' edge (or no enhanced annotation at all).
    rawdeps = str(node.raw_deps)
    if rawdeps != '_' and rawdeps != str(node.parent.ord) + ':' + node.deprel:
        Error(
            state=state, config=self.incfg,
            nodeid=node.ord,
            testclass=TestClass.ENHANCED,
            testid='goeswith-edeps',
            message="A 'goeswith' dependent cannot have any additional dependencies in the enhanced graph."
        ).confirm()
|
| 1043 |
+
|
| 1044 |
+
|
| 1045 |
+
|
| 1046 |
+
def check_projective_punctuation(self, state, node):
    """
    Punctuation is not supposed to cause nonprojectivity or to be attached
    nonprojectively.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The tree node to be tested.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    punct-causes-nonproj
    punct-is-nonproj
    """
    Incident.default_lineno = state.current_node_linenos[str(node.ord)]
    Incident.default_level = 3
    Incident.default_testclass = TestClass.SYNTAX
    if node.udeprel != 'punct':
        return
    # Does the attachment of this punctuation node make other nodes
    # nonprojective?
    culprits = utils.get_caused_nonprojectivities(node)
    if culprits:
        Error(
            state=state, config=self.incfg,
            nodeid=node.ord,
            testid='punct-causes-nonproj',
            message=f"Punctuation must not cause non-projectivity of nodes {[n.ord for n in culprits]}",
            references=utils.create_references(culprits, state, 'Node made nonprojective')
        ).confirm()
    # Is the punctuation node itself attached nonprojectively, i.e., is there
    # a gap between it and its parent?
    gap = utils.get_gap(node)
    if gap:
        Error(
            state=state, config=self.incfg,
            nodeid=node.ord,
            testid='punct-is-nonproj',
            message=f"Punctuation must not be attached non-projectively over nodes {[n.ord for n in gap]}",
            references=utils.create_references(gap, state, 'Node in gap')
        ).confirm()
|
| 1093 |
+
|
| 1094 |
+
|
| 1095 |
+
|
| 1096 |
+
def check_enhanced_orphan(self, state, node):
    """
    Checks universally valid consequences of the annotation guidelines in the
    enhanced representation. Currently tests only phenomena specific to the
    enhanced dependencies; however, we should also test things that are
    required in the basic dependencies (such as left-to-right coordination),
    unless it is obvious that in enhanced dependencies such things are legal.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The node whose incoming relation will be validated. This function
        operates on both regular and empty nodes. Make sure to call it for
        empty nodes, too!

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Reads from / writes to state
    ----------------------------
    seen_empty_node, seen_enhanced_orphan
        Line numbers of the first occurrence of each phenomenon; falsy until
        seen, so they double as booleans.

    Incidents
    ---------
    empty-node-after-eorphan
    eorphan-after-empty-node
    """
    lineno = state.current_node_linenos[str(node.ord)]
    Incident.default_lineno = lineno
    Incident.default_level = 3
    Incident.default_testclass = TestClass.ENHANCED
    # Enhanced dependencies should not contain the orphan relation.
    # However, all types of enhancements are optional and orphans are excluded
    # only if this treebank addresses gapping. We do not know it until we see
    # the first empty node.
    if str(node.deps) == '_':
        # No enhanced annotation at this node; nothing to check.
        return
    if node.is_empty():
        if not state.seen_empty_node:
            # Remember where the first empty node occurred (the line number
            # serves both as evidence for later error messages and as a flag).
            state.seen_empty_node = lineno
            # Empty node itself is not an error. Report it only for the first time
            # and only if an orphan occurred before it.
            if state.seen_enhanced_orphan:
                Error(
                    state=state, config=self.incfg,
                    nodeid=node.ord,
                    testid='empty-node-after-eorphan',
                    message=f"Empty node means that we address gapping and there should be no orphans in the enhanced graph; but we saw one on line {state.seen_enhanced_orphan}"
                ).confirm()
    # Collect the universal parts of all enhanced relations entering this node.
    udeprels = set([utils.lspec2ud(edep['deprel']) for edep in node.deps])
    if 'orphan' in udeprels:
        if not state.seen_enhanced_orphan:
            # Remember where the first enhanced orphan occurred.
            state.seen_enhanced_orphan = lineno
        # If we have seen an empty node, then the orphan is an error.
        if state.seen_empty_node:
            Error(
                state=state, config=self.incfg,
                nodeid=node.ord,
                testid='eorphan-after-empty-node',
                message=f"'orphan' not allowed in enhanced graph because we saw an empty node on line {state.seen_empty_node}"
            ).confirm()
|
ud-tools/udtools/src/udtools/level4.py
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
# Original code (2015) by Filip Ginter and Sampo Pyysalo.
|
| 3 |
+
# DZ 2018-11-04: Porting the validator to Python 3.
|
| 4 |
+
# DZ: Many subsequent changes. See the git history.
|
| 5 |
+
# According to https://stackoverflow.com/questions/1832893/python-regex-matching-unicode-properties,
|
| 6 |
+
# the regex module has the same API as re but it can check Unicode character properties using \p{}
|
| 7 |
+
# as in Perl.
|
| 8 |
+
#import re
|
| 9 |
+
import regex as re
|
| 10 |
+
# Allow using this module from the root folder of tools even if it is not
|
| 11 |
+
# installed as a package: use the relative path validator/src/validator for
|
| 12 |
+
# submodules. If the path is not available, try the standard qualification,
|
| 13 |
+
# assuming that the user has installed udtools from PyPI and then called
|
| 14 |
+
# from udtools import Validator.
|
| 15 |
+
try:
|
| 16 |
+
import udtools.src.udtools.utils as utils
|
| 17 |
+
from udtools.src.udtools.incident import Incident, Error, TestClass
|
| 18 |
+
from udtools.src.udtools.level3 import Level3
|
| 19 |
+
except ModuleNotFoundError:
|
| 20 |
+
import udtools.utils as utils
|
| 21 |
+
from udtools.incident import Incident, Error, TestClass
|
| 22 |
+
from udtools.level3 import Level3
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class Level4(Level3):
|
| 27 |
+
#==============================================================================
|
| 28 |
+
# Level 4 tests. Language-specific formal tests. Now we can check in which
|
| 29 |
+
# words spaces are permitted, and which Feature=Value pairs are defined.
|
| 30 |
+
#==============================================================================
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def check_words_with_spaces(self, state, node):
    """
    Checks the FORM and LEMMA of one node for disallowed whitespace.
    Here we assume that all language-independent whitespace-related tests have
    already been done on level 1, so we only check for words with spaces that
    are explicitly allowed in a given language.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The node whose incoming relation will be validated. This function
        operates on both regular and empty nodes. Make sure to call it for
        empty nodes, too!

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    invalid-word-with-space
    """
    Incident.default_lineno = state.current_node_linenos[str(node.ord)]
    Incident.default_level = 4
    Incident.default_testclass = TestClass.FORMAT
    # List of permitted words with spaces is language-specific.
    # The current token may be in a different language due to code switching.
    lang = self.lang
    altlang = utils.get_alt_language(node)
    if altlang:
        lang = altlang
    tospacedata = self.data.get_tospace_for_language(lang)
    for column in ('FORM', 'LEMMA'):
        word = node.form if column == 'FORM' else node.lemma
        # Is there whitespace in the word?
        if not utils.crex.ws.search(word):
            continue
        # Whitespace found. The word is only acceptable if the language defines
        # a regular expression of permitted words with spaces and the word
        # matches it. (If there is no such regex, any whitespace is an error.)
        permitted = False
        if tospacedata:
            # For the purpose of this test, NO-BREAK SPACE is equal to SPACE.
            string_to_test = re.sub(r'\xA0', ' ', word)
            permitted = bool(tospacedata[1].fullmatch(string_to_test))
        if not permitted:
            Error(
                state=state, config=self.incfg,
                nodeid=node.ord,
                testid='invalid-word-with-space',
                message=f"'{word}' in column {column} is not on the list of exceptions allowed to contain whitespace.",
                explanation=self.data.explain_tospace(lang)
            ).confirm()
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def check_feature_values(self, state, node):
    """
    Checks that a feature-value pair is listed as approved. Feature lists are
    language-specific. To disallow non-universal features, test on level 4 with
    language 'ud'.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The node whose incoming relation will be validated. This function
        operates on both regular and empty nodes. Make sure to call it for
        empty nodes, too!

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    mwt-typo-repeated-at-word
    feature-unknown
    feature-not-permitted
    feature-value-unknown
    feature-upos-not-permitted
    feature-value-upos-not-permitted
    """
    Incident.default_lineno = state.current_node_linenos[str(node.ord)]
    Incident.default_level = 4
    Incident.default_testclass = TestClass.MORPHO
    if str(node.feats) == '_':
        return True
    # List of permitted features is language-specific.
    # The current token may be in a different language due to code switching.
    default_lang = self.lang
    default_featset = featset = self.data.get_feats_for_language(self.lang)
    lang = default_lang
    altlang = utils.get_alt_language(node)
    if altlang:
        lang = altlang
        featset = self.data.get_feats_for_language(altlang)
    for f in node.feats:
        # A feature may have multiple values joined by commas.
        for v in node.feats[f].split(','):
            # Level 2 tested character properties and canonical order but not that the f-v pair is known.
            # Level 4 also checks whether the feature value is on the list.
            # If only universal feature-value pairs are allowed, test on level 4 with lang='ud'.
            # The feature Typo=Yes is the only feature allowed on a multi-word token line.
            # If it occurs there, it cannot be duplicated on the lines of the component words.
            if f == 'Typo' and node.multiword_token:
                mwt = node.multiword_token
                if mwt.feats['Typo'] == 'Yes':
                    Error(
                        state=state, config=self.incfg,
                        nodeid=node.ord,
                        testid='mwt-typo-repeated-at-word',
                        message=f"Feature Typo cannot occur at word [{node.ord}] if it already occurred at the corresponding multiword token [{mwt.ord_range}]."
                    ).confirm()
            # In case of code switching, the current token may not be in the default language
            # and then its features are checked against a different feature set. An exception
            # is the feature Foreign, which always relates to the default language of the
            # corpus (but Foreign=Yes should probably be allowed for all UPOS categories in
            # all languages).
            effective_featset = featset
            effective_lang = lang
            if f == 'Foreign':
                # Revert to the default.
                effective_featset = default_featset
                effective_lang = default_lang
            if effective_featset is not None:
                if f not in effective_featset:
                    Error(
                        state=state, config=self.incfg,
                        nodeid=node.ord,
                        testid='feature-unknown',
                        message=f"Feature {f} is not documented for language [{effective_lang}] ('{utils.formtl(node)}', {f}={v}).",
                        explanation=self.data.explain_feats(effective_lang)
                    ).confirm()
                else:
                    lfrecord = effective_featset[f]
                    if lfrecord['permitted'] == 0:
                        Error(
                            state=state, config=self.incfg,
                            nodeid=node.ord,
                            testid='feature-not-permitted',
                            message=f"Feature {f} is not permitted in language [{effective_lang}] ('{utils.formtl(node)}', {f}={v}).",
                            explanation=self.data.explain_feats(effective_lang)
                        ).confirm()
                    else:
                        # The set of known values for this feature: universal
                        # and language-specific, used or currently unused.
                        known_values = lfrecord['uvalues'] + lfrecord['lvalues'] + lfrecord['unused_uvalues'] + lfrecord['unused_lvalues']
                        if v not in known_values:
                            Error(
                                state=state, config=self.incfg,
                                nodeid=node.ord,
                                testid='feature-value-unknown',
                                message=f"Value {v} is not documented for feature {f} in language [{effective_lang}] ('{utils.formtl(node)}').",
                                explanation=self.data.explain_feats(effective_lang)
                            ).confirm()
                        elif node.upos not in lfrecord['byupos']:
                            Error(
                                state=state, config=self.incfg,
                                nodeid=node.ord,
                                testid='feature-upos-not-permitted',
                                message=f"Feature {f} is not permitted with UPOS {node.upos} in language [{effective_lang}] ('{utils.formtl(node)}').",
                                explanation=self.data.explain_feats(effective_lang)
                            ).confirm()
                        elif v not in lfrecord['byupos'][node.upos] or lfrecord['byupos'][node.upos][v] == 0:
                            Error(
                                state=state, config=self.incfg,
                                nodeid=node.ord,
                                testid='feature-value-upos-not-permitted',
                                message=f"Value {v} of feature {f} is not permitted with UPOS {node.upos} in language [{effective_lang}] ('{utils.formtl(node)}').",
                                explanation=self.data.explain_feats(effective_lang)
                            ).confirm()
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def check_deprels(self, state, node):
    """
    Checks that a dependency relation label is listed as approved in the
    given language. As a language-specific test, this function belongs to
    level 4. This method currently checks udeprels both in the DEPREL
    column and in the DEPS column.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The node whose incoming relation will be validated.

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    unknown-deprel
    unknown-edeprel
    """
    Incident.default_lineno = state.current_node_linenos[str(node.ord)]
    Incident.default_level = 4
    Incident.default_testclass = TestClass.SYNTAX
    # List of permitted relations is language-specific.
    # The current token may be in a different language due to code switching.
    # Unlike with features and auxiliaries, with deprels it is less clear
    # whether we want to switch the set of labels when the token belongs to
    # another language. Especially with subtypes that are not so much language
    # specific. For example, we may have allowed 'flat:name' for our language,
    # the maintainers of the other language have not allowed it, and then we
    # could not use it when the foreign language is active. (This actually
    # happened in French GSD.) We will thus allow the union of the main and the
    # alternative deprelset when both the parent and the child belong to the
    # same alternative language. Otherwise, only the main deprelset is allowed.
    mainlang = self.lang
    naltlang = utils.get_alt_language(node)
    # The basic relation should be tested on regular nodes but not on empty nodes.
    if not node.is_empty():
        paltlang = utils.get_alt_language(node.parent)
        main_deprelset = self.data.get_deprel_for_language(mainlang)
        alt_deprelset = set()
        # The alternative set applies only if both child and parent are
        # switched to the same non-default language.
        if naltlang != None and naltlang != mainlang and naltlang == paltlang:
            alt_deprelset = self.data.get_deprel_for_language(naltlang)
        # Test only the universal part if testing at universal level.
        deprel = node.deprel
        if deprel not in main_deprelset and deprel not in alt_deprelset:
            Error(
                state=state, config=self.incfg,
                nodeid=node.ord,
                testid='unknown-deprel',
                message=f"Unknown DEPREL label: '{deprel}'",
                explanation=self.data.explain_deprel(mainlang)
            ).confirm()
    # If there are enhanced dependencies, test their deprels, too.
    # We already know that the contents of DEPS is parsable (deps_list() was
    # first called from check_id_references() and the head indices are OK).
    # The order of enhanced dependencies was already checked in check_deps().
    Incident.default_testclass = TestClass.ENHANCED
    if str(node.deps) != '_':
        main_edeprelset = self.data.get_edeprel_for_language(mainlang)
        # NOTE(review): this is called even when naltlang is None — presumably
        # get_edeprel_for_language handles None gracefully; confirm in data.py.
        alt_edeprelset = self.data.get_edeprel_for_language(naltlang)
        for edep in node.deps:
            parent = edep['parent']
            deprel = edep['deprel']
            paltlang = utils.get_alt_language(parent)
            # Accept the label if it is in the main set, or if child and
            # parent share the same alternative language and the label is in
            # that language's set.
            if not (deprel in main_edeprelset or naltlang != None and naltlang != mainlang and naltlang == paltlang and deprel in alt_edeprelset):
                Error(
                    state=state, config=self.incfg,
                    nodeid=node.ord,
                    testid='unknown-edeprel',
                    message=f"Unknown enhanced relation type '{deprel}' in '{parent.ord}:{deprel}'",
                    explanation=self.data.explain_edeprel(mainlang)
                ).confirm()
|
ud-tools/udtools/src/udtools/level5.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
# Original code (2015) by Filip Ginter and Sampo Pyysalo.
|
| 3 |
+
# DZ 2018-11-04: Porting the validator to Python 3.
|
| 4 |
+
# DZ: Many subsequent changes. See the git history.
|
| 5 |
+
# Allow using this module from the root folder of tools even if it is not
|
| 6 |
+
# installed as a package: use the relative path validator/src/validator for
|
| 7 |
+
# submodules. If the path is not available, try the standard qualification,
|
| 8 |
+
# assuming that the user has installed udtools from PyPI and then called
|
| 9 |
+
# from udtools import Validator.
|
| 10 |
+
try:
|
| 11 |
+
import udtools.src.udtools.utils as utils
|
| 12 |
+
from udtools.src.udtools.incident import Error, TestClass
|
| 13 |
+
from udtools.src.udtools.level4 import Level4
|
| 14 |
+
except ModuleNotFoundError:
|
| 15 |
+
import udtools.utils as utils
|
| 16 |
+
from udtools.incident import Error, TestClass
|
| 17 |
+
from udtools.level4 import Level4
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Level5(Level4):
|
| 22 |
+
#==============================================================================
|
| 23 |
+
# Level 5 tests. Annotation content vs. the guidelines, language-specific.
|
| 24 |
+
#==============================================================================
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def check_auxiliary_verbs(self, state, node):
    """
    Verifies that the UPOS tag AUX is used only with lemmas that are known to
    act as auxiliary verbs or particles in the given language.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The node whose incoming relation will be validated. This function
        operates on both regular and empty nodes. Make sure to call it for
        empty nodes, too!

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    aux-lemma
    """
    if node.upos != 'AUX' or node.lemma == '_':
        return
    # The approved auxiliary list is language-specific; honor code switching.
    tokenlang = utils.get_alt_language(node) or self.lang
    approved = self.data.get_aux_for_language(tokenlang)
    if approved and node.lemma in approved:
        return
    Error(
        state=state, config=self.incfg,
        lineno=state.current_node_linenos[str(node.ord)],
        nodeid=node.ord,
        level=5,
        testclass=TestClass.MORPHO,
        testid='aux-lemma',
        message=f"'{utils.lemmatl(node)}' is not an auxiliary in language [{tokenlang}]",
        explanation=self.data.explain_aux(tokenlang)
    ).confirm()
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def check_copula_lemmas(self, state, node):
    """
    Verifies that the relation cop is used only with lemmas that are known to
    act as copulas in the given language.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    node : udapi.core.node.Node object
        The node whose incoming relation will be validated. This function
        operates on both regular and empty nodes. Make sure to call it for
        empty nodes, too!

    Reads from state
    ----------------
    current_node_linenos : dict(str: int)
        Mapping from node ids (including empty nodes) to line numbers in
        the input file.

    Incidents
    ---------
    cop-lemma
    """
    if node.udeprel != 'cop' or node.lemma == '_':
        return
    # The approved copula list is language-specific; honor code switching.
    tokenlang = utils.get_alt_language(node) or self.lang
    approved = self.data.get_cop_for_language(tokenlang)
    if approved and node.lemma in approved:
        return
    Error(
        state=state, config=self.incfg,
        lineno=state.current_node_linenos[str(node.ord)],
        nodeid=node.ord,
        level=5,
        testclass=TestClass.SYNTAX,
        testid='cop-lemma',
        message=f"'{utils.lemmatl(node)}' is not a copula in language [{tokenlang}]",
        explanation=self.data.explain_cop(tokenlang)
    ).confirm()
|
ud-tools/udtools/src/udtools/level6.py
ADDED
|
@@ -0,0 +1,798 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
# Original code (2015) by Filip Ginter and Sampo Pyysalo.
|
| 3 |
+
# DZ 2018-11-04: Porting the validator to Python 3.
|
| 4 |
+
# DZ: Many subsequent changes. See the git history.
|
| 5 |
+
# According to https://stackoverflow.com/questions/1832893/python-regex-matching-unicode-properties,
|
| 6 |
+
# the regex module has the same API as re but it can check Unicode character properties using \p{}
|
| 7 |
+
# as in Perl.
|
| 8 |
+
#import re
|
| 9 |
+
import regex as re
|
| 10 |
+
# Allow using this module from the root folder of tools even if it is not
|
| 11 |
+
# installed as a package: use the relative path validator/src/validator for
|
| 12 |
+
# submodules. If the path is not available, try the standard qualification,
|
| 13 |
+
# assuming that the user has installed udtools from PyPI and then called
|
| 14 |
+
# from udtools import Validator.
|
| 15 |
+
try:
|
| 16 |
+
import udtools.src.udtools.utils as utils
|
| 17 |
+
from udtools.src.udtools.incident import Incident, Error, Warning, TestClass
|
| 18 |
+
from udtools.src.udtools.level5 import Level5
|
| 19 |
+
except ModuleNotFoundError:
|
| 20 |
+
import udtools.utils as utils
|
| 21 |
+
from udtools.incident import Incident, Error, Warning, TestClass
|
| 22 |
+
from udtools.level5 import Level5
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Constants for the column indices
|
| 27 |
+
COLCOUNT=10
|
| 28 |
+
ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC=range(COLCOUNT)
|
| 29 |
+
COLNAMES='ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC'.split(',')
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class Level6(Level5):
|
| 34 |
+
#==============================================================================
|
| 35 |
+
# Level 6 tests for annotation of coreference and named entities. This is
|
| 36 |
+
# tested on demand only, as the requirements are not compulsory for UD
|
| 37 |
+
# releases.
|
| 38 |
+
#==============================================================================
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def check_misc_entity(self, state):
|
| 43 |
+
"""
|
| 44 |
+
Optionally checks the well-formedness of the MISC attributes that pertain
|
| 45 |
+
to coreference and named entities.
|
| 46 |
+
|
| 47 |
+
Parameters
|
| 48 |
+
----------
|
| 49 |
+
state : udtools.state.State
|
| 50 |
+
The state of the validation run.
|
| 51 |
+
|
| 52 |
+
Reads from state
|
| 53 |
+
----------------
|
| 54 |
+
current_lines : list(str)
|
| 55 |
+
List of lines in the sentence (comments and tokens), including
|
| 56 |
+
final empty line. The lines are not expected to include the final
|
| 57 |
+
newline character.
|
| 58 |
+
First we expect an optional block (zero or more lines) of comments,
|
| 59 |
+
i.e., lines starting with '#'. Then we expect a non-empty block
|
| 60 |
+
(one or more lines) of nodes, empty nodes, and multiword tokens.
|
| 61 |
+
Finally, we expect exactly one empty line.
|
| 62 |
+
comment_start_line : int
|
| 63 |
+
The line number (relative to input file, 1-based) of the first line
|
| 64 |
+
in the current sentence, including comments if any.
|
| 65 |
+
current_token_node_table : list(list(str))
|
| 66 |
+
The list of multiword token lines / regular node lines / empty node
|
| 67 |
+
lines, each split to fields (columns).
|
| 68 |
+
sentence_line : int
|
| 69 |
+
The line number (relative to input file, 1-based) of the first
|
| 70 |
+
node/token line in the current sentence.
|
| 71 |
+
|
| 72 |
+
Reads and writes to state
|
| 73 |
+
-------------------------
|
| 74 |
+
global_entity_attribute_string : str
|
| 75 |
+
entity_attribute_number : int
|
| 76 |
+
entity_attribute_index : dict
|
| 77 |
+
entity_types : dict
|
| 78 |
+
entity_ids_this_document : dict
|
| 79 |
+
entity_ids_other_documents : dict
|
| 80 |
+
open_entity_mentions : list
|
| 81 |
+
open_discontinuous_mentions : dict
|
| 82 |
+
entity_bridge_relations : dict
|
| 83 |
+
entity_split_antecedents : dict
|
| 84 |
+
entity_mention_spans : dict
|
| 85 |
+
|
| 86 |
+
Incidents
|
| 87 |
+
---------
|
| 88 |
+
global-entity-mismatch
|
| 89 |
+
spurious-global-entity
|
| 90 |
+
entity-mwt
|
| 91 |
+
multiple-entity-statements
|
| 92 |
+
multiple-bridge-statements
|
| 93 |
+
multiple-splitante-statements
|
| 94 |
+
bridge-without-entity
|
| 95 |
+
splitante-without-entity
|
| 96 |
+
entity-without-global-entity
|
| 97 |
+
spurious-entity-statement
|
| 98 |
+
too-many-entity-attributes
|
| 99 |
+
spurious-entity-id
|
| 100 |
+
misplaced-mention-part
|
| 101 |
+
mention-attribute-mismatch
|
| 102 |
+
entity-across-newdoc
|
| 103 |
+
spurious-entity-type
|
| 104 |
+
spurious-mention-head
|
| 105 |
+
entity-type-mismatch
|
| 106 |
+
entity-identity-mismatch
|
| 107 |
+
ill-nested-entities
|
| 108 |
+
ill-nested-entities-warning
|
| 109 |
+
mention-head-out-of-range
|
| 110 |
+
same-span-entity-mentions
|
| 111 |
+
crossing-mentions-same-entity
|
| 112 |
+
spurious-bridge-statement
|
| 113 |
+
spurious-bridge-relation
|
| 114 |
+
misplaced-bridge-statement
|
| 115 |
+
repeated-bridge-relation
|
| 116 |
+
bridge-relation-mismatch
|
| 117 |
+
spurious-splitante-statement
|
| 118 |
+
spurious-splitante-relation
|
| 119 |
+
misplaced-splitante-statement
|
| 120 |
+
repeated-splitante-relation
|
| 121 |
+
only-one-split-antecedent
|
| 122 |
+
split-antecedent-mismatch
|
| 123 |
+
cross-sentence-mention
|
| 124 |
+
"""
|
| 125 |
+
Incident.default_level = 6
|
| 126 |
+
Incident.default_testclass = TestClass.COREF
|
| 127 |
+
n_comment_lines = state.sentence_line-state.comment_start_line
|
| 128 |
+
comments = state.current_lines[0:n_comment_lines]
|
| 129 |
+
iline = 0
|
| 130 |
+
sentid = ''
|
| 131 |
+
for c in comments:
|
| 132 |
+
Incident.default_lineno = state.comment_start_line+iline
|
| 133 |
+
global_entity_match = utils.crex.global_entity.fullmatch(c)
|
| 134 |
+
newdoc_match = utils.crex.newdoc.fullmatch(c)
|
| 135 |
+
sentid_match = utils.crex.sentid.fullmatch(c)
|
| 136 |
+
if global_entity_match:
|
| 137 |
+
# As a global declaration, global.Entity is expected only once per file.
|
| 138 |
+
# However, we may be processing multiple files or people may have created
|
| 139 |
+
# the file by concatening smaller files, so we will allow repeated
|
| 140 |
+
# declarations iff they are identical to the first one.
|
| 141 |
+
if state.seen_global_entity:
|
| 142 |
+
if global_entity_match.group(1) != state.global_entity_attribute_string:
|
| 143 |
+
Error(
|
| 144 |
+
state=state, config=self.incfg,
|
| 145 |
+
testid='global-entity-mismatch',
|
| 146 |
+
message=f"New declaration of global.Entity '{global_entity_match.group(1)}' does not match the first declaration '{state.global_entity_attribute_string}' on line {state.seen_global_entity}."
|
| 147 |
+
).confirm()
|
| 148 |
+
else:
|
| 149 |
+
state.seen_global_entity = state.comment_start_line + iline
|
| 150 |
+
state.global_entity_attribute_string = global_entity_match.group(1)
|
| 151 |
+
if not re.match(r"^[a-z]+(-[a-z]+)*$", state.global_entity_attribute_string):
|
| 152 |
+
Error(
|
| 153 |
+
state=state, config=self.incfg,
|
| 154 |
+
testid='spurious-global-entity',
|
| 155 |
+
message=f"Cannot parse global.Entity attribute declaration '{state.global_entity_attribute_string}'."
|
| 156 |
+
).confirm()
|
| 157 |
+
else:
|
| 158 |
+
global_entity_attributes = state.global_entity_attribute_string.split('-')
|
| 159 |
+
if not 'eid' in global_entity_attributes:
|
| 160 |
+
Error(
|
| 161 |
+
state=state, config=self.incfg,
|
| 162 |
+
testid='spurious-global-entity',
|
| 163 |
+
message=f"Global.Entity attribute declaration '{state.global_entity_attribute_string}' does not include 'eid'."
|
| 164 |
+
).confirm()
|
| 165 |
+
elif global_entity_attributes[0] != 'eid':
|
| 166 |
+
Error(
|
| 167 |
+
state=state, config=self.incfg,
|
| 168 |
+
testid='spurious-global-entity',
|
| 169 |
+
message=f"Attribute 'eid' must come first in global.Entity attribute declaration '{state.global_entity_attribute_string}'."
|
| 170 |
+
).confirm()
|
| 171 |
+
if not 'etype' in global_entity_attributes:
|
| 172 |
+
Error(
|
| 173 |
+
state=state, config=self.incfg,
|
| 174 |
+
testid='spurious-global-entity',
|
| 175 |
+
message=f"Global.Entity attribute declaration '{state.global_entity_attribute_string}' does not include 'etype'."
|
| 176 |
+
).confirm()
|
| 177 |
+
elif global_entity_attributes[1] != 'etype':
|
| 178 |
+
Error(
|
| 179 |
+
state=state, config=self.incfg,
|
| 180 |
+
testid='spurious-global-entity',
|
| 181 |
+
message=f"Attribute 'etype' must come second in global.Entity attribute declaration '{state.global_entity_attribute_string}'."
|
| 182 |
+
).confirm()
|
| 183 |
+
if not 'head' in global_entity_attributes:
|
| 184 |
+
Error(
|
| 185 |
+
state=state, config=self.incfg,
|
| 186 |
+
testid='spurious-global-entity',
|
| 187 |
+
message=f"Global.Entity attribute declaration '{state.global_entity_attribute_string}' does not include 'head'."
|
| 188 |
+
).confirm()
|
| 189 |
+
elif global_entity_attributes[2] != 'head':
|
| 190 |
+
Error(
|
| 191 |
+
state=state, config=self.incfg,
|
| 192 |
+
testid='spurious-global-entity',
|
| 193 |
+
message=f"Attribute 'head' must come third in global.Entity attribute declaration '{state.global_entity_attribute_string}'."
|
| 194 |
+
).confirm()
|
| 195 |
+
if 'other' in global_entity_attributes and global_entity_attributes[3] != 'other':
|
| 196 |
+
Error(
|
| 197 |
+
state=state, config=self.incfg,
|
| 198 |
+
testid='spurious-global-entity',
|
| 199 |
+
message=f"Attribute 'other', if present, must come fourth in global.Entity attribute declaration '{state.global_entity_attribute_string}'."
|
| 200 |
+
).confirm()
|
| 201 |
+
# Fill the global dictionary that maps attribute names to list indices.
|
| 202 |
+
i = 0
|
| 203 |
+
for a in global_entity_attributes:
|
| 204 |
+
if a in state.entity_attribute_index:
|
| 205 |
+
Error(
|
| 206 |
+
state=state, config=self.incfg,
|
| 207 |
+
testid='spurious-global-entity',
|
| 208 |
+
message=f"Attribute '{a}' occurs more than once in global.Entity attribute declaration '{state.global_entity_attribute_string}'."
|
| 209 |
+
).confirm()
|
| 210 |
+
else:
|
| 211 |
+
state.entity_attribute_index[a] = i
|
| 212 |
+
i += 1
|
| 213 |
+
state.entity_attribute_number = len(global_entity_attributes)
|
| 214 |
+
elif newdoc_match:
|
| 215 |
+
for eid in state.entity_ids_this_document:
|
| 216 |
+
state.entity_ids_other_documents[eid] = state.entity_ids_this_document[eid]
|
| 217 |
+
state.entity_ids_this_document = {}
|
| 218 |
+
elif sentid_match:
|
| 219 |
+
sentid = sentid_match.group(1)
|
| 220 |
+
iline += 1
|
| 221 |
+
for iline in range(len(state.current_token_node_table)):
|
| 222 |
+
cols = state.current_token_node_table[iline]
|
| 223 |
+
Incident.default_lineno = state.sentence_line+iline
|
| 224 |
+
# Add the current word to all currently open mentions. We will use it in error messages.
|
| 225 |
+
# Do this for regular and empty nodes but not for multi-word-token lines.
|
| 226 |
+
if not utils.is_multiword_token(cols):
|
| 227 |
+
for m in state.open_entity_mentions:
|
| 228 |
+
m['span'].append(cols[ID])
|
| 229 |
+
m['text'] += ' '+cols[FORM]
|
| 230 |
+
m['length'] += 1
|
| 231 |
+
misc = cols[MISC].split('|')
|
| 232 |
+
entity = [x for x in misc if re.match(r"^Entity=", x)]
|
| 233 |
+
bridge = [x for x in misc if re.match(r"^Bridge=", x)]
|
| 234 |
+
splitante = [x for x in misc if re.match(r"^SplitAnte=", x)]
|
| 235 |
+
if utils.is_multiword_token(cols) and (len(entity)>0 or len(bridge)>0 or len(splitante)>0):
|
| 236 |
+
Error(
|
| 237 |
+
state=state, config=self.incfg,
|
| 238 |
+
testid='entity-mwt',
|
| 239 |
+
message="Entity or coreference annotation must not occur at a multiword-token line."
|
| 240 |
+
).confirm()
|
| 241 |
+
continue
|
| 242 |
+
if len(entity)>1:
|
| 243 |
+
Error(
|
| 244 |
+
state=state, config=self.incfg,
|
| 245 |
+
testid='multiple-entity-statements',
|
| 246 |
+
message=f"There can be at most one 'Entity=' statement in MISC but we have {str(misc)}."
|
| 247 |
+
).confirm()
|
| 248 |
+
continue
|
| 249 |
+
if len(bridge)>1:
|
| 250 |
+
Error(
|
| 251 |
+
state=state, config=self.incfg,
|
| 252 |
+
testid='multiple-bridge-statements',
|
| 253 |
+
message=f"There can be at most one 'Bridge=' statement in MISC but we have {str(misc)}."
|
| 254 |
+
).confirm()
|
| 255 |
+
continue
|
| 256 |
+
if len(splitante)>1:
|
| 257 |
+
Error(
|
| 258 |
+
state=state, config=self.incfg,
|
| 259 |
+
testid='multiple-splitante-statements',
|
| 260 |
+
message=f"There can be at most one 'SplitAnte=' statement in MISC but we have {str(misc)}."
|
| 261 |
+
).confirm()
|
| 262 |
+
continue
|
| 263 |
+
if len(bridge)>0 and len(entity)==0:
|
| 264 |
+
Error(
|
| 265 |
+
state=state, config=self.incfg,
|
| 266 |
+
testid='bridge-without-entity',
|
| 267 |
+
message=f"The 'Bridge=' statement can only occur together with 'Entity=' in MISC but we have {str(misc)}."
|
| 268 |
+
).confirm()
|
| 269 |
+
continue
|
| 270 |
+
if len(splitante)>0 and len(entity)==0:
|
| 271 |
+
Error(
|
| 272 |
+
state=state, config=self.incfg,
|
| 273 |
+
testid='splitante-without-entity',
|
| 274 |
+
message=f"The 'SplitAnte=' statement can only occur together with 'Entity=' in MISC but we have {str(misc)}."
|
| 275 |
+
).confirm()
|
| 276 |
+
continue
|
| 277 |
+
# There is at most one Entity (and only if it is there, there may be also one Bridge and/or one SplitAnte).
|
| 278 |
+
if len(entity)>0:
|
| 279 |
+
if not state.seen_global_entity:
|
| 280 |
+
Error(
|
| 281 |
+
state=state, config=self.incfg,
|
| 282 |
+
testid='entity-without-global-entity',
|
| 283 |
+
message="No global.Entity comment was found before the first 'Entity' in MISC."
|
| 284 |
+
).confirm()
|
| 285 |
+
continue
|
| 286 |
+
match = re.match(r"^Entity=((?:\([^( )]+(?:-[^( )]+)*\)?|[^( )]+\))+)$", entity[0])
|
| 287 |
+
if not match:
|
| 288 |
+
Error(
|
| 289 |
+
state=state, config=self.incfg,
|
| 290 |
+
testid='spurious-entity-statement',
|
| 291 |
+
message=f"Cannot parse the Entity statement '{entity[0]}'."
|
| 292 |
+
).confirm()
|
| 293 |
+
else:
|
| 294 |
+
entity_string = match.group(1)
|
| 295 |
+
# We cannot check the rest if we cannot identify the 'eid' attribute.
|
| 296 |
+
if 'eid' not in state.entity_attribute_index:
|
| 297 |
+
continue
|
| 298 |
+
# Items of entities are pairs of [012] and a string.
|
| 299 |
+
# 0 ... opening bracket; 1 ... closing bracket; 2 ... both brackets
|
| 300 |
+
entities = []
|
| 301 |
+
while entity_string:
|
| 302 |
+
match = re.match(r"^\(([^( )]+(-[^( )]+)*)\)", entity_string)
|
| 303 |
+
if match:
|
| 304 |
+
entities.append((2, match.group(1)))
|
| 305 |
+
entity_string = re.sub(r"^\([^( )]+(-[^( )]+)*\)", '', entity_string, count=1)
|
| 306 |
+
continue
|
| 307 |
+
match = re.match(r"^\(([^( )]+(-[^( )]+)*)", entity_string)
|
| 308 |
+
if match:
|
| 309 |
+
entities.append((0, match.group(1)))
|
| 310 |
+
entity_string = re.sub(r"^\([^( )]+(-[^( )]+)*", '', entity_string, count=1)
|
| 311 |
+
continue
|
| 312 |
+
match = re.match(r"^([^( )]+)\)", entity_string)
|
| 313 |
+
if match:
|
| 314 |
+
entities.append((1, match.group(1)))
|
| 315 |
+
entity_string = re.sub(r"^[^( )]+\)", '', entity_string, count=1)
|
| 316 |
+
continue
|
| 317 |
+
# If we pre-checked the string well, we should never arrive here!
|
| 318 |
+
Error(
|
| 319 |
+
state=state, config=self.incfg,
|
| 320 |
+
testid='internal-error',
|
| 321 |
+
message='INTERNAL ERROR'
|
| 322 |
+
).confirm()
|
| 323 |
+
# All 1 cases should precede all 0 cases.
|
| 324 |
+
# The 2 cases can be either before the first 1 case, or after the last 0 case.
|
| 325 |
+
seen0 = False
|
| 326 |
+
seen1 = False
|
| 327 |
+
seen2 = False
|
| 328 |
+
# To be able to check validity of Bridge and SplitAnte, we will hash eids of mentions that start here.
|
| 329 |
+
# To be able to check that no two mentions have the same span, we will hash start-end intervals for mentions that end here.
|
| 330 |
+
starting_mentions = {}
|
| 331 |
+
ending_mentions = {}
|
| 332 |
+
for b, e in entities:
|
| 333 |
+
# First get attributes, entity id, and if applicable, part of discontinuous mention.
|
| 334 |
+
attributes = e.split('-')
|
| 335 |
+
if b==0 or b==2:
|
| 336 |
+
# Fewer attributes are allowed because trailing empty values can be omitted.
|
| 337 |
+
# More attributes are not allowed.
|
| 338 |
+
if len(attributes) > state.entity_attribute_number:
|
| 339 |
+
Error(
|
| 340 |
+
state=state, config=self.incfg,
|
| 341 |
+
testid='too-many-entity-attributes',
|
| 342 |
+
message=f"Entity '{e}' has {len(attributes)} attributes while only {state.entity_attribute_number} attributes are globally declared."
|
| 343 |
+
).confirm()
|
| 344 |
+
# The raw eid (bracket eid) may include an identification of a part of a discontinuous mention,
|
| 345 |
+
# as in 'e155[1/2]'. This is fine for matching opening and closing brackets
|
| 346 |
+
# because the closing bracket must contain it too. However, to identify the
|
| 347 |
+
# cluster, we need to take the real id.
|
| 348 |
+
beid = attributes[state.entity_attribute_index['eid']]
|
| 349 |
+
else:
|
| 350 |
+
# No attributes other than eid are expected at the closing bracket.
|
| 351 |
+
if len(attributes) > 1:
|
| 352 |
+
Error(
|
| 353 |
+
state=state, config=self.incfg,
|
| 354 |
+
testid='too-many-entity-attributes',
|
| 355 |
+
message=f"Entity '{e}' has {len(attributes)} attributes while only eid is expected at the closing bracket."
|
| 356 |
+
).confirm()
|
| 357 |
+
beid = attributes[0]
|
| 358 |
+
eid = beid
|
| 359 |
+
ipart = 1
|
| 360 |
+
npart = 1
|
| 361 |
+
eidnpart = eid
|
| 362 |
+
match = re.match(r"^(.+)\[([1-9]\d*)/([1-9]\d*)\]$", beid)
|
| 363 |
+
if match:
|
| 364 |
+
eid = match.group(1)
|
| 365 |
+
ipart = int(match.group(2))
|
| 366 |
+
npart = int(match.group(3))
|
| 367 |
+
eidnpart = eid+'['+match.group(3)+']'
|
| 368 |
+
# We should omit the square brackets if they would be [1/1].
|
| 369 |
+
if ipart == 1 and npart == 1:
|
| 370 |
+
Error(
|
| 371 |
+
state=state, config=self.incfg,
|
| 372 |
+
testid='spurious-entity-id',
|
| 373 |
+
message=f"Discontinuous mention must have at least two parts but it has one in '{beid}'."
|
| 374 |
+
).confirm()
|
| 375 |
+
if ipart > npart:
|
| 376 |
+
Error(
|
| 377 |
+
state=state, config=self.incfg,
|
| 378 |
+
testid='spurious-entity-id',
|
| 379 |
+
message=f"Entity id '{beid}' of discontinuous mention says the current part is higher than total number of parts."
|
| 380 |
+
).confirm()
|
| 381 |
+
else:
|
| 382 |
+
if re.match(r"[\[\]]", beid):
|
| 383 |
+
Error(
|
| 384 |
+
state=state, config=self.incfg,
|
| 385 |
+
testid='spurious-entity-id',
|
| 386 |
+
message=f"Entity id '{beid}' contains square brackets but does not have the form used in discontinuous mentions."
|
| 387 |
+
).confirm()
|
| 388 |
+
|
| 389 |
+
#--------------------------------------------------------------------------------------------------------------------------------
|
| 390 |
+
# The code that we will have to execute at single-node continuous parts and at the opening brackets of multi-node continuous parts.
|
| 391 |
+
# We assume that we have already parsed beid and established whether this is a part of a discontinuous mention.
|
| 392 |
+
def opening_bracket():
|
| 393 |
+
attrstring_to_match = ''
|
| 394 |
+
# If this is a part of a discontinuous mention, remember the attribute string.
|
| 395 |
+
# At the beginning of each part, we will check that its attribute string is identical to the first part.
|
| 396 |
+
if npart > 1:
|
| 397 |
+
# We want to check that values of all attributes are same in all parts (except the eid which differs in the brackets).
|
| 398 |
+
attributes_without_eid = [attributes[i] for i in range(len(attributes)) if i != state.entity_attribute_index['eid']]
|
| 399 |
+
# For better readability of the error messages, reintroduce eid anyway, but without the brackets.
|
| 400 |
+
attrstring_to_match = eid+'-'+('-'.join(attributes_without_eid))
|
| 401 |
+
if ipart == 1:
|
| 402 |
+
# If this is the first part, create a new record for the mention in the global dictionary.
|
| 403 |
+
# We actually keep a stack of open mentions with the same eidnpart because they may be nested.
|
| 404 |
+
# The length and the span of the mention will be updated when we encounter the closing bracket of the current part.
|
| 405 |
+
discontinuous_mention = {'last_ipart': 1, 'npart': npart,
|
| 406 |
+
'first_part_line': state.sentence_line+iline,
|
| 407 |
+
'last_part_line': state.sentence_line+iline,
|
| 408 |
+
'attributes': attrstring_to_match,
|
| 409 |
+
'length': 0, 'span': []}
|
| 410 |
+
if eidnpart in state.open_discontinuous_mentions:
|
| 411 |
+
state.open_discontinuous_mentions[eidnpart].append(discontinuous_mention)
|
| 412 |
+
else:
|
| 413 |
+
state.open_discontinuous_mentions[eidnpart] = [discontinuous_mention]
|
| 414 |
+
else:
|
| 415 |
+
if eidnpart in state.open_discontinuous_mentions:
|
| 416 |
+
discontinuous_mention = state.open_discontinuous_mentions[eidnpart][-1]
|
| 417 |
+
if ipart != discontinuous_mention['last_ipart']+1:
|
| 418 |
+
Error(
|
| 419 |
+
state=state, config=self.incfg,
|
| 420 |
+
testid='misplaced-mention-part',
|
| 421 |
+
message=f"Unexpected part of discontinuous mention '{beid}': last part was '{discontinuous_mention['last_ipart']}/{discontinuous_mention['npart']}' on line {discontinuous_mention['last_part_line']}."
|
| 422 |
+
).confirm()
|
| 423 |
+
# We will update last_ipart at closing bracket, i.e., after the current part has been entirely processed.
|
| 424 |
+
# Otherwise nested discontinuous mentions might wrongly assess where they belong.
|
| 425 |
+
elif attrstring_to_match != discontinuous_mention['attributes']:
|
| 426 |
+
Error(
|
| 427 |
+
state=state, config=self.incfg,
|
| 428 |
+
testid='mention-attribute-mismatch',
|
| 429 |
+
message=f"Attribute mismatch of discontinuous mention: current part has '{attrstring_to_match}', first part '{discontinuous_mention['attributes']}' was at line {discontinuous_mention['first_part_line']}."
|
| 430 |
+
).confirm()
|
| 431 |
+
else:
|
| 432 |
+
Error(
|
| 433 |
+
state=state, config=self.incfg,
|
| 434 |
+
testid='misplaced-mention-part',
|
| 435 |
+
message=f"Unexpected part of discontinuous mention '{beid}': this is part {ipart} but we do not have information about the previous parts."
|
| 436 |
+
).confirm()
|
| 437 |
+
discontinuous_mention = {'last_ipart': ipart, 'npart': npart,
|
| 438 |
+
'first_part_line': state.sentence_line+iline,
|
| 439 |
+
'last_part_line': state.sentence_line+iline,
|
| 440 |
+
'attributes': attrstring_to_match,
|
| 441 |
+
'length': 0, 'span': []}
|
| 442 |
+
state.open_discontinuous_mentions[eidnpart] = [discontinuous_mention]
|
| 443 |
+
# Check all attributes of the entity, except those that must be examined at the closing bracket.
|
| 444 |
+
if eid in state.entity_ids_other_documents:
|
| 445 |
+
Error(
|
| 446 |
+
state=state, config=self.incfg,
|
| 447 |
+
testid='entity-across-newdoc',
|
| 448 |
+
message=f"Same entity id should not occur in multiple documents; '{eid}' first seen on line {state.entity_ids_other_documents[eid]}, before the last newdoc."
|
| 449 |
+
).confirm()
|
| 450 |
+
elif not eid in state.entity_ids_this_document:
|
| 451 |
+
state.entity_ids_this_document[eid] = state.sentence_line+iline
|
| 452 |
+
etype = ''
|
| 453 |
+
identity = ''
|
| 454 |
+
if 'etype' in state.entity_attribute_index and len(attributes) >= state.entity_attribute_index['etype']+1:
|
| 455 |
+
etype = attributes[state.entity_attribute_index['etype']]
|
| 456 |
+
# For etype values tentatively approved for CorefUD 1.0, see
|
| 457 |
+
# https://github.com/ufal/corefUD/issues/13#issuecomment-1008447464
|
| 458 |
+
if not re.match(r"^(person|place|organization|animal|plant|object|substance|time|number|abstract|event|other)?$", etype):
|
| 459 |
+
Error(
|
| 460 |
+
state=state, config=self.incfg,
|
| 461 |
+
testid='spurious-entity-type',
|
| 462 |
+
message=f"Spurious entity type '{etype}'."
|
| 463 |
+
).confirm()
|
| 464 |
+
if 'identity' in state.entity_attribute_index and len(attributes) >= state.entity_attribute_index['identity']+1:
|
| 465 |
+
identity = attributes[state.entity_attribute_index['identity']]
|
| 466 |
+
# Check the form of the head index now.
|
| 467 |
+
# The value will be checked at the end of the mention,
|
| 468 |
+
# when we know the mention length.
|
| 469 |
+
head = 0
|
| 470 |
+
if 'head' in state.entity_attribute_index and len(attributes) >= state.entity_attribute_index['head']+1:
|
| 471 |
+
if not re.match(r"^[1-9][0-9]*$", attributes[state.entity_attribute_index['head']]):
|
| 472 |
+
Error(
|
| 473 |
+
state=state, config=self.incfg,
|
| 474 |
+
testid='spurious-mention-head',
|
| 475 |
+
message=f"Entity head index '{attributes[state.entity_attribute_index['head']]}' must be a non-zero-starting integer."
|
| 476 |
+
).confirm()
|
| 477 |
+
else:
|
| 478 |
+
head = int(attributes[state.entity_attribute_index['head']])
|
| 479 |
+
# If this is the first mention of the entity, remember the values
|
| 480 |
+
# of the attributes that should be identical at all mentions.
|
| 481 |
+
if not eid in state.entity_types:
|
| 482 |
+
state.entity_types[eid] = (etype, identity, state.sentence_line+iline)
|
| 483 |
+
else:
|
| 484 |
+
# All mentions of one entity (cluster) must have the same entity type.
|
| 485 |
+
if etype != state.entity_types[eid][0]:
|
| 486 |
+
Error(
|
| 487 |
+
state=state, config=self.incfg,
|
| 488 |
+
testid='entity-type-mismatch',
|
| 489 |
+
message=f"Entity '{eid}' cannot have type '{etype}' that does not match '{state.entity_types[eid][0]}' from the first mention on line {state.entity_types[eid][2]}."
|
| 490 |
+
).confirm()
|
| 491 |
+
# All mentions of one entity (cluster) must have the same identity (Wikipedia link or similar).
|
| 492 |
+
if identity != state.entity_types[eid][1]:
|
| 493 |
+
Error(
|
| 494 |
+
state=state, config=self.incfg,
|
| 495 |
+
testid='entity-identity-mismatch',
|
| 496 |
+
message=f"Entity '{eid}' cannot have identity '{identity}' that does not match '{state.entity_types[eid][1]}' from the first mention on line {state.entity_types[eid][2]}."
|
| 497 |
+
).confirm()
|
| 498 |
+
# Remember the line where (the current part of) the entity mention starts.
|
| 499 |
+
mention = {'beid': beid, 'line': state.sentence_line+iline,
|
| 500 |
+
'span': [cols[ID]], 'text': cols[FORM],
|
| 501 |
+
'length': 1, 'head': head, 'attrstring': attrstring_to_match}
|
| 502 |
+
state.open_entity_mentions.append(mention)
|
| 503 |
+
# The set of mentions starting at the current line will be needed later when checking Bridge and SplitAnte statements.
|
| 504 |
+
if ipart == 1:
|
| 505 |
+
starting_mentions[eid] = True
|
| 506 |
+
|
| 507 |
+
#--------------------------------------------------------------------------------------------------------------------------------
|
| 508 |
+
# The code that we will have to execute at single-node continuous parts and at the closing brackets of multi-node continuous parts.
|
| 509 |
+
def closing_bracket():
    """
    Executed at single-node continuous mention parts and at the closing
    brackets of multi-node continuous parts of an entity mention.

    Reads and mutates variables from the enclosing scope: `state`, `beid`,
    `eid`, `eidnpart`, `ipart`, `npart`, `sentid`, `ending_mentions`,
    `self` (for `self.incfg`). Pops the matching mention from
    `state.open_entity_mentions`, maintains
    `state.open_discontinuous_mentions` for multi-part mentions, and at the
    last part validates the head index, duplicate spans, and crossing spans.
    """
    # Find the corresponding opening bracket and extract the information we need to know.
    mention_length = 0
    mention_span = []
    head = 0
    opening_line = 0
    if len(state.open_entity_mentions)==0:
        Error(
            state=state, config=self.incfg,
            testid='ill-nested-entities',
            message=f"Cannot close entity '{beid}' because there are no open entities."
        ).confirm()
        return
    else:
        # If the closing bracket does not occur where expected, it is currently only a warning.
        # We have crossing mention spans in CorefUD 1.0 and it has not been decided yet whether all of them should be illegal.
        ###!!! Note that this will not catch ill-nested mentions whose only intersection is one node. The bracketing will
        ###!!! not be a problem in such cases because one mention will be closed first, then the other will be opened.
        if beid != state.open_entity_mentions[-1]['beid']:
            Warning(
                state=state, config=self.incfg,
                testclass=TestClass.COREF,
                testid='ill-nested-entities-warning',
                message=f"Entity mentions are not well nested: closing '{beid}' while the innermost open entity is '{state.open_entity_mentions[-1]['beid']}' from line {state.open_entity_mentions[-1]['line']}: {str(state.open_entity_mentions)}."
            ).confirm()
        # Try to find and close the entity whether or not it was well-nested.
        for i in reversed(range(len(state.open_entity_mentions))):
            if state.open_entity_mentions[i]['beid'] == beid:
                mention_length = state.open_entity_mentions[i]['length']
                mention_span = state.open_entity_mentions[i]['span']
                head = state.open_entity_mentions[i]['head']
                opening_line = state.open_entity_mentions[i]['line']
                state.open_entity_mentions.pop(i)
                break
        else:
            # If we did not find the entity to close, then the warning above was not enough and we have to make it a validation error.
            Error(
                state=state, config=self.incfg,
                testid='ill-nested-entities',
                message=f"Cannot close entity '{beid}' because it was not found among open entities: {str(state.open_entity_mentions)}"
            ).confirm()
            return
    # If this is a part of a discontinuous mention, update the information about the whole mention.
    # We do this after reading the new part (and not when we see its opening bracket) so that nested
    # discontinuous mentions of the same entity are possible.
    if npart > 1:
        # Update the attributes that have to be updated after each part.
        if eidnpart in state.open_discontinuous_mentions:
            discontinuous_mention = state.open_discontinuous_mentions[eidnpart][-1]
            discontinuous_mention['last_ipart'] = ipart
            discontinuous_mention['last_part_line'] = opening_line
            discontinuous_mention['length'] += mention_length
            discontinuous_mention['span'] += mention_span
        else:
            # This should have been taken care of at the opening bracket.
            Error(
                state=state, config=self.incfg,
                testclass=TestClass.INTERNAL,
                testid='internal-error',
                message="INTERNAL ERROR: at the closing bracket of a part of a discontinuous mention, still no record in state.open_discontinuous_mentions."
            ).confirm()
            # Recover by creating the missing record so that processing can continue.
            discontinuous_mention = {'last_ipart': ipart, 'npart': npart,
                                     'first_part_line': opening_line,
                                     'last_part_line': opening_line,
                                     'attributes': '', 'length': mention_length,
                                     'span': mention_span}
            state.open_discontinuous_mentions[eidnpart] = [discontinuous_mention]
        # Update mention_length and mention_span to reflect the whole span up to this point rather than just the last part.
        mention_length = state.open_discontinuous_mentions[eidnpart][-1]['length']
        mention_span = state.open_discontinuous_mentions[eidnpart][-1]['span']
    # We need to know the length (number of nodes) of the mention to check whether the head attribute is within limits.
    # We need to know the span (list of nodes) of the mention to check that no two mentions have the same span.
    # We only check these requirements after the last part of the discontinuous span (or after the single part of a continuous one).
    if ipart == npart:
        if mention_length < head:
            Error(
                state=state, config=self.incfg,
                testid='mention-head-out-of-range',
                message=f"Entity mention head was specified as {head} on line {opening_line} but the mention has only {mention_length} nodes."
            ).confirm()
        # Check that no two mentions have identical spans (only if this is the last part of a mention).
        ending_mention_key = str(opening_line)+str(mention_span)
        if ending_mention_key in ending_mentions:
            Error(
                state=state, config=self.incfg,
                testid='same-span-entity-mentions',
                message=f"Entity mentions '{ending_mentions[ending_mention_key]}' and '{beid}' from line {opening_line} have the same span {str(mention_span)}."
            ).confirm()
        else:
            ending_mentions[ending_mention_key] = beid
        # Remember the span of the current mention so that we can later check whether it crosses the span of another mention.
        # Use the current sentence id to partially qualify the node ids. It will not work well for mentions that span multiple
        # sentences but we do not expect cross-sentence mentions to be frequent.
        myset = set(mention_span)
        # Check whether any other mention of the same entity has span that crosses the current one.
        if eid in state.entity_mention_spans:
            if sentid in state.entity_mention_spans[eid]:
                for m in state.entity_mention_spans[eid][sentid]:
                    ms = state.entity_mention_spans[eid][sentid][m]
                    if ms.intersection(myset) and not ms.issubset(myset) and not myset.issubset(ms):
                        Error(
                            state=state, config=self.incfg,
                            testid='crossing-mentions-same-entity',
                            message=f"Mentions of entity '{eid}' have crossing spans: {m} vs. {str(mention_span)}."
                        ).confirm()
            else:
                state.entity_mention_spans[eid][sentid] = {}
        else:
            state.entity_mention_spans[eid] = {}
            state.entity_mention_spans[eid][sentid] = {}
        state.entity_mention_spans[eid][sentid][str(mention_span)] = myset
    # At the end of the last part of a discontinuous mention, remove the information about the mention.
    if npart > 1 and ipart == npart:
        if eidnpart in state.open_discontinuous_mentions:
            if len(state.open_discontinuous_mentions[eidnpart]) > 1:
                state.open_discontinuous_mentions[eidnpart].pop()
            else:
                state.open_discontinuous_mentions.pop(eidnpart)
|
| 627 |
+
#--------------------------------------------------------------------------------------------------------------------------------
|
| 628 |
+
|
| 629 |
+
# Now we know the beid, eid, as well as all other attributes.
|
| 630 |
+
# We can check the well-nestedness of brackets.
|
| 631 |
+
if b==0:
|
| 632 |
+
if seen2 and not seen1:
|
| 633 |
+
Error(
|
| 634 |
+
state=state, config=self.incfg,
|
| 635 |
+
testid='spurious-entity-statement',
|
| 636 |
+
message=f"If there are no closing entity brackets, single-node entity must follow all opening entity brackets in '{entity[0]}'."
|
| 637 |
+
).confirm()
|
| 638 |
+
if seen0 and seen2:
|
| 639 |
+
Error(
|
| 640 |
+
state=state, config=self.incfg,
|
| 641 |
+
testid='spurious-entity-statement',
|
| 642 |
+
message=f"Single-node entity must either precede all closing entity brackets or follow all opening entity brackets in '{entity[0]}'."
|
| 643 |
+
).confirm()
|
| 644 |
+
seen0 = True
|
| 645 |
+
seen2 = False
|
| 646 |
+
opening_bracket()
|
| 647 |
+
elif b==2:
|
| 648 |
+
if seen1 and not seen0:
|
| 649 |
+
Error(
|
| 650 |
+
state=state, config=self.incfg,
|
| 651 |
+
testid='spurious-entity-statement',
|
| 652 |
+
message=f"If there are no opening entity brackets, single-node entity must precede all closing entity brackets in '{entity[0]}'."
|
| 653 |
+
).confirm()
|
| 654 |
+
seen2 = True
|
| 655 |
+
opening_bracket()
|
| 656 |
+
closing_bracket()
|
| 657 |
+
else: # b==1
|
| 658 |
+
if seen0:
|
| 659 |
+
Error(
|
| 660 |
+
state=state, config=self.incfg,
|
| 661 |
+
testid='spurious-entity-statement',
|
| 662 |
+
message=f"All closing entity brackets must precede all opening entity brackets in '{entity[0]}'."
|
| 663 |
+
).confirm()
|
| 664 |
+
seen1 = True
|
| 665 |
+
closing_bracket()
|
| 666 |
+
# Now we are done with checking the 'Entity=' statement.
|
| 667 |
+
# If there are also 'Bridge=' or 'SplitAnte=' statements, check them too.
|
| 668 |
+
if len(bridge) > 0:
|
| 669 |
+
match = re.match(r"^Bridge=([^(< :>)]+<[^(< :>)]+(:[a-z]+)?(,[^(< :>)]+<[^(< :>)]+(:[a-z]+)?)*)$", bridge[0])
|
| 670 |
+
if not match:
|
| 671 |
+
Error(
|
| 672 |
+
state=state, config=self.incfg,
|
| 673 |
+
testid='spurious-bridge-statement',
|
| 674 |
+
message=f"Cannot parse the Bridge statement '{bridge[0]}'."
|
| 675 |
+
).confirm()
|
| 676 |
+
else:
|
| 677 |
+
bridges = match.group(1).split(',')
|
| 678 |
+
# Hash src<tgt pairs and make sure they are not repeated.
|
| 679 |
+
srctgt = {}
|
| 680 |
+
for b in bridges:
|
| 681 |
+
match = re.match(r"([^(< :>)]+)<([^(< :>)]+)(?::([a-z]+))?^$", b)
|
| 682 |
+
if match:
|
| 683 |
+
srceid = match.group(1)
|
| 684 |
+
tgteid = match.group(2)
|
| 685 |
+
relation = match.group(3) # optional
|
| 686 |
+
bridgekey = srceid+'<'+tgteid
|
| 687 |
+
if srceid == tgteid:
|
| 688 |
+
Error(
|
| 689 |
+
state=state, config=self.incfg,
|
| 690 |
+
testid='spurious-bridge-relation',
|
| 691 |
+
message=f"Bridge must not point from an entity to itself: '{b}'."
|
| 692 |
+
).confirm()
|
| 693 |
+
if not tgteid in starting_mentions:
|
| 694 |
+
Error(
|
| 695 |
+
state=state, config=self.incfg,
|
| 696 |
+
testid='misplaced-bridge-statement',
|
| 697 |
+
message=f"Bridge relation '{b}' must be annotated at the beginning of a mention of entity '{tgteid}'."
|
| 698 |
+
).confirm()
|
| 699 |
+
if bridgekey in srctgt:
|
| 700 |
+
Error(
|
| 701 |
+
state=state, config=self.incfg,
|
| 702 |
+
testid='repeated-bridge-relation',
|
| 703 |
+
message=f"Bridge relation '{bridgekey}' must not be repeated in '{b}'."
|
| 704 |
+
).confirm()
|
| 705 |
+
else:
|
| 706 |
+
srctgt[bridgekey] = True
|
| 707 |
+
# Check in the global dictionary whether this relation has been specified at another mention.
|
| 708 |
+
if bridgekey in state.entity_bridge_relations:
|
| 709 |
+
if relation != state.entity_bridge_relations[bridgekey]['relation']:
|
| 710 |
+
Error(
|
| 711 |
+
state=state, config=self.incfg,
|
| 712 |
+
testid='bridge-relation-mismatch',
|
| 713 |
+
message=f"Bridge relation '{b}' type does not match '{state.entity_bridge_relations[bridgekey]['relation']}' specified earlier on line {state.entity_bridge_relations[bridgekey]['line']}."
|
| 714 |
+
).confirm()
|
| 715 |
+
else:
|
| 716 |
+
state.entity_bridge_relations[bridgekey] = {'relation': relation, 'line': state.sentence_line+iline}
|
| 717 |
+
if len(splitante) > 0:
|
| 718 |
+
match = re.match(r"^SplitAnte=([^(< :>)]+<[^(< :>)]+(,[^(< :>)]+<[^(< :>)]+)*)$", splitante[0])
|
| 719 |
+
if not match:
|
| 720 |
+
Error(
|
| 721 |
+
state=state, config=self.incfg,
|
| 722 |
+
testid='spurious-splitante-statement',
|
| 723 |
+
message=f"Cannot parse the SplitAnte statement '{splitante[0]}'."
|
| 724 |
+
).confirm()
|
| 725 |
+
else:
|
| 726 |
+
antecedents = match.group(1).split(',')
|
| 727 |
+
# Hash src<tgt pairs and make sure they are not repeated. Also remember the number of antecedents for each target.
|
| 728 |
+
srctgt = {}
|
| 729 |
+
tgtante = {}
|
| 730 |
+
for a in antecedents:
|
| 731 |
+
match = re.match(r"^([^(< :>)]+)<([^(< :>)]+)$", a)
|
| 732 |
+
if match:
|
| 733 |
+
srceid = match.group(1)
|
| 734 |
+
tgteid = match.group(2)
|
| 735 |
+
if srceid == tgteid:
|
| 736 |
+
Error(
|
| 737 |
+
state=state, config=self.incfg,
|
| 738 |
+
testid='spurious-splitante-relation',
|
| 739 |
+
message=f"SplitAnte must not point from an entity to itself: '{srceid}<{tgteid}'."
|
| 740 |
+
).confirm()
|
| 741 |
+
elif not tgteid in starting_mentions:
|
| 742 |
+
Error(
|
| 743 |
+
state=state, config=self.incfg,
|
| 744 |
+
testid='misplaced-splitante-statement',
|
| 745 |
+
message=f"SplitAnte relation '{a}' must be annotated at the beginning of a mention of entity '{tgteid}'."
|
| 746 |
+
).confirm()
|
| 747 |
+
if srceid+'<'+tgteid in srctgt:
|
| 748 |
+
str_antecedents = ','.join(antecedents)
|
| 749 |
+
Error(
|
| 750 |
+
state=state, config=self.incfg,
|
| 751 |
+
testid='repeated-splitante-relation',
|
| 752 |
+
message=f"SplitAnte relation '{srceid}<{tgteid}' must not be repeated in '{str_antecedents}'."
|
| 753 |
+
).confirm()
|
| 754 |
+
else:
|
| 755 |
+
srctgt[srceid+'<'+tgteid] = True
|
| 756 |
+
if tgteid in tgtante:
|
| 757 |
+
tgtante[tgteid].append(srceid)
|
| 758 |
+
else:
|
| 759 |
+
tgtante[tgteid] = [srceid]
|
| 760 |
+
for tgteid in tgtante:
|
| 761 |
+
if len(tgtante[tgteid]) == 1:
|
| 762 |
+
str_antecedents = ','.join(antecedents)
|
| 763 |
+
Error(
|
| 764 |
+
state=state, config=self.incfg,
|
| 765 |
+
testid='only-one-split-antecedent',
|
| 766 |
+
message=f"SplitAnte statement '{str_antecedents}' must specify at least two antecedents for entity '{tgteid}'."
|
| 767 |
+
).confirm()
|
| 768 |
+
# Check in the global dictionary whether this relation has been specified at another mention.
|
| 769 |
+
tgtante[tgteid].sort()
|
| 770 |
+
if tgteid in state.entity_split_antecedents:
|
| 771 |
+
if tgtante[tgteid] != state.entity_split_antecedents[tgteid]['antecedents']:
|
| 772 |
+
Error(
|
| 773 |
+
state=state, config=self.incfg,
|
| 774 |
+
testid='split-antecedent-mismatch',
|
| 775 |
+
message=f"Split antecedent of entity '{tgteid}' does not match '{state.entity_split_antecedents[tgteid]['antecedents']}' specified earlier on line {state.entity_split_antecedents[tgteid]['line']}."
|
| 776 |
+
).confirm()
|
| 777 |
+
else:
|
| 778 |
+
state.entity_split_antecedents[tgteid] = {'antecedents': str(tgtante[tgteid]), 'line': state.sentence_line+iline}
|
| 779 |
+
if len(state.open_entity_mentions)>0:
|
| 780 |
+
Error(
|
| 781 |
+
state=state, config=self.incfg,
|
| 782 |
+
testid='cross-sentence-mention',
|
| 783 |
+
message=f"Entity mentions must not cross sentence boundaries; still open at sentence end: {str(state.open_entity_mentions)}."
|
| 784 |
+
).confirm()
|
| 785 |
+
# Close the mentions forcibly. Otherwise one omitted closing bracket would cause the error messages to to explode because the words would be collected from the remainder of the file.
|
| 786 |
+
state.open_entity_mentions = []
|
| 787 |
+
if len(state.open_discontinuous_mentions)>0:
|
| 788 |
+
Error(
|
| 789 |
+
state=state, config=self.incfg,
|
| 790 |
+
testid='cross-sentence-mention',
|
| 791 |
+
message=f"Entity mentions must not cross sentence boundaries; still open at sentence end: {str(state.open_discontinuous_mentions)}."
|
| 792 |
+
).confirm()
|
| 793 |
+
# Close the mentions forcibly. Otherwise one omission would cause the error messages to to explode because the words would be collected from the remainder of the file.
|
| 794 |
+
state.open_discontinuous_mentions = {}
|
| 795 |
+
# Since we only test mentions within one sentence at present, we do not have to carry all mention spans until the end of the corpus.
|
| 796 |
+
for eid in state.entity_mention_spans:
|
| 797 |
+
if sentid in state.entity_mention_spans[eid]:
|
| 798 |
+
state.entity_mention_spans[eid].pop(sentid)
|
ud-tools/udtools/src/udtools/state.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
# Allow using this module from the root folder of tools even if it is not
|
| 4 |
+
# installed as a package: use the relative path validator/src/validator for
|
| 5 |
+
# submodules. If the path is not available, try the standard qualification,
|
| 6 |
+
# assuming that the user has installed udtools from PyPI and then called
|
| 7 |
+
# from udtools import Validator.
|
| 8 |
+
try:
|
| 9 |
+
from udtools.src.udtools.incident import IncidentType
|
| 10 |
+
except ModuleNotFoundError:
|
| 11 |
+
from udtools.incident import IncidentType
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class State:
    """
    The State class holds various global data about where we are in the file
    and what we have seen so far. Typically there will be just one instance of
    this class.
    """

    def __init__(self):
        # Name of the current input file.
        self.current_file_name = None
        # Current line in the input file, or, more precisely, the last line
        # read so far. Once we start looking at tree integrity, we may find
        # errors on previous lines as well.
        self.current_line = 0
        # The line in the input file on which the current sentence starts,
        # including sentence-level comments.
        self.comment_start_line = 0
        # The line in the input file on which the current sentence starts
        # (the first node/token line, skipping comments).
        self.sentence_line = 0
        # The most recently read sentence id.
        self.sentence_id = None
        # List of input lines representing the current sentence (including
        # comments and the final empty line). Newline characters omitted.
        self.current_lines = []
        # List of token/node lines in the current sentence, each line split
        # to fields (columns). It is thus a list of lists of strings.
        self.current_token_node_table = []
        # Mapping from node ids (including empty nodes) to line numbers in the
        # input file. Dictionary indexed by string.
        self.current_node_linenos = {}
        # Needed to check that no space after last word of sentence does not
        # co-occur with new paragraph or document.
        self.spaceafterno_in_effect = False
        # Incident counter by type. Key: incident type, test class; value: incident count.
        # Incremented in Incident.report(), even if reporting is off or over --max_err.
        self.error_counter = defaultdict(lambda: defaultdict(int))
        # List of incidents confirmed so far, up to --max_store.
        self.error_tracker = []
        # Set of detailed error explanations that have been printed so far.
        # Each explanation will be printed only once. Typically, an explanation
        # can be identified by test id + language code. Nevertheless, we put
        # the whole explanation to the set.
        self.explanation_printed = set()
        # Some feature-related errors can only be reported if the corpus
        # contains feature annotation because features are optional in general.
        # Once we see the first feature, we can flush all accumulated
        # complaints about missing features.
        # Key: testid; value: dict with parameters of the error and the list of
        # its occurrences.
        self.delayed_feature_errors = {}
        # Remember all sentence ids seen in all input files (presumably one
        # corpus). We need it to check that each id is unique.
        self.known_sent_ids = set()
        # Similarly, parallel ids should be unique in a corpus. (If multiple
        # sentences are equivalents of the same virtual sentence in the
        # parallel collection, they should be distinguished with 'altN'.)
        self.known_parallel_ids = set()
        self.parallel_id_lastalt = {}
        self.parallel_id_lastpart = {}
        #----------------------------------------------------------------------
        # Various things that we may have seen earlier in the corpus. The value
        # is None if we have not seen it, otherwise it is the line number of
        # the first occurrence.
        #----------------------------------------------------------------------
        self.seen_morpho_feature = None
        self.seen_enhanced_graph = None
        self.seen_tree_without_enhanced_graph = None
        # Any difference between non-empty DEPS and HEAD:DEPREL.
        # (Because we can see many enhanced graphs but no real enhancements.)
        self.seen_enhancement = None
        self.seen_empty_node = None
        self.seen_enhanced_orphan = None
        # global.entity comment line is needed for Entity annotations in MISC.
        self.seen_global_entity = None
        #----------------------------------------------------------------------
        # Additional observations related to Entity annotation in MISC
        # (only needed when validating entities and coreference).
        #----------------------------------------------------------------------
        # Remember the global.entity attribute string to be able to check that
        # repeated declarations are identical.
        self.global_entity_attribute_string = None
        # The number of entity attributes will be derived from the attribute
        # string and will be used to check that an entity does not have extra
        # attributes.
        self.entity_attribute_number = 0
        # Key: entity attribute name; value: the index of the attribute in the
        # entity attribute list.
        self.entity_attribute_index = {}
        # Key: entity (cluster) id; value: tuple: (type of the entity, identity
        # (Wikipedia etc.), line of the first mention).
        self.entity_types = {}
        # Indices of known entity ids in this and other documents.
        # (Otherwise, if we only needed to know that an entity is known, we
        # could use self.entity_types above.)
        self.entity_ids_this_document = {}
        self.entity_ids_other_documents = {}
        # List of currently open entity mentions. Items are dictionaries with
        # entity mention information.
        self.open_entity_mentions = []
        # For each entity that has currently open discontinuous mention,
        # describe the last part of the mention. Key: entity id; value is dict,
        # its keys: last_ipart, npart, line.
        self.open_discontinuous_mentions = {}
        # Key: srceid<tgteid pair; value: type of the entity (may be empty).
        self.entity_bridge_relations = {}
        # Key: tgteid; value: sorted list of srceids, serialized to string.
        self.entity_split_antecedents = {}
        # Key: [eid][sentid][str(mention_span)]; value: set of node ids.
        self.entity_mention_spans = {}

    def get_current_file_name(self):
        """
        Returns the current file name in the form suitable for Incident objects
        and their string reports (i.e., 'STDIN' instead of '-', basename for
        paths, 'NONE' otherwise).

        Returns
        -------
        str
            The modified name of the current input file.
        """
        if not self.current_file_name:
            return 'NONE'
        if self.current_file_name == '-':
            return 'STDIN'
        return os.path.basename(self.current_file_name)

    def __str__(self):
        """Summarize the warnings and errors counted so far."""
        result = ''
        passed = True
        nerror = 0
        if self.error_counter:
            # Use .get() rather than indexing: self.error_counter is a
            # defaultdict, and indexing a missing key in a read-only summary
            # method would silently insert an empty entry as a side effect.
            nwarning = sum(self.error_counter.get(IncidentType.WARNING, {}).values())
            if nwarning > 0:
                result += f"Warnings: {nwarning}\n"
            for k, v in sorted(self.error_counter.get(IncidentType.ERROR, {}).items()):
                nerror += v
                passed = False
                result += f"{str(k)} errors: {v}\n"
        if passed:
            result += '*** PASSED ***'
        else:
            result += f'*** FAILED *** with {nerror} errors'
        return result

    def passed(self):
        """Return True if no errors have been counted (warnings do not count)."""
        # .get() avoids autovivifying an empty ERROR entry in the defaultdict.
        return not any(v > 0 for v in self.error_counter.get(IncidentType.ERROR, {}).values())

    def __bool__(self):
        """Truth value of the state: True iff validation has passed so far."""
        return self.passed()
|
ud-tools/udtools/src/udtools/udeval.py
ADDED
|
@@ -0,0 +1,882 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
# Code from CoNLL 2018 UD shared task updated for evaluation of enhanced
|
| 4 |
+
# dependencies in IWPT 2020 shared task.
|
| 5 |
+
# -- read DEPS, split on '|', compute overlap
|
| 6 |
+
# New metrics ELAS and EULAS.
|
| 7 |
+
# Gosse Bouma
|
| 8 |
+
# New option --enhancements can switch off evaluation of certain types of
|
| 9 |
+
# enhancements: default --enhancements 0 ... evaluate all enhancement types
|
| 10 |
+
# 1 ... no gapping; 2 ... no coord shared parents; 3 ... no coord shared dependents
|
| 11 |
+
# 4 ... no xsubj (control verbs); 5 ... no relative clauses; 6 ... no case info in deprels;
|
| 12 |
+
# combinations: 12 ... both 1 and 2 apply
|
| 13 |
+
|
| 14 |
+
# Compatible with Python 2.7 and 3.2+, can be used either as a module
|
| 15 |
+
# or a standalone executable.
|
| 16 |
+
#
|
| 17 |
+
# Copyright 2017, 2018 Institute of Formal and Applied Linguistics (UFAL),
|
| 18 |
+
# Faculty of Mathematics and Physics, Charles University, Czech Republic.
|
| 19 |
+
#
|
| 20 |
+
# This Source Code Form is subject to the terms of the Mozilla Public
|
| 21 |
+
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
| 22 |
+
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 23 |
+
#
|
| 24 |
+
# Authors: Milan Straka, Martin Popel <surname@ufal.mff.cuni.cz>
|
| 25 |
+
#
|
| 26 |
+
# Changelog:
|
| 27 |
+
# - [12 Apr 2018] Version 0.9: Initial release.
|
| 28 |
+
# - [19 Apr 2018] Version 1.0: Fix bug in MLAS (duplicate entries in functional_children).
|
| 29 |
+
# Add --counts option.
|
| 30 |
+
# - [02 May 2018] Version 1.1: When removing spaces to match gold and system characters,
|
| 31 |
+
# consider all Unicode characters of category Zs instead of
|
| 32 |
+
# just ASCII space.
|
| 33 |
+
# - [25 Jun 2018] Version 1.2: Use python3 in the she-bang (instead of python).
|
| 34 |
+
# In Python2, make the whole computation use `unicode` strings.
|
| 35 |
+
|
| 36 |
+
# Command line usage
|
| 37 |
+
# ------------------
|
| 38 |
+
# eval.py [-v] [-c] gold_conllu_file system_conllu_file
|
| 39 |
+
#
|
| 40 |
+
# - if no -v is given, only the official IWPT 2020 Shared Task evaluation metrics
|
| 41 |
+
# are printed
|
| 42 |
+
# - if -v is given, more metrics are printed (as precision, recall, F1 score,
|
| 43 |
+
# and in case the metric is computed on aligned words also accuracy on these):
|
| 44 |
+
# - Tokens: how well do the gold tokens match system tokens
|
| 45 |
+
# - Sentences: how well do the gold sentences match system sentences
|
| 46 |
+
# - Words: how well can the gold words be aligned to system words
|
| 47 |
+
# - UPOS: using aligned words, how well does UPOS match
|
| 48 |
+
# - XPOS: using aligned words, how well does XPOS match
|
| 49 |
+
# - UFeats: using aligned words, how well does universal FEATS match
|
| 50 |
+
# - AllTags: using aligned words, how well does UPOS+XPOS+FEATS match
|
| 51 |
+
# - Lemmas: using aligned words, how well does LEMMA match
|
| 52 |
+
# - UAS: using aligned words, how well does HEAD match
|
| 53 |
+
# - LAS: using aligned words, how well does HEAD+DEPREL(ignoring subtypes) match
|
| 54 |
+
# - CLAS: using aligned words with content DEPREL, how well does
|
| 55 |
+
# HEAD+DEPREL(ignoring subtypes) match
|
| 56 |
+
# - MLAS: using aligned words with content DEPREL, how well does
|
| 57 |
+
# HEAD+DEPREL(ignoring subtypes)+UPOS+UFEATS+FunctionalChildren(DEPREL+UPOS+UFEATS) match
|
| 58 |
+
# - BLEX: using aligned words with content DEPREL, how well does
|
| 59 |
+
# HEAD+DEPREL(ignoring subtypes)+LEMMAS match
|
| 60 |
+
# - if -c is given, raw counts of correct/gold_total/system_total/aligned words are printed
|
| 61 |
+
# instead of precision/recall/F1/AlignedAccuracy for all metrics.
|
| 62 |
+
|
| 63 |
+
# API usage
|
| 64 |
+
# ---------
|
| 65 |
+
# - load_conllu(file)
|
| 66 |
+
# - loads CoNLL-U file from given file object to an internal representation
|
| 67 |
+
# - the file object should return str in both Python 2 and Python 3
|
| 68 |
+
# - raises UDError exception if the given file cannot be loaded
|
| 69 |
+
# - evaluate(gold_ud, system_ud)
|
| 70 |
+
# - evaluate the given gold and system CoNLL-U files (loaded with load_conllu)
|
| 71 |
+
# - raises UDError if the concatenated tokens of gold and system file do not match
|
| 72 |
+
# - returns a dictionary with the metrics described above, each metric having
|
| 73 |
+
# three fields: precision, recall and f1
|
| 74 |
+
|
| 75 |
+
# Description of token matching
|
| 76 |
+
# -----------------------------
|
| 77 |
+
# In order to match tokens of gold file and system file, we consider the text
|
| 78 |
+
# resulting from concatenation of gold tokens and text resulting from
|
| 79 |
+
# concatenation of system tokens. These texts should match -- if they do not,
|
| 80 |
+
# the evaluation fails.
|
| 81 |
+
#
|
| 82 |
+
# If the texts do match, every token is represented as a range in this original
|
| 83 |
+
# text, and tokens are equal only if their range is the same.
|
| 84 |
+
|
| 85 |
+
# Description of word matching
|
| 86 |
+
# ----------------------------
|
| 87 |
+
# When matching words of gold file and system file, we first match the tokens.
|
| 88 |
+
# The words which are also tokens are matched as tokens, but words in multi-word
|
| 89 |
+
# tokens have to be handled differently.
|
| 90 |
+
#
|
| 91 |
+
# To handle multi-word tokens, we start by finding "multi-word spans".
|
| 92 |
+
# Multi-word span is a span in the original text such that
|
| 93 |
+
# - it contains at least one multi-word token
|
| 94 |
+
# - all multi-word tokens in the span (considering both gold and system ones)
|
| 95 |
+
# are completely inside the span (i.e., they do not "stick out")
|
| 96 |
+
# - the multi-word span is as small as possible
|
| 97 |
+
#
|
| 98 |
+
# For every multi-word span, we align the gold and system words completely
|
| 99 |
+
# inside this span using LCS on their FORMs. The words not intersecting
|
| 100 |
+
# (even partially) any multi-word span are then aligned as tokens.
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
from __future__ import division
|
| 104 |
+
from __future__ import print_function
|
| 105 |
+
|
| 106 |
+
import io
|
| 107 |
+
import sys
|
| 108 |
+
import unicodedata
|
| 109 |
+
import unittest
|
| 110 |
+
|
| 111 |
+
# Indices of the ten CoNLL-U columns (ID is 0, MISC is 9).
ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10)

# Universal relations whose dependents count as content words; these
# define the word set scored by the content-word metrics (CLAS/MLAS/BLEX).
CONTENT_DEPRELS = set(
    "nsubj obj iobj csubj ccomp xcomp obl vocative expl dislocated "
    "advcl advmod discourse nmod appos nummod acl amod conj fixed "
    "flat compound list parataxis orphan goeswith reparandum root dep".split()
)

# Universal relations whose dependents are function words; used to collect
# functional children for MLAS.
FUNCTIONAL_DEPRELS = set("aux cop mark det clf case cc".split())

# Features defined by the universal guidelines; anything else in FEATS is
# language-specific and is stripped before comparison.
UNIVERSAL_FEATURES = set(
    "PronType NumType Poss Reflex Foreign Abbr Gender Animacy Number "
    "Case Definite Degree VerbForm Mood Tense Aspect Voice Evident "
    "Polarity Person Polite".split()
)
|
| 131 |
+
|
| 132 |
+
# Every loading or evaluation failure in this module is reported as UDError.
class UDError(Exception):
    """Raised when a CoNLL-U file cannot be loaded or the inputs cannot be evaluated."""
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# Relations whose enhanced subtype may carry case/lemma material
# (stripped under enhancement 6, --enhancements containing '6').
CASE_DEPRELS = set("obl nmod conj advcl".split())
# Subtype extensions defined by the universal guidelines; a second subtype
# element outside this set is treated as case information.
UNIVERSAL_DEPREL_EXTENSIONS = set("pass relcl xsubj".split())
|
| 140 |
+
|
| 141 |
+
# Parse the DEPS column into a list of (head, dependency_path) tuples.
# Collapsing empty nodes can produce multi-step paths such as
# "3:conj:en>obl:voor", which parses to ('3', ['conj:en', 'obl:voor']).
def process_enhanced_deps(deps):
    """Split a CoNLL-U DEPS value into (head, [relation, ...]) pairs.

    An empty string or '_' yields an empty list. Each edge keeps its head
    index as a string; the relation part is split on '>' because collapsed
    empty nodes give rise to multi-step relation paths.
    """
    if deps in ('', '_'):
        return []
    edges = []
    for edge in deps.split('|'):
        head, path = edge.split(':', 1)
        edges.append((head, path.split('>')))
    return edges
|
| 151 |
+
|
| 152 |
+
# Load given CoNLL-U file into internal representation.
# The file parameter is the open file object.
# The path parameter is needed only for diagnostic messages.
def load_conllu(file, path, treebank_type):
    """Read one CoNLL-U file into an internal UDRepresentation.

    Parameters
    ----------
    file : file object
        Open text-mode file yielding CoNLL-U lines; consumed via readline().
    path : str
        Stored on the result; used only for diagnostic messages.
    treebank_type : dict
        Flags switching off evaluation of specific enhancement types
        ('no_gapping', 'no_shared_parents_in_coordination',
        'no_shared_dependents_in_coordination', 'no_control',
        'no_external_arguments_of_relative_clauses', 'no_case_info',
        'no_empty_nodes', 'multiple_roots_okay').

    Returns
    -------
    UDRepresentation
        Concatenated characters, token/sentence spans and UDWord instances.

    Raises
    ------
    UDError
        On malformed input: wrong column count, unparsable IDs/HEADs,
        HEAD cycles, empty FORM, unfinished multi-word token, multiple or
        missing roots, or a file that does not end with an empty line.
    """
    # Internal representation classes
    class UDRepresentation:
        def __init__(self):
            # Characters of all the tokens in the whole file.
            # Whitespace between tokens is not included.
            self.characters = []
            # List of UDSpan instances with start&end indices into `characters`.
            self.tokens = []
            # List of UDWord instances.
            self.words = []
            # List of UDSpan instances with start&end indices into `characters`.
            self.sentences = []
            # File path may be needed in error messages.
            self.path = ''
    class UDSpan:
        def __init__(self, start, end, line):
            self.start = start
            # Note that self.end marks the first position **after the end** of span,
            # so we can use characters[start:end] or range(start, end).
            self.end = end
            # Line number (1-based) will be useful if we need to report an error later.
            self.line = line
    class UDWord:
        def __init__(self, span, columns, is_multiword):
            # Span of this word (or MWT, see below) within ud_representation.characters.
            self.span = span
            # 10 columns of the CoNLL-U file: ID, FORM, LEMMA,...
            self.columns = columns
            # is_multiword==True means that this word is part of a multi-word token.
            # In that case, self.span marks the span of the whole multi-word token.
            self.is_multiword = is_multiword
            # Reference to the UDWord instance representing the HEAD (or None if root).
            self.parent = None
            # List of references to UDWord instances representing functional-deprel children.
            self.functional_children = []
            # Only consider universal FEATS.
            self.columns[FEATS] = "|".join(sorted(feat for feat in columns[FEATS].split("|")
                                                  if feat.split("=", 1)[0] in UNIVERSAL_FEATURES))
            # Let's ignore language-specific deprel subtypes.
            self.columns[DEPREL] = columns[DEPREL].split(":")[0]
            # Precompute which deprels are CONTENT_DEPRELS and which FUNCTIONAL_DEPRELS
            self.is_content_deprel = self.columns[DEPREL] in CONTENT_DEPRELS
            self.is_functional_deprel = self.columns[DEPREL] in FUNCTIONAL_DEPRELS
            # store enhanced deps --GB
            # split string positions and enhanced labels as well?
            # NOTE: DEPS is replaced in place by the parsed (head, steps) list.
            self.columns[DEPS] = process_enhanced_deps(columns[DEPS])

    ud = UDRepresentation()

    # Load the CoNLL-U file
    ud.path = path
    index, sentence_start = 0, None
    line_idx = 0
    while True:
        line = file.readline()
        line_idx += 1 # errors will be displayed indexed from 1
        if not line:
            break
        line = line.rstrip("\r\n")

        # Handle sentence start boundaries
        if sentence_start is None:
            # Skip comments
            if line.startswith("#"):
                continue
            # Start a new sentence
            ud.sentences.append(UDSpan(index, 0, line_idx))
            sentence_start = len(ud.words)
        if not line:
            # An empty line ends the current sentence: link HEADs to UDWord
            # parents, apply the requested enhancement filters, and validate.
            # Add parent and children UDWord links and check there are no cycles
            def process_word(word):
                if word.parent == "remapping":
                    # We reached a word we are already processing on the way up:
                    # the HEAD chain loops back on itself.
                    raise UDError("There is a cycle in the sentence that ends at line %d" % line_idx)
                if word.parent is None:
                    head = int(word.columns[HEAD])
                    if head < 0 or head > len(ud.words) - sentence_start:
                        raise UDError("HEAD '{}' points outside of the sentence that ends at line {}".format(word.columns[HEAD], line_idx))
                    if head:
                        parent = ud.words[sentence_start + head - 1]
                        # Temporary sentinel so the recursive call can detect cycles.
                        word.parent = "remapping"
                        process_word(parent)
                        word.parent = parent

            position = sentence_start # need to incrementally keep track of current position for loop detection in relcl
            for word in ud.words[sentence_start:]:
                process_word(word)
                enhanced_deps = word.columns[DEPS]
                # replace head positions of enhanced dependencies with parent word object -- GB
                processed_deps = []
                for (head,steps) in word.columns[DEPS] : # (3,['conj:en','obj:voor'])
                    # Empty nodes should have been collapsed during preprocessing.
                    # If not, we cannot evaluate gapping correctly. However, people
                    # may care just about basic trees and may not want to bother
                    # with preprocessing.
                    if '.' in head:
                        if treebank_type.get('no_empty_nodes', False):
                            raise UDError("The collapsed CoNLL-U file still contains references to empty nodes at line {}: {}".format(line_idx, line))
                        else:
                            continue
                    hd = int(head)
                    parent = ud.words[sentence_start + hd -1] if hd else hd # just assign '0' to parent for root cases
                    processed_deps.append((parent,steps))
                enhanced_deps = processed_deps

                # ignore rel>rel dependencies, and instead append the original hd/rel edge
                # note that this also ignores other extensions (like adding lemma's)
                # note that this sometimes introduces duplicates (if orig hd/rel was already included in DEPS)
                if treebank_type.get('no_gapping', False) : # enhancement 1
                    processed_deps = []
                    for (parent,steps) in enhanced_deps :
                        if len(steps) > 1 :
                            processed_deps.append((word.parent,[word.columns[DEPREL]]))
                        else :
                            if (parent,steps) in processed_deps :
                                # Deliberate no-op: this exact edge was already added.
                                # NOTE(review): 'True' as a statement does nothing; 'pass'
                                # would express the same intent more idiomatically.
                                True
                            else :
                                processed_deps.append((parent,steps))
                    enhanced_deps = processed_deps

                # for a given conj node, any rel other than conj in DEPS can be ignored
                if treebank_type.get('no_shared_parents_in_coordination', False) : # enhancement 2
                    for (hd,steps) in enhanced_deps :
                        if len(steps) == 1 and steps[0].startswith('conj') :
                            enhanced_deps = [(hd,steps)]

                # deprels not matching ud_hd/ud_dep are spurious.
                # czech/pud estonian/ewt syntagrus finnish/pud
                # TO DO: treebanks that do not mark xcomp and relcl subjects
                if treebank_type.get('no_shared_dependents_in_coordination', False) : # enhancement 3
                    processed_deps = []
                    for (hd,steps) in enhanced_deps :
                        duplicate = 0
                        for (hd2,steps2) in enhanced_deps :
                            if steps == steps2 and hd2 == word.columns[HEAD] and hd != hd2 : # checking only for ud_hd here, check for ud_dep as well?
                                duplicate = 1
                        if not(duplicate) :
                            processed_deps.append((hd,steps))
                    enhanced_deps = processed_deps

                # if treebank does not have control relations: subjects of xcomp parents in system are to be skipped
                # note that rel is actually a path sometimes rel1>rel2 in theory rel2 could be subj?
                # from lassy-small: 7:conj:en>nsubj:pass|7:conj:en>nsubj:xsubj (7,['conj:en','nsubj:xsubj'])
                if treebank_type.get('no_control', False) : # enhancement 4
                    processed_deps = []
                    for (parent,steps) in enhanced_deps :
                        include = 1
                        if ( parent and parent.columns[DEPREL] == 'xcomp') :
                            for rel in steps:
                                if rel.startswith('nsubj') :
                                    include = 0
                        if include :
                            processed_deps.append((parent,steps))
                    enhanced_deps = processed_deps

                if treebank_type.get('no_external_arguments_of_relative_clauses', False) : # enhancement 5
                    processed_deps = []
                    for (parent,steps) in enhanced_deps :
                        if (steps[0] == 'ref') :
                            processed_deps.append((word.parent,[word.columns[DEPREL]])) # append the original relation
                        # ignore external argument link
                        # external args are deps of an acl:relcl where that acl also is a dependent of external arg (i.e. ext arg introduces a cycle)
                        elif ( parent and parent.columns[DEPREL].startswith('acl') and int(parent.columns[HEAD]) == position - sentence_start ) :
                            #print('removed external argument')
                            # Deliberate no-op: drop the cycle-introducing edge.
                            True
                        else :
                            processed_deps.append((parent,steps))
                    enhanced_deps = processed_deps

                # treebanks where no lemma info has been added
                if treebank_type.get('no_case_info', False) : # enhancement number 6
                    processed_deps = []
                    for (hd,steps) in enhanced_deps :
                        processed_steps = []
                        for dep in steps :
                            depparts = dep.split(':')
                            if depparts[0] in CASE_DEPRELS :
                                if (len(depparts) == 2 and not(depparts[1] in UNIVERSAL_DEPREL_EXTENSIONS )) :
                                    dep = depparts[0]
                            processed_steps.append(dep)
                        processed_deps.append((hd,processed_steps))
                    enhanced_deps = processed_deps

                position += 1
                word.columns[DEPS] = enhanced_deps

            # func_children cannot be assigned within process_word
            # because it is called recursively and may result in adding one child twice.
            for word in ud.words[sentence_start:]:
                if word.parent and word.is_functional_deprel:
                    word.parent.functional_children.append(word)

            if len(ud.words) == sentence_start :
                raise UDError("There is a sentence with 0 tokens (possibly a double blank line) at line %d" % line_idx)

            # Check there is a single root node
            if len([word for word in ud.words[sentence_start:] if word.parent is None]) == 0:
                raise UDError("There are no roots in the sentence that ends at %d" % line_idx)
            if not treebank_type.get('multiple_roots_okay', False):
                if len([word for word in ud.words[sentence_start:] if word.parent is None]) > 1:
                    raise UDError("There are multiple roots in the sentence that ends at %d" % line_idx)

            # End the sentence
            ud.sentences[-1].end = index
            sentence_start = None
            continue

        # Read next token/word
        columns = line.split("\t")
        if len(columns) != 10:
            raise UDError("The CoNLL-U line does not contain 10 tab-separated columns at line {}: '{}'".format(line_idx, line))

        # Skip empty nodes
        # If we are evaluating enhanced graphs, empty nodes should have been collapsed
        # during preprocessing and should not occur here. However, we cannot raise
        # an exception if they do because the user may be interested just in the
        # basic tree and may not want to bother with preprocessing.
        if "." in columns[ID]:
            # When launching this script, we can specify that empty nodes should be considered errors.
            if treebank_type.get('no_empty_nodes', False):
                raise UDError("The collapsed CoNLL-U line still contains empty nodes at line {}: {}".format(line_idx, line))
            else:
                continue

        # Delete spaces from FORM, so gold.characters == system.characters
        # even if one of them tokenizes the space. Use any Unicode character
        # with category Zs.
        columns[FORM] = "".join(filter(lambda c: unicodedata.category(c) != "Zs", columns[FORM]))
        if not columns[FORM]:
            raise UDError("There is an empty FORM in the CoNLL-U file at line %d" % line_idx)

        # Save token
        ud.characters.extend(columns[FORM])
        ud.tokens.append(UDSpan(index, index + len(columns[FORM]), line_idx))
        index += len(columns[FORM])

        # Handle multi-word tokens to save word(s)
        if "-" in columns[ID]:
            try:
                start, end = map(int, columns[ID].split("-"))
            # NOTE(review): bare 'except' also catches KeyboardInterrupt/SystemExit;
            # 'except ValueError' would be the targeted choice here.
            except:
                raise UDError("Cannot parse multi-word token ID '{}' at line {}".format(columns[ID], line_idx))

            words_expected = end - start + 1
            words_found = 0
            while words_found < words_expected:
                word_line = file.readline().rstrip("\r\n")
                if not word_line:
                    raise UDError("The CoNLL-U file ends in an unfinished MWT at line {}".format(line_idx))
                line_idx += 1
                word_columns = word_line.split("\t")
                if len(word_columns) != 10:
                    raise UDError("The CoNLL-U line does not contain 10 tab-separated columns at line {}: '{}'".format(line_idx, word_line))
                if "." in word_columns[ID]:
                    if treebank_type.get('no_empty_nodes', False):
                        raise UDError("The collapsed CoNLL-U line still contains empty nodes at line {}: {}".format(line_idx, line))
                    else:
                        continue
                # Every word of the MWT shares the span of the whole token.
                ud.words.append(UDWord(ud.tokens[-1], word_columns, is_multiword=True))
                words_found += 1

        # Basic tokens/words
        else:
            try:
                word_id = int(columns[ID])
            # NOTE(review): bare 'except'; 'except ValueError' would be safer.
            except:
                raise UDError("Cannot parse word ID '{}' at line {}".format(columns[ID], line_idx))
            if word_id != len(ud.words) - sentence_start + 1:
                raise UDError("Incorrect word ID '{}' for word '{}', expected '{}' at line {}".format(
                    columns[ID], columns[FORM], len(ud.words) - sentence_start + 1, line_idx))

            try:
                head_id = int(columns[HEAD])
            except ValueError as e:
                raise UDError("Cannot parse HEAD '{}' at line {}".format(columns[HEAD], line_idx)) from e
            if head_id < 0:
                raise UDError("HEAD cannot be negative at line %d" % line_idx)

            ud.words.append(UDWord(ud.tokens[-1], columns, is_multiword=False))

    # A well-formed CoNLL-U file ends every sentence (including the last)
    # with an empty line, which resets sentence_start to None.
    if sentence_start is not None:
        raise UDError("The CoNLL-U file does not end with empty line")

    return ud
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
# Evaluate the gold and system treebanks (loaded using load_conllu).
|
| 443 |
+
def evaluate(gold_ud, system_ud):
|
| 444 |
+
"""
|
| 445 |
+
Takes internal representations of two CoNLL-U files, compares their
|
| 446 |
+
contents and returns the scores.
|
| 447 |
+
|
| 448 |
+
Parameters
|
| 449 |
+
----------
|
| 450 |
+
gold_ud : UDRepresentation
|
| 451 |
+
Gold standard data.
|
| 452 |
+
system_ud : UDRepresentationi
|
| 453 |
+
System output data.
|
| 454 |
+
|
| 455 |
+
Raises
|
| 456 |
+
------
|
| 457 |
+
UDError
|
| 458 |
+
If the underlying texts of the files are not compatible.
|
| 459 |
+
|
| 460 |
+
Returns
|
| 461 |
+
-------
|
| 462 |
+
dict
|
| 463 |
+
Indexed by metric names, the values are scores.
|
| 464 |
+
"""
|
| 465 |
+
class Score:
|
| 466 |
+
def __init__(self, gold_total, system_total, correct, aligned_total=None):
|
| 467 |
+
self.correct = correct
|
| 468 |
+
self.gold_total = gold_total
|
| 469 |
+
self.system_total = system_total
|
| 470 |
+
self.aligned_total = aligned_total
|
| 471 |
+
self.precision = correct / system_total if system_total else 0.0
|
| 472 |
+
self.recall = correct / gold_total if gold_total else 0.0
|
| 473 |
+
self.f1 = 2 * correct / (system_total + gold_total) if system_total + gold_total else 0.0
|
| 474 |
+
self.aligned_accuracy = correct / aligned_total if aligned_total else aligned_total
|
| 475 |
+
class AlignmentWord:
|
| 476 |
+
def __init__(self, gold_word, system_word):
|
| 477 |
+
self.gold_word = gold_word
|
| 478 |
+
self.system_word = system_word
|
| 479 |
+
class Alignment:
|
| 480 |
+
def __init__(self, gold_words, system_words):
|
| 481 |
+
self.gold_words = gold_words
|
| 482 |
+
self.system_words = system_words
|
| 483 |
+
self.matched_words = []
|
| 484 |
+
self.matched_words_map = {}
|
| 485 |
+
def append_aligned_words(self, gold_word, system_word):
|
| 486 |
+
self.matched_words.append(AlignmentWord(gold_word, system_word))
|
| 487 |
+
self.matched_words_map[system_word] = gold_word
|
| 488 |
+
|
| 489 |
+
def spans_score(gold_spans, system_spans):
|
| 490 |
+
correct, gi, si = 0, 0, 0
|
| 491 |
+
while gi < len(gold_spans) and si < len(system_spans):
|
| 492 |
+
if system_spans[si].start < gold_spans[gi].start:
|
| 493 |
+
si += 1
|
| 494 |
+
elif gold_spans[gi].start < system_spans[si].start:
|
| 495 |
+
gi += 1
|
| 496 |
+
else:
|
| 497 |
+
correct += gold_spans[gi].end == system_spans[si].end
|
| 498 |
+
si += 1
|
| 499 |
+
gi += 1
|
| 500 |
+
|
| 501 |
+
return Score(len(gold_spans), len(system_spans), correct)
|
| 502 |
+
|
| 503 |
+
def alignment_score(alignment, key_fn=None, filter_fn=None):
|
| 504 |
+
if filter_fn is not None:
|
| 505 |
+
gold = sum(1 for gold in alignment.gold_words if filter_fn(gold))
|
| 506 |
+
system = sum(1 for system in alignment.system_words if filter_fn(system))
|
| 507 |
+
aligned = sum(1 for word in alignment.matched_words if filter_fn(word.gold_word))
|
| 508 |
+
else:
|
| 509 |
+
gold = len(alignment.gold_words)
|
| 510 |
+
system = len(alignment.system_words)
|
| 511 |
+
aligned = len(alignment.matched_words)
|
| 512 |
+
|
| 513 |
+
if key_fn is None:
|
| 514 |
+
# Return score for whole aligned words
|
| 515 |
+
return Score(gold, system, aligned)
|
| 516 |
+
|
| 517 |
+
def gold_aligned_gold(word):
|
| 518 |
+
return word
|
| 519 |
+
def gold_aligned_system(word):
|
| 520 |
+
return alignment.matched_words_map.get(word, 'NotAligned') if word is not None else None
|
| 521 |
+
correct = 0
|
| 522 |
+
for words in alignment.matched_words:
|
| 523 |
+
if filter_fn is None or filter_fn(words.gold_word):
|
| 524 |
+
if key_fn(words.gold_word, gold_aligned_gold) == key_fn(words.system_word, gold_aligned_system):
|
| 525 |
+
correct += 1
|
| 526 |
+
|
| 527 |
+
return Score(gold, system, correct, aligned)
|
| 528 |
+
|
| 529 |
+
def enhanced_alignment_score(alignment, EULAS):
|
| 530 |
+
# count all matching enhanced deprels in gold, system GB
|
| 531 |
+
# gold and system = sum of gold and predicted deps
|
| 532 |
+
# parents are pointers to word object, make sure to compare system parent with aligned word in gold in cases where
|
| 533 |
+
# tokenization introduces mismatches in number of words per sentence.
|
| 534 |
+
gold = 0
|
| 535 |
+
for gold_word in alignment.gold_words :
|
| 536 |
+
gold += len(gold_word.columns[DEPS])
|
| 537 |
+
system = 0
|
| 538 |
+
for system_word in alignment.system_words :
|
| 539 |
+
system += len(system_word.columns[DEPS])
|
| 540 |
+
correct = 0
|
| 541 |
+
for words in alignment.matched_words:
|
| 542 |
+
gold_deps = words.gold_word.columns[DEPS]
|
| 543 |
+
system_deps = words.system_word.columns[DEPS]
|
| 544 |
+
for (parent, dep) in gold_deps :
|
| 545 |
+
eulas_dep = [d.split(':')[0] for d in dep]
|
| 546 |
+
for (sparent, sdep) in system_deps:
|
| 547 |
+
eulas_sdep = [d.split(':')[0] for d in sdep]
|
| 548 |
+
if dep == sdep or ( eulas_dep == eulas_sdep and EULAS ) :
|
| 549 |
+
if parent == alignment.matched_words_map.get(sparent, 'NotAligned') :
|
| 550 |
+
correct += 1
|
| 551 |
+
elif (parent == 0 and sparent == 0) : # cases where parent is root
|
| 552 |
+
correct += 1
|
| 553 |
+
return Score(gold, system, correct)
|
| 554 |
+
|
| 555 |
+
def beyond_end(words, i, multiword_span_end):
|
| 556 |
+
if i >= len(words):
|
| 557 |
+
return True
|
| 558 |
+
if words[i].is_multiword:
|
| 559 |
+
return words[i].span.start >= multiword_span_end
|
| 560 |
+
return words[i].span.end > multiword_span_end
|
| 561 |
+
|
| 562 |
+
def extend_end(word, multiword_span_end):
|
| 563 |
+
if word.is_multiword and word.span.end > multiword_span_end:
|
| 564 |
+
return word.span.end
|
| 565 |
+
return multiword_span_end
|
| 566 |
+
|
| 567 |
+
def find_multiword_span(gold_words, system_words, gi, si):
|
| 568 |
+
# We know gold_words[gi].is_multiword or system_words[si].is_multiword.
|
| 569 |
+
# Find the start of the multiword span (gs, ss), so the multiword span is minimal.
|
| 570 |
+
# Initialize multiword_span_end characters index.
|
| 571 |
+
if gold_words[gi].is_multiword:
|
| 572 |
+
multiword_span_end = gold_words[gi].span.end
|
| 573 |
+
if not system_words[si].is_multiword and system_words[si].span.start < gold_words[gi].span.start:
|
| 574 |
+
si += 1
|
| 575 |
+
else: # if system_words[si].is_multiword
|
| 576 |
+
multiword_span_end = system_words[si].span.end
|
| 577 |
+
if not gold_words[gi].is_multiword and gold_words[gi].span.start < system_words[si].span.start:
|
| 578 |
+
gi += 1
|
| 579 |
+
gs, ss = gi, si
|
| 580 |
+
|
| 581 |
+
# Find the end of the multiword span
|
| 582 |
+
# (so both gi and si are pointing to the word following the multiword span end).
|
| 583 |
+
while not beyond_end(gold_words, gi, multiword_span_end) or \
|
| 584 |
+
not beyond_end(system_words, si, multiword_span_end):
|
| 585 |
+
if gi < len(gold_words) and (si >= len(system_words) or
|
| 586 |
+
gold_words[gi].span.start <= system_words[si].span.start):
|
| 587 |
+
multiword_span_end = extend_end(gold_words[gi], multiword_span_end)
|
| 588 |
+
gi += 1
|
| 589 |
+
else:
|
| 590 |
+
multiword_span_end = extend_end(system_words[si], multiword_span_end)
|
| 591 |
+
si += 1
|
| 592 |
+
return gs, ss, gi, si
|
| 593 |
+
|
| 594 |
+
def compute_lcs(gold_words, system_words, gi, si, gs, ss):
|
| 595 |
+
lcs = [[0] * (si - ss) for i in range(gi - gs)]
|
| 596 |
+
for g in reversed(range(gi - gs)):
|
| 597 |
+
for s in reversed(range(si - ss)):
|
| 598 |
+
if gold_words[gs + g].columns[FORM].lower() == system_words[ss + s].columns[FORM].lower():
|
| 599 |
+
lcs[g][s] = 1 + (lcs[g+1][s+1] if g+1 < gi-gs and s+1 < si-ss else 0)
|
| 600 |
+
lcs[g][s] = max(lcs[g][s], lcs[g+1][s] if g+1 < gi-gs else 0)
|
| 601 |
+
lcs[g][s] = max(lcs[g][s], lcs[g][s+1] if s+1 < si-ss else 0)
|
| 602 |
+
return lcs
|
| 603 |
+
|
| 604 |
+
def align_words(gold_words, system_words):
    """
    Align gold and system words into an Alignment object.

    Words whose spans match exactly are aligned directly; wherever a
    multiword token is involved on either side, the minimal covering
    "multiword span" is found and the words inside it are aligned via
    a longest common subsequence of their (lowercased) forms.
    """
    alignment = Alignment(gold_words, system_words)

    # gi/si walk the gold/system word lists in parallel.
    gi, si = 0, 0
    while gi < len(gold_words) and si < len(system_words):
        if gold_words[gi].is_multiword or system_words[si].is_multiword:
            # A: Multi-word tokens => align via LCS within the whole "multiword span".
            # find_multiword_span advances gi/si past the span and returns its start.
            gs, ss, gi, si = find_multiword_span(gold_words, system_words, gi, si)

            # Only attempt LCS alignment if the span is non-empty on both sides.
            if si > ss and gi > gs:
                lcs = compute_lcs(gold_words, system_words, gi, si, gs, ss)

                # Store aligned words
                # Walk the LCS table top-down: take a pair when forms match and
                # taking it is consistent with the table; otherwise skip the side
                # whose skip preserves the LCS length.
                s, g = 0, 0
                while g < gi - gs and s < si - ss:
                    if gold_words[gs + g].columns[FORM].lower() == system_words[ss + s].columns[FORM].lower():
                        alignment.append_aligned_words(gold_words[gs+g], system_words[ss+s])
                        g += 1
                        s += 1
                    elif lcs[g][s] == (lcs[g+1][s] if g+1 < gi-gs else 0):
                        g += 1
                    else:
                        s += 1
        else:
            # B: No multi-word token => align according to spans.
            if (gold_words[gi].span.start, gold_words[gi].span.end) == (system_words[si].span.start, system_words[si].span.end):
                alignment.append_aligned_words(gold_words[gi], system_words[si])
                gi += 1
                si += 1
            elif gold_words[gi].span.start <= system_words[si].span.start:
                # Gold word starts earlier (or at the same place with a different
                # end): it cannot match any later system word, so skip it.
                gi += 1
            else:
                si += 1

    return alignment
|
| 639 |
+
|
| 640 |
+
# Check that the underlying character sequences match.
|
| 641 |
+
if gold_ud.characters != system_ud.characters:
|
| 642 |
+
# Identify the surrounding tokens and line numbers so the error is easier to debug.
|
| 643 |
+
index = 0
|
| 644 |
+
while index < len(gold_ud.characters) and index < len(system_ud.characters) and \
|
| 645 |
+
gold_ud.characters[index] == system_ud.characters[index]:
|
| 646 |
+
index += 1
|
| 647 |
+
gtindex = 0
|
| 648 |
+
while gtindex < len(gold_ud.tokens) and gold_ud.tokens[gtindex].end-1 < index:
|
| 649 |
+
gtindex += 1
|
| 650 |
+
stindex = 0
|
| 651 |
+
while stindex < len(system_ud.tokens) and system_ud.tokens[stindex].end-1 < index:
|
| 652 |
+
stindex += 1
|
| 653 |
+
gtokenreport = "The error occurs right at the beginning of the two files.\n"
|
| 654 |
+
stokenreport = ""
|
| 655 |
+
if gtindex > 0:
|
| 656 |
+
nprev = 10 if gtindex >= 10 else gtindex
|
| 657 |
+
nnext = 10 if gtindex + 10 <= len(gold_ud.tokens) else len(gold_ud.tokens) - gtindex
|
| 658 |
+
nfirst = gtindex - nprev
|
| 659 |
+
prevtokens = ' '.join([''.join(gold_ud.characters[t.start:t.end]) for t in gold_ud.tokens[nfirst:gtindex]])
|
| 660 |
+
nexttokens = ' '.join([''.join(gold_ud.characters[t.start:t.end]) for t in gold_ud.tokens[gtindex:gtindex + nnext]])
|
| 661 |
+
gtokenreport = "File '{}':\n".format(gold_ud.path)
|
| 662 |
+
gtokenreport += " Token no. {} on line no. {} is the last one with all characters reproduced in the other file.\n".format(gtindex, gold_ud.tokens[gtindex-1].line)
|
| 663 |
+
gtokenreport += " The previous {} tokens are '{}'.\n".format(nprev, prevtokens)
|
| 664 |
+
gtokenreport += " The next {} tokens are '{}'.\n".format(nnext, nexttokens)
|
| 665 |
+
if stindex > 0:
|
| 666 |
+
nprev = 10 if stindex >= 10 else stindex
|
| 667 |
+
nnext = 10 if stindex + 10 <= len(system_ud.tokens) else len(system_ud.tokens) - stindex
|
| 668 |
+
nfirst = stindex - nprev
|
| 669 |
+
prevtokens = ' '.join([''.join(system_ud.characters[t.start:t.end]) for t in system_ud.tokens[nfirst:stindex]])
|
| 670 |
+
nexttokens = ' '.join([''.join(system_ud.characters[t.start:t.end]) for t in system_ud.tokens[stindex:stindex + nnext]])
|
| 671 |
+
stokenreport = "File '{}':\n".format(system_ud.path)
|
| 672 |
+
stokenreport += " Token no. {} on line no. {} is the last one with all characters reproduced in the other file.\n".format(stindex, system_ud.tokens[stindex-1].line)
|
| 673 |
+
stokenreport += " The previous {} tokens are '{}'.\n".format(nprev, prevtokens)
|
| 674 |
+
stokenreport += " The next {} tokens are '{}'.\n".format(nnext, nexttokens)
|
| 675 |
+
raise UDError(
|
| 676 |
+
"The concatenation of tokens in gold file and in system file differ!\n" + gtokenreport + stokenreport +
|
| 677 |
+
"First 20 differing characters in gold file: '{}' and system file: '{}'".format(
|
| 678 |
+
"".join(gold_ud.characters[index:index + 20]),
|
| 679 |
+
"".join(system_ud.characters[index:index + 20])
|
| 680 |
+
)
|
| 681 |
+
)
|
| 682 |
+
|
| 683 |
+
# Align words
|
| 684 |
+
alignment = align_words(gold_ud.words, system_ud.words)
|
| 685 |
+
|
| 686 |
+
# Compute the F1-scores
|
| 687 |
+
return {
|
| 688 |
+
"Tokens": spans_score(gold_ud.tokens, system_ud.tokens),
|
| 689 |
+
"Sentences": spans_score(gold_ud.sentences, system_ud.sentences),
|
| 690 |
+
"Words": alignment_score(alignment),
|
| 691 |
+
"UPOS": alignment_score(alignment, lambda w, _: w.columns[UPOS]),
|
| 692 |
+
"XPOS": alignment_score(alignment, lambda w, _: w.columns[XPOS]),
|
| 693 |
+
"UFeats": alignment_score(alignment, lambda w, _: w.columns[FEATS]),
|
| 694 |
+
"AllTags": alignment_score(alignment, lambda w, _: (w.columns[UPOS], w.columns[XPOS], w.columns[FEATS])),
|
| 695 |
+
"Lemmas": alignment_score(alignment, lambda w, ga: w.columns[LEMMA] if ga(w).columns[LEMMA] != "_" else "_"),
|
| 696 |
+
"UAS": alignment_score(alignment, lambda w, ga: ga(w.parent)),
|
| 697 |
+
"LAS": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL])),
|
| 698 |
+
"ELAS": enhanced_alignment_score(alignment, 0),
|
| 699 |
+
"EULAS": enhanced_alignment_score(alignment, 1),
|
| 700 |
+
"CLAS": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL]),
|
| 701 |
+
filter_fn=lambda w: w.is_content_deprel),
|
| 702 |
+
"MLAS": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL], w.columns[UPOS], w.columns[FEATS],
|
| 703 |
+
[(ga(c), c.columns[DEPREL], c.columns[UPOS], c.columns[FEATS])
|
| 704 |
+
for c in w.functional_children]),
|
| 705 |
+
filter_fn=lambda w: w.is_content_deprel),
|
| 706 |
+
"BLEX": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL],
|
| 707 |
+
w.columns[LEMMA] if ga(w).columns[LEMMA] != "_" else "_"),
|
| 708 |
+
filter_fn=lambda w: w.is_content_deprel),
|
| 709 |
+
}
|
| 710 |
+
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
def load_conllu_file(path, treebank_type=None):
    """
    Reads a CoNLL-U file into internal representation.

    Parameters
    ----------
    path : str
        The name of (and path to) the file.
    treebank_type : dict, optional
        Additional information about what we expect / should read. The default is None.

    Returns
    -------
    UDRepresentation
        The internal representation of the file contents, usable in evaluate().
    """
    if treebank_type is None:
        treebank_type = {}
    # Use a context manager so the handle is closed after load_conllu() has
    # consumed the stream; the previous version opened the file and never
    # closed it. The obfuscated `**({"encoding": "utf-8"})` kwargs splat was
    # replaced by a plain keyword argument.
    # NOTE(review): assumes load_conllu() reads the stream eagerly — confirm.
    with open(path, mode="r", encoding="utf-8") as conllu_file:
        return load_conllu(conllu_file, path, treebank_type)
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
def evaluate_wrapper(args):
    """
    Takes file names and options from command line arguments, loads the files,
    evaluates their similarity and returns the result of evaluate(). Use
    `--help` to obtain their description (or see udtools.argparser.parse_args_scorer()).

    Parameters
    ----------
    args : argparse.Namespace
        Command line arguments of the eval.py script.

    Returns
    -------
    dict
        Indexed by metric names, values are scores.
    """
    # Each digit that may appear in args.enhancements switches one treebank
    # property on (1) or leaves it off (0).
    enhancement_properties = {
        '1': 'no_gapping',
        '2': 'no_shared_parents_in_coordination',
        '3': 'no_shared_dependents_in_coordination',
        '4': 'no_control',
        '5': 'no_external_arguments_of_relative_clauses',
        '6': 'no_case_info',
    }
    requested = set(args.enhancements)
    treebank_type = {prop: int(digit in requested)
                     for digit, prop in enhancement_properties.items()}
    treebank_type['no_empty_nodes'] = args.no_empty_nodes
    treebank_type['multiple_roots_okay'] = args.multiple_roots_okay

    # Load both CoNLL-U files and score the system output against the gold data.
    gold_ud = load_conllu_file(args.gold_file, treebank_type)
    system_ud = load_conllu_file(args.system_file, treebank_type)
    return evaluate(gold_ud, system_ud)
|
| 767 |
+
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
def build_evaluation_table(evaluation, verbose=True, counts=False, enhanced=False):
    """
    Creates a plaintext table with the results.

    Parameters
    ----------
    evaluation : dict
        The output of the evaluate() function.
    verbose : bool, optional
        Print results of all metrics. (Otherwise, print only LAS P+R+F1.)
        Default is True.
    counts : bool, optional
        Print raw counts of correct/gold/system/aligned words instead of
        precision/recall/F1 for all metrics. Default is False.
    enhanced : bool, optional
        Include evaluation of enhanced graphs. Default is False.

    Returns
    -------
    str
        The table with results.
    """
    lines = []

    # Short summary mode: just the headline F1 scores, one per line.
    if not verbose and not counts:
        lines.append("LAS F1 Score: {:.2f}".format(100 * evaluation["LAS"].f1))
        lines.append("MLAS Score: {:.2f}".format(100 * evaluation["MLAS"].f1))
        lines.append("BLEX Score: {:.2f}".format(100 * evaluation["BLEX"].f1))
        if enhanced:
            lines.append("ELAS F1 Score: {:.2f}".format(100 * evaluation["ELAS"].f1))
            lines.append("EULAS F1 Score: {:.2f}".format(100 * evaluation["EULAS"].f1))
        return "\n".join(lines)

    # Full table mode: header row, separator, then one row per metric.
    if counts:
        lines.append("Metric | Correct | Gold | Predicted | Aligned")
    else:
        lines.append("Metric | Precision | Recall | F1 Score | AligndAcc")
    lines.append("-----------+-----------+-----------+-----------+-----------")
    metrics = ["Tokens", "Sentences", "Words", "UPOS", "XPOS", "UFeats", "AllTags", "Lemmas", "UAS", "LAS", "CLAS", "MLAS", "BLEX"]
    if enhanced:
        metrics += ["ELAS", "EULAS"]
    for metric in metrics:
        score = evaluation[metric]
        if counts:
            # For "Words" there is no aligned_total; fall back to the number
            # of correct words, otherwise leave the cell blank.
            aligned_cell = score.aligned_total or (score.correct if metric == "Words" else "")
            lines.append("{:11}|{:10} |{:10} |{:10} |{:10}".format(
                metric,
                score.correct,
                score.gold_total,
                score.system_total,
                aligned_cell
            ))
        else:
            # AlignedAcc is undefined for span-based metrics; print nothing.
            aligned_cell = ("{:10.2f}".format(100 * score.aligned_accuracy)
                            if score.aligned_accuracy is not None else "")
            lines.append("{:11}|{:10.2f} |{:10.2f} |{:10.2f} |{}".format(
                metric,
                100 * score.precision,
                100 * score.recall,
                100 * score.f1,
                aligned_cell
            ))

    return "\n".join(lines)
|
| 830 |
+
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
# Tests, which can be executed with `python -m unittest udeval`.
class TestAlignment(unittest.TestCase):
    """Unit tests for the word alignment between gold and system files."""

    @staticmethod
    def _load_words(words):
        """Prepare fake CoNLL-U files with fake HEAD to prevent multiple roots errors.

        Each item of `words` is either a plain form ("a") producing one word
        line, or "token w1 w2 ..." producing a multiword token line covering
        the listed syntactic words.
        """
        lines, num_words = [], 0
        for w in words:
            parts = w.split(" ")
            if len(parts) == 1:
                # Plain word: the first word of the sentence is the root (HEAD 0),
                # all following words attach to word 1.
                num_words += 1
                lines.append("{}\t{}\t_\t_\t_\t_\t{}\t_\t_\t_".format(num_words, parts[0], int(num_words>1)))
            else:
                # Multiword token line followed by its syntactic words.
                lines.append("{}-{}\t{}\t_\t_\t_\t_\t_\t_\t_\t_".format(num_words + 1, num_words + len(parts) - 1, parts[0]))
                for part in parts[1:]:
                    num_words += 1
                    lines.append("{}\t{}\t_\t_\t_\t_\t{}\t_\t_\t_".format(num_words, part, int(num_words>1)))
        # The package is Python-3 only (f-strings are used elsewhere), so the
        # former Python-2 io.BytesIO fallback was removed.
        return load_conllu(io.StringIO("\n".join(lines+["\n"])),
                           "in memory test file", {})

    def _test_exception(self, gold, system):
        """Assert that evaluating gold against system raises UDError."""
        self.assertRaises(UDError, evaluate, self._load_words(gold), self._load_words(system))

    def _test_ok(self, gold, system, correct):
        """Assert the Words P/R/F1 given the expected number of correctly aligned words."""
        metrics = evaluate(self._load_words(gold), self._load_words(system))
        # A multiword entry "token w1 ... wN" contributes N words, a plain entry 1.
        gold_words = sum((max(1, len(word.split(" ")) - 1) for word in gold))
        system_words = sum((max(1, len(word.split(" ")) - 1) for word in system))
        self.assertEqual((metrics["Words"].precision, metrics["Words"].recall, metrics["Words"].f1),
                         (correct / system_words, correct / gold_words, 2 * correct / (gold_words + system_words)))

    def test_exception(self):
        # Mismatched underlying characters must raise.
        self._test_exception(["a"], ["b"])

    def test_equal(self):
        self._test_ok(["a"], ["a"], 1)
        self._test_ok(["a", "b", "c"], ["a", "b", "c"], 3)

    def test_equal_with_multiword(self):
        self._test_ok(["abc a b c"], ["a", "b", "c"], 3)
        self._test_ok(["a", "bc b c", "d"], ["a", "b", "c", "d"], 4)
        self._test_ok(["abcd a b c d"], ["ab a b", "cd c d"], 4)
        self._test_ok(["abc a b c", "de d e"], ["a", "bcd b c d", "e"], 5)

    def test_alignment(self):
        self._test_ok(["abcd"], ["a", "b", "c", "d"], 0)
        self._test_ok(["abc", "d"], ["a", "b", "c", "d"], 1)
        self._test_ok(["a", "bc", "d"], ["a", "b", "c", "d"], 2)
        self._test_ok(["a", "bc b c", "d"], ["a", "b", "cd"], 2)
        self._test_ok(["abc a BX c", "def d EX f"], ["ab a b", "cd c d", "ef e f"], 4)
        self._test_ok(["ab a b", "cd bc d"], ["a", "bc", "d"], 2)
        self._test_ok(["a", "bc b c", "d"], ["ab AX BX", "cd CX a"], 1)
|
ud-tools/udtools/src/udtools/utils.py
ADDED
|
@@ -0,0 +1,434 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import regex as re
|
| 2 |
+
# Allow using this module from the root folder of tools even if it is not
|
| 3 |
+
# installed as a package: use the relative path validator/src/validator for
|
| 4 |
+
# submodules. If the path is not available, try the standard qualification,
|
| 5 |
+
# assuming that the user has installed udtools from PyPI and then called
|
| 6 |
+
# from udtools import Validator.
|
| 7 |
+
try:
|
| 8 |
+
from udtools.src.udtools.incident import Reference
|
| 9 |
+
except ModuleNotFoundError:
|
| 10 |
+
from udtools.incident import Reference
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Constants for the column indices
# A CoNLL-U token/word line has exactly 10 tab-separated columns.
COLCOUNT=10
# Symbolic indices into a parsed line, e.g. cols[FORM], cols[DEPS].
ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC=range(COLCOUNT)
# Column names in canonical order, for error messages and reports.
COLNAMES='ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC'.split(',')
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class CompiledRegexes:
    """
    The CompiledRegexes class holds various regular expressions needed to
    recognize individual elements of the CoNLL-U format, precompiled to speed
    up parsing. Individual expressions are typically not enclosed in ^...$
    because one can use re.fullmatch() if it is desired that the whole string
    matches the expression.

    Note: the patterns use the third-party `regex` module (imported as `re`
    in this module) because some rely on Unicode property classes \\p{...}.
    """
    def __init__(self):
        # Whitespace.
        self.ws = re.compile(r"\s+")
        # Two consecutive whitespaces.
        self.ws2 = re.compile(r"\s\s")
        # Regular word/node id: integer number (no leading zeros).
        self.wordid = re.compile(r"[1-9][0-9]*")
        # Multiword token id: range of integers.
        # The two parts are bracketed so they can be captured and processed separately.
        self.mwtid = re.compile(r"([1-9][0-9]*)-([1-9][0-9]*)")
        # Empty node id: "decimal" number (but 1.10 != 1.1).
        # The two parts are bracketed so they can be captured and processed separately.
        self.enodeid = re.compile(r"([0-9]+)\.([1-9][0-9]*)")
        # New document comment line. Document id, if present, is bracketed.
        self.newdoc = re.compile(r"#\s*newdoc(?:\s+(\S+))?")
        # New paragraph comment line. Paragraph id, if present, is bracketed.
        self.newpar = re.compile(r"#\s*newpar(?:\s+(\S+))?")
        # Sentence id comment line. The actual id is bracketed.
        self.sentid = re.compile(r"#\s*sent_id\s*=\s*(\S+)")
        # Parallel sentence id comment line. The actual id as well as its predefined parts are bracketed.
        self.parallelid = re.compile(r"#\s*parallel_id\s*=\s*(([a-z]+)/([-0-9a-z]+)(?:/(alt[1-9][0-9]*|part[1-9][0-9]*|alt[1-9][0-9]*part[1-9][0-9]*))?)")
        # Sentence text comment line. The actual text is bracketed.
        self.text = re.compile(r"#\s*text\s*=\s*(.*\S)")
        # Global entity comment is a declaration of entity attributes in MISC.
        # It occurs once per document and it is optional (only CorefUD data).
        # The actual attribute declaration is bracketed so it can be captured in the match.
        self.global_entity = re.compile(r"#\s*global\.Entity\s*=\s*(.+)")
        # UPOS tag.
        self.upos = re.compile(r"[A-Z]+")
        # Feature=value pair.
        # Feature name and feature value are bracketed so that each can be captured separately in the match.
        self.featval = re.compile(r"([A-Z][A-Za-z0-9]*(?:\[[a-z0-9]+\])?)=(([A-Z0-9][A-Z0-9a-z]*)(,([A-Z0-9][A-Z0-9a-z]*))*)")
        # A single feature value.
        self.val = re.compile(r"[A-Z0-9][A-Za-z0-9]*")
        # Basic parent reference (HEAD).
        self.head = re.compile(r"(0|[1-9][0-9]*)")
        # Enhanced parent reference (head).
        self.ehead = re.compile(r"(0|[1-9][0-9]*)(\.[1-9][0-9]*)?")
        # Basic dependency relation (including optional subtype).
        self.deprel = re.compile(r"[a-z]+(:[a-z]+)?")
        # Enhanced dependency relation (possibly with Unicode subtypes).
        # Ll ... lowercase Unicode letters
        # Lm ... modifier Unicode letters (e.g., superscript h)
        # Lo ... other Unicode letters (all caseless scripts, e.g., Arabic)
        # M .... combining diacritical marks
        # Underscore is allowed between letters but not at beginning, end, or next to another underscore.
        edeprelpart_resrc = r'[\p{Ll}\p{Lm}\p{Lo}\p{M}]+(_[\p{Ll}\p{Lm}\p{Lo}\p{M}]+)*'
        # There must be always the universal part, consisting only of ASCII letters.
        # There can be up to three additional, colon-separated parts: subtype, preposition and case.
        # One of them, the preposition, may contain Unicode letters. We do not know which one it is
        # (only if there are all four parts, we know it is the third one).
        # ^[a-z]+(:[a-z]+)?(:[\p{Ll}\p{Lm}\p{Lo}\p{M}]+(_[\p{Ll}\p{Lm}\p{Lo}\p{M}]+)*)?(:[a-z]+)?$
        edeprel_resrc = '^[a-z]+(:[a-z]+)?(:' + edeprelpart_resrc + ')?(:[a-z]+)?$'
        self.edeprel = re.compile(edeprel_resrc)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# Global variables:
|
| 86 |
+
crex = CompiledRegexes()
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
# Support functions.
|
| 91 |
+
|
| 92 |
+
def is_whitespace(line):
    """Return a match object if the whole line consists of whitespace only."""
    return crex.ws.fullmatch(line)
|
| 94 |
+
|
| 95 |
+
def is_word(cols):
    """Return a match object if cols[ID] is a regular word id (integer)."""
    return crex.wordid.fullmatch(cols[ID])
|
| 97 |
+
|
| 98 |
+
def is_multiword_token(cols):
    """Return a match object if cols[ID] is a multiword token range like '4-5'."""
    return crex.mwtid.fullmatch(cols[ID])
|
| 100 |
+
|
| 101 |
+
def is_empty_node(cols):
    """Return a match object if cols[ID] is an empty node id like '5.1'."""
    return crex.enodeid.fullmatch(cols[ID])
|
| 103 |
+
|
| 104 |
+
def parse_empty_node_id(cols):
    """Return the (major, minor) string parts of an empty node id like '1.2'.

    Must only be called on lines for which is_empty_node() is true.
    """
    m = crex.enodeid.fullmatch(cols[ID])
    assert m, 'parse_empty_node_id with non-empty node'
    return m.groups()
|
| 108 |
+
|
| 109 |
+
def shorten(string):
    """Truncate a long string for use in error messages.

    Strings shorter than 25 characters are returned unchanged; anything
    longer is cut to its first 20 characters plus a '[...]' marker.
    """
    if len(string) < 25:
        return string
    return string[:20] + '[...]'
|
| 111 |
+
|
| 112 |
+
def lspec2ud(deprel):
    """Strip any language-specific subtype from a dependency relation.

    E.g. 'acl:relcl' -> 'acl'; a bare universal relation is returned as-is.
    """
    universal, _, _ = deprel.partition(':')
    return universal
|
| 114 |
+
|
| 115 |
+
def nodeid2tuple(nodeid: str):
    """
    Node ID can look like a decimal number, but 1.1 != 1.10. To be able to
    sort node IDs, we need to be able to convert them to a pair of integers
    (major and minor). For IDs of regular nodes, the ID will be converted to
    int (major) and the minor will be set to zero.
    """
    major, _, minor = nodeid.partition('.')
    if minor:
        return (int(major), int(minor))
    return (int(major), 0)
|
| 126 |
+
|
| 127 |
+
def formtl(node):
    """
    Return the word form of a node, with its transliteration appended when
    the MISC column provides one.

    Parameters
    ----------
    node : udapi.core.node.Node object
        The node whose form we want to get.

    Returns
    -------
    str
        'form translit' (space-separated) if MISC has a Translit attribute,
        otherwise just the form.
    """
    translit = node.misc['Translit']
    if translit != '':
        return node.form + ' ' + translit
    return node.form
|
| 147 |
+
|
| 148 |
+
def lemmatl(node):
    """
    Return the lemma of a node, with its transliteration appended when the
    MISC column provides one.

    Parameters
    ----------
    node : udapi.core.node.Node object
        The node whose lemma we want to get.

    Returns
    -------
    str
        'lemma ltranslit' (space-separated) if MISC has an LTranslit
        attribute, otherwise just the lemma.
    """
    ltranslit = node.misc['LTranslit']
    if ltranslit != '':
        return node.lemma + ' ' + ltranslit
    return node.lemma
|
| 168 |
+
|
| 169 |
+
def get_alt_language(node):
    """
    In code-switching analysis of foreign words, the 'Lang' attribute in the
    MISC column holds the language code of the current word. Certain tests
    then use language-specific lists from that language instead of the main
    language of the document.

    Parameters
    ----------
    node : udapi.core.node.Node object
        The node (word) whose language is being queried.

    Returns
    -------
    str or None
        The alternative language code, or None when MISC has no Lang value.
    """
    lang = node.misc['Lang']
    return lang if lang != '' else None
|
| 185 |
+
|
| 186 |
+
def deps_list(cols):
    """
    Parses the contents of the DEPS column and returns a list of incoming
    enhanced dependencies. This is needed in early tests, before the sentence
    has been fed to Udapi.

    Parameters
    ----------
    cols : list
        The values of the columns on the current node / token line.

    Raises
    ------
    ValueError
        If the contents of DEPS cannot be parsed. Note that this does not catch
        all possible violations of the format, e.g., bad order of the relations
        will not raise an exception.

    Returns
    -------
    deps : list
        Each list item is a two-member list, containing the parent index (head)
        and the relation type (deprel).
    """
    if cols[DEPS] == '_':
        return []
    deps = []
    for dep in cols[DEPS].split('|'):
        # Split only on the first colon: deprel subtypes may contain colons.
        head_deprel = dep.split(':', 1)
        if len(head_deprel) != 2:
            raise ValueError(f'malformed DEPS: {cols[DEPS]}')
        deps.append(head_deprel)
    return deps
|
| 217 |
+
|
| 218 |
+
def get_line_numbers_for_ids(state, sentence):
    """
    Maps every mwt / word / empty-node ID of a sentence to the 1-based number
    of the input-file line where it occurs, so that later checks can report
    the line on which an error occurred.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run; state.sentence_line is the line on
        which the current sentence starts.
    sentence : list
        List of mwt / words / nodes, each represented as a list of columns.

    Returns
    -------
    linenos : dict
        Key: word ID (string, not int; decimal for empty nodes and range for
        mwt lines). Value: 1-based index of the line in the file (int).
        Regular words are additionally indexed under their integer ID.
    """
    linenos = {}
    for offset, cols in enumerate(sentence):
        lineno = state.sentence_line + offset
        linenos[cols[ID]] = lineno
        # Regular words are stored under integer keys too, in case a caller
        # forgets to convert node.ord to string; this is not possible for
        # empty nodes and multiword tokens.
        if is_word(cols):
            linenos[int(cols[ID])] = lineno
    return linenos
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def next_sentence(state, inp):
    """
    Generator yielding one sentence at a time from the input stream.

    Iterate over it in a 'for sentence_lines in next_sentence(state, inp)'
    loop; each iteration reads the next sentence from the input. A sentence
    is terminated by an empty line (or, defensively, by a line containing
    only whitespace, to avoid subsequent misleading error messages). Lines
    found after the last empty line are yielded as a final sentence.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run; state.current_line is updated to
        the 1-based number of the most recently read line.
    inp : file handle
        A file open for reading or STDIN.

    Yields
    ------
    sentence_lines : list(str)
        List of CoNLL-U lines that correspond to one sentence, including
        initial comments (if any) and the final empty line, with trailing
        newline characters removed.
    """
    pending = []
    for lineno, raw_line in enumerate(inp, start=1):
        state.current_line = lineno
        stripped = raw_line.rstrip("\n")
        pending.append(stripped)
        if not stripped or is_whitespace(stripped):
            yield pending
            pending = []
    # End of file: flush any lines found after the last sentence terminator.
    if pending:
        yield pending
|
| 288 |
+
|
| 289 |
+
def features_present(state, line):
    """
    Records that the corpus does contain morphological features.

    In general, the annotation of morphological features is optional, although
    highly encouraged. However, if the treebank does have features, then certain
    features become required. This function is called when the first morphological
    feature is encountered. It remembers that from now on, missing features can
    be reported as errors. In addition, if any such errors have already been
    encountered, they will be reported now.

    Parameters
    ----------
    state : udtools.state.State
        The state of the validation run.
    line : int
        Number of the line where the current node occurs in the file.

    Reads from state
    ----------------
    seen_morpho_feature : int
        Line number of the first occurrence of a morphological feature in the
        corpus. None if no feature has been encountered so far.
    delayed_feature_errors : dict
        Missing-feature incidents recorded before it was known whether the
        corpus has features at all.

    Writes to state
    ---------------
    seen_morpho_feature : int
        Set to `line` on the first call.
    """
    if state.seen_morpho_feature:
        return
    state.seen_morpho_feature = line
    # The corpus has features after all, so every previously delayed
    # missing-feature error is now confirmed as a real error.
    for delayed in state.delayed_feature_errors.values():
        for occurrence in delayed['occurrences']:
            occurrence['incident'].confirm()
|
| 326 |
+
|
| 327 |
+
def get_caused_nonprojectivities(node):
    """
    Checks whether a node is in a gap of a nonprojective edge. Report true only
    if the node's parent is not in the same gap. (We use this function to check
    that a punctuation node does not cause nonprojectivity. But if it has been
    dragged to the gap with a larger subtree, then we do not blame it.) This
    extra condition makes this function different from node.is_nonprojective_gap();
    another difference is that instead of just detecting the nonprojectivity,
    we return the nonprojective nodes so we can report them.

    Parameters
    ----------
    node : udapi.core.node.Node object
        The tree node to be tested.

    Returns
    -------
    cross : list of udapi.core.node.Node objects
        The nodes whose attachment is nonprojective because of the current node.
    """
    all_nodes = node.root.descendants
    node_ord = node.ord
    parent_ord = node.parent.ord
    # Collect the ancestors of the node (parent up to and including the root);
    # nonprojectivities caused by an ancestor are not blamed on this node.
    ancestors = []
    climber = node.parent
    while True:
        ancestors.append(climber)
        if climber.is_root():
            break
        climber = climber.parent
    last_ord = all_nodes[-1].ord
    # Candidate spans to either side of the node. Do not look beyond the
    # parent (if it is in the same gap, it is the parent's responsibility).
    # Ranges are open from the right, i.e., the end bound is excluded.
    if parent_ord < node_ord:
        span_left = range(parent_ord + 1, node_ord)
        span_right = range(node_ord + 1, last_ord + 1)
    else:
        span_left = range(1, node_ord)
        span_right = range(node_ord + 1, parent_ord)
    crossing = []
    for other in all_nodes:
        # Nodes attached to an ancestor of the current node are projective
        # with respect to it.
        if other.parent in ancestors:
            continue
        other_ord = other.ord
        other_parent_ord = other.parent.ord
        if other_ord in span_left and other_parent_ord > node_ord:
            # Left-side node attached to the right across the current node.
            # When the parent lies right of the node, additionally exclude
            # attachments that only cross because of the parent.
            if parent_ord < node_ord or other_parent_ord < parent_ord:
                crossing.append(other)
        elif other_ord in span_right and other_parent_ord < node_ord:
            # Right-side node attached to the left across the current node;
            # symmetric exclusion when the parent lies left of the node.
            if parent_ord > node_ord or other_parent_ord > parent_ord:
                crossing.append(other)
    # Do not return just a boolean value: the caller reports these nodes.
    return sorted(crossing)
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def get_gap(node):
    """
    Returns the list of nodes between node and its parent that are not dominated
    by the parent. If the list is not empty, the node is attached nonprojectively.

    Note that the Udapi Node class does not have a method like this. It has
    is_nonprojective(), which returns the boolean decision without showing the
    nodes in the gap. There is also the function is_nonprojective_gap() but it,
    too, does not deliver what we need.

    Parameters
    ----------
    node : udapi.core.node.Node object
        The tree node to be tested.

    Returns
    -------
    gap : list of udapi.core.node.Node objects
        The nodes in the gap of the current node's relation to its parent,
        sorted by their ords (IDs).
    """
    lower, upper = sorted((node.ord, node.parent.ord))
    between = range(lower + 1, upper)
    if not between:
        # Node and parent are adjacent: no gap is possible.
        return []
    parent_subtree = node.parent.descendants
    # The gap consists of nodes between the two positions that the parent
    # does not dominate.
    return sorted(n for n in node.root.descendants
                  if n.ord in between and n not in parent_subtree)
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def create_references(nodes, state, comment=''):
    """
    Takes a list of nodes and converts it to a list of Reference objects to be
    reported with an Incident.

    Parameters
    ----------
    nodes : list(udapi.core.node.Node)
        The nodes to which we wish to refer.
    state : udtools.state.State
        The state of the validation run.
    comment : str
        The comment to add to each reference.

    Returns
    -------
    references : list(udtools.incident.Reference)
    """
    references = []
    for current in nodes:
        # Line numbers in state are indexed by the string form of the ord.
        ordstr = str(current.ord)
        references.append(Reference(nodeid=ordstr,
                                    sentid=state.sentence_id,
                                    filename=state.get_current_file_name(),
                                    lineno=state.current_node_linenos[ordstr],
                                    comment=comment))
    return references
|
ud-tools/udtools/src/udtools/validator.py
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /usr/bin/env python3
|
| 2 |
+
# Original code (2015) by Filip Ginter and Sampo Pyysalo.
|
| 3 |
+
# DZ 2018-11-04: Porting the validator to Python 3.
|
| 4 |
+
# DZ: Many subsequent changes. See the git history.
|
| 5 |
+
import sys
|
| 6 |
+
import io
|
| 7 |
+
import argparse
|
| 8 |
+
# Once we know that the low-level CoNLL-U format is OK, we will be able to use
|
| 9 |
+
# the Udapi library to access the data and perform the tests at higher levels.
|
| 10 |
+
import udapi.block.read.conllu
|
| 11 |
+
# Allow using this module from the root folder of tools even if it is not
|
| 12 |
+
# installed as a package: use the relative path validator/src/validator for
|
| 13 |
+
# submodules. If the path is not available, try the standard qualification,
|
| 14 |
+
# assuming that the user has installed udtools from PyPI and then called
|
| 15 |
+
# from udtools import Validator.
|
| 16 |
+
try:
|
| 17 |
+
import udtools.src.udtools.utils as utils
|
| 18 |
+
from udtools.src.udtools.incident import Error, TestClass
|
| 19 |
+
from udtools.src.udtools.state import State
|
| 20 |
+
import udtools.src.udtools.data as data
|
| 21 |
+
from udtools.src.udtools.level6 import Level6
|
| 22 |
+
except ModuleNotFoundError:
|
| 23 |
+
import udtools.utils as utils
|
| 24 |
+
from udtools.incident import Error, TestClass
|
| 25 |
+
from udtools.state import State
|
| 26 |
+
import udtools.data as data
|
| 27 |
+
from udtools.level6 import Level6
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class Validator(Level6):
    """
    Validator of treebanks in the CoNLL-U format.

    Inherits the individual tests from the Level1..Level6 classes and provides
    the entry points that drive validation of whole file lists, single files,
    open file handles, single sentences, and the treebank-final checks.
    """

    def __init__(self, lang=None, level=None, check_coref=None, args=None, datapath=None, output=sys.stderr, max_store=0):
        """
        Initialization of the Validator class.

        Parameters
        ----------
        lang : str
            ISO code of the main language of the data to be validated.
            If not provided separately, it will be searched for in args.
            If not provided in args either, default is 'ud' (no lang-spec tests).
        level : int
            Validation level ranging from 1 to 5.
            If not provided separately, it will be searched for in args.
            If not provided in args either, default is 5 (all UD tests).
        check_coref : bool
            Should the optional coreference-related tests be performed?
            If not provided separately, it will be searched for in args.
            The default value is False.
        args : argparse.Namespace, optional
            Parsed commandline arguments, if any. The default is None.
            Validator itself does not need to search this namespace unless one
            of its own arguments (lang, level... see above) is not provided
            directly to the constructor. However, there may be other arguments
            that have to be passed to the Incident class whenever an incident
            (error or warning) is recorded by the Validator.
        datapath : str, optional
            Path to the folder with JSON files specifying language-specific
            behavior. If not provided, the Data class will try expected
            locations relative to the module.
        output : outstream object, default sys.stderr
            Where to report incidents when they are encountered. Default is
            sys.stderr, it could be also sys.stdout, an open file handle, or
            None. If it is None, the output is suppressed (same as the --quiet
            command line option) and errors are only saved in state for later
            processing.
        max_store : int, optional
            How many incidents to store in the validation state? Default 0
            means no limit. Limiting this helps save memory with large
            treebanks and large numbers of incidents. Especially if the
            intended use of the Validator object is to immediately report
            incidents without returning to them later. The limit is applied
            separately to each test class.
        """
        self.data = data.Data(datapath=datapath)
        if not args:
            args = argparse.Namespace()
        # Since we allow args that were not created by our ArgumentParser,
        # we must be prepared that some attributes do not exist. It will be
        # thus safer to access them as a dictionary.
        args_dict = vars(args)
        if not lang:
            # dict.get() returns None for missing keys, so it collapses the
            # "key present and value not None" test into one expression.
            if args_dict.get('lang') is not None:
                lang = args_dict['lang']
            else:
                lang = 'ud'
        if not level:
            if args_dict.get('level') is not None:
                level = args_dict['level']
            else:
                level = 5
        if check_coref is None:
            if args_dict.get('check_coref') is not None:
                check_coref = args_dict['check_coref']
            else:
                check_coref = False
        self.lang = lang
        self.level = level
        self.check_coref = check_coref
        # Instead of saving the args namespace, we should just save the
        # configuration of incident storing and reporting.
        self.incfg = {}
        if 'quiet' in args_dict:
            self.incfg['quiet'] = args_dict['quiet']
        if 'no_warnings' in args_dict:
            self.incfg['no_warnings'] = args_dict['no_warnings']
        if 'exclude' in args_dict and args_dict['exclude']:
            self.incfg['exclude'] = args_dict['exclude']
        if 'include_only' in args_dict and args_dict['include_only']:
            self.incfg['include_only'] = args_dict['include_only']
        if 'max_err' in args_dict:
            self.incfg['max_err'] = args_dict['max_err']
        # With multiple input files, prefix reported incidents with the file name.
        if 'input' in args_dict and len(args_dict['input']) > 1:
            self.incfg['report_filename'] = True
        self.incfg['output'] = output
        self.incfg['max_store'] = max_store
        self.conllu_reader = udapi.block.read.conllu.Conllu()



    #==============================================================================
    # Entry points.
    #==============================================================================


    def validate_files(self, filenames, state=None):
        """
        The main entry point, takes a list of filenames that constitute
        the treebank to be validated. Note that there are tests that consider
        data from the whole treebank across file boundaries, for example the
        uniqueness of sentence ids. Unlike other validation methods, this one
        creates a State object (holding the state of validation) and returns
        it. The other validation methods take the state from the caller and
        use it (read from it and write to it).

        Parameters
        ----------
        filenames : list(str)
            List of paths (filenames) to open and validate together. Filename
            '-' will be interpreted as STDIN.
        state : udtools.state.State, optional
            State from previous validation calls if the current call should
            take them into account. If not provided, a new state will be
            initialized.

        Returns
        -------
        state : udtools.state.State
            The resulting state of the validation. May contain the overview
            of all encountered incidents (errors or warnings) if requested.
        """
        if state is None:
            state = State()
        for filename in filenames:
            self.validate_file(filename, state)
        self.validate_end(state)
        return state


    def validate_file(self, filename, state=None):
        """
        An envelope around validate_file_handle(). Opens a file or uses STDIN,
        then calls validate_file_handle() on it.

        Parameters
        ----------
        filename : str
            Name of the file to be read and validated. '-' means STDIN.
        state : udtools.state.State, optional
            The state of the validation run. If not provided, a new state will
            be initialized.

        Returns
        -------
        state : udtools.state.State
            The resulting state of the validation. May contain the overview
            of all encountered incidents (errors or warnings) if requested.
        """
        if state is None:
            state = State()
        state.current_file_name = filename
        if filename == '-':
            # Set PYTHONIOENCODING=utf-8 before starting Python.
            # See https://docs.python.org/3/using/cmdline.html#envvar-PYTHONIOENCODING
            # Otherwise ANSI will be read in Windows and
            # locale-dependent encoding will be used elsewhere.
            self.validate_file_handle(sys.stdin, state)
        else:
            with io.open(filename, 'r', encoding='utf-8') as inp:
                self.validate_file_handle(inp, state)
        return state


    def validate_file_handle(self, inp, state=None):
        """
        The main entry point for all validation tests applied to one input file.
        It reads sentences from the input stream one by one, each sentence is
        immediately tested.

        Parameters
        ----------
        inp : open file handle
            The CoNLL-U-formatted input stream.
        state : udtools.state.State, optional
            The state of the validation run. If not provided, a new state will
            be initialized.

        Returns
        -------
        state : udtools.state.State
            The resulting state of the validation. May contain the overview
            of all encountered incidents (errors or warnings) if requested.
        """
        if state is None:
            state = State()
        for lines in utils.next_sentence(state, inp):
            self.validate_sentence(lines, state)
        self.check_newlines(state, inp) # level 1
        return state


    def validate_sentence(self, all_lines, state=None):
        """
        Entry point for all validation tests applied to one sentence. It can
        be called from annotation tools to check the sentence once annotated.
        Note that validate_file_handle() calls it after it was able to
        recognize a sequence of lines that constitute a sentence; some low-
        level errors may occur while recognizing the sentence.

        Parameters
        ----------
        all_lines : list(str)
            List of lines in the sentence (comments and tokens), minus final
            empty line, minus newline characters (and minus spurious lines
            that are neither comment lines nor token lines).
        state : udtools.state.State, optional
            The state of the validation run. If not provided, a new state will
            be initialized.

        Returns
        -------
        state : udtools.state.State
            The resulting state of the validation. May contain the overview
            of all encountered incidents (errors or warnings) if requested.
        """
        if state is None:
            state = State()
        state.current_lines = all_lines
        # Low-level errors typically mean that we cannot perform further tests
        # because we could choke on trying to access non-existent data. Or we
        # may succeed in performing them but the error messages may be misleading.
        if not self.check_sentence_lines(state): # level 1
            return state
        if not self.check_sentence_columns(state): # level 1
            return state
        if not self.check_id_sequence(state): # level 1
            return state
        if not self.check_token_range_overlaps(state): # level 1
            return state
        if self.level >= 2:
            if not self.check_id_references(state): # level 2
                return state
            # Check that the basic tree is single-rooted, connected, cycle-free.
            if not self.check_tree(state): # level 2
                return state
            # Tests of individual nodes that operate on pre-Udapi data structures.
            # Some of them (bad feature format) may lead to skipping Udapi completely.
            colssafe = True
            for i in range(len(state.current_token_node_table)):
                lineno = state.sentence_line + i
                cols = state.current_token_node_table[i]
                # Multiword tokens and empty nodes can or must have certain fields empty.
                if utils.is_multiword_token(cols):
                    self.check_mwt_empty_vals(state, cols, lineno)
                if utils.is_empty_node(cols):
                    self.check_empty_node_empty_vals(state, cols, lineno) # level 2
                if utils.is_word(cols) or utils.is_empty_node(cols):
                    self.check_upos(state, cols, lineno) # level 2
                    colssafe = self.check_feats_format(state, cols, lineno) and colssafe # level 2 (level 4 tests will be called later)
                self.check_deprel_format(state, cols, lineno) # level 2
                self.check_deps_format(state, cols, lineno) # level 2; must operate on pre-Udapi DEPS (to see order of relations)
                self.check_misc(state, cols, lineno) # level 2; must operate on pre-Udapi MISC
            if not colssafe:
                return state
            # Get line numbers for all nodes including empty ones (here linenos
            # is a dict indexed by cols[ID], i.e., a string).
            state.current_node_linenos = utils.get_line_numbers_for_ids(state, state.current_token_node_table)
            # Check that enhanced graphs exist either for all sentences or for
            # none.
            self.check_deps_all_or_none(state) # level 2
            # Check sentence-level metadata in the comment lines.
            self.check_sent_id(state) # level 2
            self.check_parallel_id(state) # level 2
            self.check_text_meta(state) # level 2
            # If we successfully passed all the critical tests above, it is
            # probably safe to give the lines to Udapi and ask it to build the
            # tree data structure for us. Udapi does not want to get the
            # terminating empty line.
            tree = self.build_tree_udapi(all_lines)
            # Tests of individual nodes with Udapi.
            nodes = tree.descendants_and_empty
            for node in nodes:
                if self.level >= 3:
                    self.check_zero_root(state, node) # level 3
                    self.check_enhanced_orphan(state, node) # level 3
                if self.level >= 4:
                    # To disallow words with spaces everywhere, use --lang ud.
                    self.check_words_with_spaces(state, node) # level 4
                    self.check_feature_values(state, node) # level 4
                    self.check_deprels(state, node) # level 4
                    if self.level >= 5:
                        self.check_auxiliary_verbs(state, node) # level 5
                        self.check_copula_lemmas(state, node) # level 5
            # Tests on whole trees and enhanced graphs.
            self.check_egraph_connected(state, nodes) # level 2
            if self.level >= 3:
                # Level 3 checks universally valid consequences of annotation
                # guidelines. Look at regular nodes and basic tree, not at
                # enhanced graph.
                basic_nodes = tree.descendants
                for node in basic_nodes:
                    self.check_expected_features(state, node)
                    self.check_upos_vs_deprel(state, node)
                    self.check_flat_foreign(state, node)
                    self.check_left_to_right_relations(state, node)
                    self.check_single_subject(state, node)
                    self.check_single_object(state, node)
                    self.check_nmod_obl(state, node)
                    self.check_orphan(state, node)
                    self.check_functional_leaves(state, node)
                    self.check_fixed_span(state, node)
                    self.check_goeswith_span(state, node)
                    self.check_goeswith_morphology_and_edeps(state, node)
                    self.check_projective_punctuation(state, node)
            # Optional checks for CorefUD treebanks. They operate on MISC and
            # currently do not use the Udapi data structures.
            if self.check_coref:
                self.check_misc_entity(state)
        return state


    def build_tree_udapi(self, lines):
        """
        Calls Udapi to build its data structures from the CoNLL-U lines
        representing one sentence.

        Parameters
        ----------
        lines : list(str)
            Lines as in the CoNLL-U file, including sentence-level comments if
            any, but without the newline character at the end of each line.
            The sentence-terminating empty line is optional in this method.

        Returns
        -------
        root : udapi.core.node.Node object
            The artificial root node (all other nodes and all tree attributes
            can be accessed from it).
        """
        # If the final empty line is present, get rid of it. Udapi would die
        # when trying to access line[0].
        mylines = lines
        if len(mylines) > 0 and (not mylines[-1] or utils.is_whitespace(mylines[-1])):
            mylines = lines[0:-1]
        root = self.conllu_reader.read_tree_from_lines(mylines)
        # We should not return an empty tree (root should not be None).
        # But we should not be here if the lines are so bad that no tree is built.
        # NB: assert is a statement, not a function; no parentheses needed.
        assert root
        return root


    def validate_end(self, state=None):
        """
        Final tests after processing the entire treebank (possibly multiple files).

        Parameters
        ----------
        state : udtools.state.State, optional
            The state of the validation run. If not provided, a new state will
            be initialized. (This is only to unify the interface of all the
            validate_xxx() methods. Note however that specifically for this
            method, it does not make sense to run it without state from other
            validation calls.)

        Returns
        -------
        state : udtools.state.State
            The resulting state of the validation. May contain the overview
            of all encountered incidents (errors or warnings) if requested.
        """
        if state is None:
            state = State()
        # After reading the entire treebank (perhaps multiple files), check whether
        # the DEPS annotation was not a mere copy of the basic trees.
        if self.level > 2 and state.seen_enhanced_graph and not state.seen_enhancement:
            Error(
                state=state, config=self.incfg,
                level=3,
                testclass=TestClass.ENHANCED,
                testid='edeps-identical-to-basic-trees',
                message="Enhanced graphs are copies of basic trees in the entire dataset. This can happen for some simple sentences where there is nothing to enhance, but not for all sentences. If none of the enhancements from the guidelines (https://universaldependencies.org/u/overview/enhanced-syntax.html) are annotated, the DEPS should be left unspecified"
            ).confirm()
        return state
|
ud-tools/udtools/tests/test-cases/README.md
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Test the validator in Windows command line (cmd)
|
| 2 |
+
|
| 3 |
+
```batch
|
| 4 |
+
tools\test-cases\test.bat 2> tools\test-cases\test.log
|
| 5 |
+
```
|
ud-tools/udtools/tests/test-cases/eval/cs_pud-gold.conllu
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ud-tools/udtools/tests/test-cases/eval/cs_pud-plain.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ud-tools/udtools/tests/test-cases/eval/cs_pud-udpipe-pdtc-ud-2.17-251125.conllu
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ud-tools/udtools/tests/test-cases/eval/eval.txt
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metric | Precision | Recall | F1 Score | AligndAcc
|
| 2 |
+
-----------+-----------+-----------+-----------+-----------
|
| 3 |
+
Tokens | 99.50 | 99.80 | 99.65 |
|
| 4 |
+
Sentences | 98.01 | 98.60 | 98.31 |
|
| 5 |
+
Words | 99.50 | 99.80 | 99.65 |
|
| 6 |
+
UPOS | 95.86 | 96.15 | 96.01 | 96.34
|
| 7 |
+
XPOS | 77.55 | 77.78 | 77.67 | 77.94
|
| 8 |
+
UFeats | 90.50 | 90.77 | 90.64 | 90.95
|
| 9 |
+
AllTags | 76.71 | 76.94 | 76.83 | 77.09
|
| 10 |
+
Lemmas | 97.26 | 97.55 | 97.40 | 97.74
|
| 11 |
+
UAS | 92.14 | 92.41 | 92.27 | 92.60
|
| 12 |
+
LAS | 88.96 | 89.23 | 89.10 | 89.41
|
| 13 |
+
CLAS | 87.59 | 88.01 | 87.80 | 88.28
|
| 14 |
+
MLAS | 74.01 | 74.37 | 74.19 | 74.59
|
| 15 |
+
BLEX | 85.52 | 85.93 | 85.72 | 86.19
|
| 16 |
+
ELAS | 0.00 | 0.00 | 0.00 |
|
| 17 |
+
EULAS | 0.00 | 0.00 | 0.00 |
|
ud-tools/udtools/tests/test-cases/invalid-level1/.gitattributes
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Do not convert CRLF to LF in the test file that intentionally contains CRLF.
|
| 2 |
+
non-unix-newline.conllu -text
|
ud-tools/udtools/tests/test-cases/invalid-level1/columns-format-minimal.conllu
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# sent_id = tanl1
|
| 2 |
+
# text = LONDRA .
|
| 3 |
+
1 LONDRA Lon dra NOUN SP _ 0 root _ _
|
| 4 |
+
2 . . PUNCT FS _ 1 punct _ _
|
| 5 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/columns-format.conllu
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# errors = number-of-columns, empty-column, leading-whitespace, trailing-whitespace, repeated-whitespace, invalid-whitespace-mwt, invalid-whitespace
|
| 2 |
+
# sent_id = tanl1
|
| 3 |
+
# text = LONDRA .
|
| 4 |
+
1 LONDRA Londra NOUN SP _ 0 root _ _ X
|
| 5 |
+
2 . . PUNCT FS _ 1 punct _ _
|
| 6 |
+
|
| 7 |
+
# sent_id = tanl2
|
| 8 |
+
# text = Gas dalla statua .
|
| 9 |
+
# this is comment
|
| 10 |
+
1 Gas gas NOUN S Gender=Masc|Number=Sing 0 root _ _
|
| 11 |
+
2-3 da lla _ _ _ _ _ _ _ _
|
| 12 |
+
2 da da ADP _ 4 case _ _
|
| 13 |
+
3 la la DET RD Gender=Fem|Number=Sing|PronType=Art 4 det _ _
|
| 14 |
+
4 statua statua NOUN S Gender=Fem|Number=Sing 1 nmod _ _
|
| 15 |
+
5 . . PUNCT FS _ 1 punct _ _
|
| 16 |
+
|
| 17 |
+
# sent_id = tanl3
|
| 18 |
+
# text = Evacuata la Tate Gallery .
|
| 19 |
+
1 Evacuata evacuare VERB V Gender=Fem|Number=Sing 0 root _ _
|
| 20 |
+
2 la il DE T RD Gender=Fem|Number=Sing|PronType=Art 3 det _ _
|
| 21 |
+
3 Tate Tate PROPN SP _ 1 nsubj _ _
|
| 22 |
+
4 Gallery Gallery PROPN SP _ 3 flat _ _
|
| 23 |
+
5 . . PUNCT FS _ 1 punct _ _
|
| 24 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/duplicate-id.conllu
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: IDs must be sequential integers (1, 2, ...)
|
| 2 |
+
# sent_id = duplicate-id
|
| 3 |
+
# text = valid PUNCT
|
| 4 |
+
1 valid valid NOUN SP _ 0 root _ _
|
| 5 |
+
1 PUNCT . PUNCT FS _ 0 root _ _
|
| 6 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/empty-field.conllu
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# sent_id = empty-field
|
| 2 |
+
# text =
|
| 3 |
+
# not valid: no field can be empty.
|
| 4 |
+
1 valid NOUN SP _ 0 root _ _
|
| 5 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/empty-head.conllu
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: HEAD must not be empty
|
| 2 |
+
# sent_id = empty-head
|
| 3 |
+
# text = have
|
| 4 |
+
1 have have VERB VB Tens=Pres root _ _
|
| 5 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/empty-sentence.conllu
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: sentences must contain at least one word.
|
| 2 |
+
# text =
|
| 3 |
+
|
| 4 |
+
# valid one-word sentence.
|
| 5 |
+
# sent_id = test-15
|
| 6 |
+
# text = valid
|
| 7 |
+
1 valid valid NOUN SP _ 0 root _ _
|
| 8 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/extra-empty-line.conllu
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# valid one-word sentence.
|
| 2 |
+
# sent_id = extra-empty-line1
|
| 3 |
+
# text = valid
|
| 4 |
+
1 valid valid NOUN SP _ 0 root _ _
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# format error: sentences must be separated by exactly one empty line
|
| 9 |
+
# valid one-word sentence.
|
| 10 |
+
# sent_id = extra-empty-line2
|
| 11 |
+
# text = valid
|
| 12 |
+
1 valid valid NOUN SP _ 0 root _ _
|
| 13 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/id-starting-from-2.conllu
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# sent_id = id-starting-from-2a
|
| 2 |
+
# text = valid
|
| 3 |
+
# valid one-word sentence.
|
| 4 |
+
1 valid valid NOUN SP _ 0 root _ _
|
| 5 |
+
|
| 6 |
+
# sent_id = id-starting-from-2b
|
| 7 |
+
# text = valid
|
| 8 |
+
# not valid: ID must start at 1 for each new sentence
|
| 9 |
+
2 valid valid NOUN SP _ 0 root _ _
|
| 10 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/id-with-extra-0.conllu
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: word IDs must be integers 1, 2, 3, ...
|
| 2 |
+
# text = nonvalid
|
| 3 |
+
# sent_id = id-with-extra-0
|
| 4 |
+
01 nonvalid nonvalid NOUN _ _ 0 root _ _
|
| 5 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/invalid-line.conllu
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# errors = invalid-line
|
| 2 |
+
# sent_id = tanl1
|
| 3 |
+
# text = LONDRA
|
| 4 |
+
1 LONDRA Londra NOUN SP _ 0 root _ _
|
| 5 |
+
- . . PUNCT FS _ 1 punct _ _
|
| 6 |
+
|
| 7 |
+
# sent_id = tanl2
|
| 8 |
+
# text = Gas dalla statua
|
| 9 |
+
# this is comment
|
| 10 |
+
1 Gas gas NOUN S Gender=Masc|Number=Sing 0 root _ _
|
| 11 |
+
2-3 dalla _ _ _ _ _ _ _ _
|
| 12 |
+
2 da da ADP EA _ 4 case _ _
|
| 13 |
+
3 la la DET RD Gender=Fem|Number=Sing|PronType=Art 4 det _ _
|
| 14 |
+
4 statua statua NOUN S Gender=Fem|Number=Sing 1 nmod _ _
|
| 15 |
+
5 . . PUNCT FS _ 1 punct _ _
|
| 16 |
+
|
| 17 |
+
# sent_id = tanl3
|
| 18 |
+
# text = Evacuata la Tate Gallery .
|
| 19 |
+
1 Evacuata evacuare VERB V Gender=Fem|Number=Sing 0 root _ _
|
| 20 |
+
2 la il DET RD Gender=Fem|Number=Sing|PronType=Art 3 det _ _
|
| 21 |
+
3 Tate Tate PROPN SP _ 1 nsubj _ _
|
| 22 |
+
4 Gallery Gallery PROPN SP _ 3 flat _ _
|
| 23 |
+
5 . . PUNCT FS _ 1 punct _ _
|
| 24 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/invalid-range.conllu
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: (first-last) multiword ranges must have first <= last
|
| 2 |
+
# sent_id = invalid-range
|
| 3 |
+
# text = I haven't have not a clue PUNCT
|
| 4 |
+
1 I I PRON PRN Num=Sing|Per=1 2 nsubj _ _
|
| 5 |
+
2-1 haven't _ _ _ _ _ _ _ _
|
| 6 |
+
2 have have VERB VB Tens=Pres 0 root _ _
|
| 7 |
+
3 not not ADV RB _ 2 aux _ _
|
| 8 |
+
4 a a DET DT _ 5 det _ _
|
| 9 |
+
5 clue clue NOUN NN Num=Sing 2 obj _ _
|
| 10 |
+
6 PUNCT . PUNCT PUNCT _ 2 punct _ _
|
| 11 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/invalid-word-id.conllu
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: word IDs must be integers 1, 2, 3, ...
|
| 2 |
+
# text = nonvalid
|
| 3 |
+
# sent_id = id-with-extra-0
|
| 4 |
+
01 nonvalid nonvalid NOUN _ _ 0 root _ _
|
| 5 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/invalid-word-interval.conllu
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: first and last in multiword ranges must be integers
|
| 2 |
+
# sent_id = invalid-range-format
|
| 3 |
+
# text = I haven't have not a clue PUNCT
|
| 4 |
+
1 I I PRON PRN Num=Sing|Per=1 2 nsubj _ _
|
| 5 |
+
2-X haven't _ _ _ _ _ _ _ _
|
| 6 |
+
2 have have VERB VB Tens=Pres 0 root _ _
|
| 7 |
+
3 not not ADV RB _ 2 aux _ _
|
| 8 |
+
4 a a DET DT _ 5 det _ _
|
| 9 |
+
5 clue clue NOUN NN Num=Sing 2 obj _ _
|
| 10 |
+
6 PUNCT . PUNCT PUNCT _ 2 punct _ _
|
| 11 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/misindexed-empty-node.conllu
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# sent_id = misindexed-empty-node
|
| 2 |
+
# text = This holes
|
| 3 |
+
# not valid: empty nodes indexed i.1, i.2, etc. after word with index i
|
| 4 |
+
1 This this PRON _ _ 0 root 2.2:nsubj _
|
| 5 |
+
2.2 _ _ _ _ _ _ _ 0:root _
|
| 6 |
+
2 holes hole NOUN _ _ 1 orphan 2.2:obj _
|
| 7 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/misordered-multiword.conllu
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: multiword tokens must appear before the first word in their
|
| 2 |
+
# range
|
| 3 |
+
# sent_id = misordered-multiword
|
| 4 |
+
# text = I have haven't a clue PUNCT
|
| 5 |
+
1 I I PRON PRN Num=Sing|Per=1 2 nsubj _ _
|
| 6 |
+
2 have have VERB VB Tens=Pres 0 root _ _
|
| 7 |
+
2-3 haven't _ _ _ _ _ _ _ _
|
| 8 |
+
3 not not ADV RB _ 2 aux _ _
|
| 9 |
+
4 a a DET DT _ 5 det _ _
|
| 10 |
+
5 clue clue NOUN NN Num=Sing 2 obj _ _
|
| 11 |
+
6 PUNCT . PUNCT PUNCT _ 2 punct _ _
|
| 12 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/misplaced-comment-end.conllu
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: comment lines should precede a sentence
|
| 2 |
+
# sent_id = misplaced-comment-end
|
| 3 |
+
# text = I haven't a clue PUNCT
|
| 4 |
+
1 I I PRON PRN Num=Sing|Per=1 2 nsubj _ _
|
| 5 |
+
2-3 haven't _ _ _ _ _ _ _ _
|
| 6 |
+
2 have have VERB VB Tens=Pres 0 root _ _
|
| 7 |
+
3 not not ADV RB _ 2 aux _ _
|
| 8 |
+
4 a a DET DT _ 5 det _ _
|
| 9 |
+
5 clue clue NOUN NN Num=Sing 2 obj _ _
|
| 10 |
+
6 PUNCT . PUNCT PUNCT _ 2 punct _ _
|
| 11 |
+
|
| 12 |
+
# this comment should not be here as it does not precede a sentence.
|
| 13 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/misplaced-comment-mid.conllu
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: comment lines inside sentences are disallowed.
|
| 2 |
+
# sent_id = misplaced-comment-mid
|
| 3 |
+
# text = I haven't a clue PUNCT
|
| 4 |
+
1 I I PRON PRN Num=Sing|Per=1 2 nsubj _ _
|
| 5 |
+
2-3 haven't _ _ _ _ _ _ _ _
|
| 6 |
+
# this comment should not be here
|
| 7 |
+
2 have have VERB VB Tens=Pres 0 root _ _
|
| 8 |
+
3 not not ADV RB _ 2 aux _ _
|
| 9 |
+
4 a a DET DT _ 5 det _ _
|
| 10 |
+
5 clue clue NOUN NN Num=Sing 2 obj _ _
|
| 11 |
+
6 PUNCT . PUNCT PUNCT _ 2 punct _ _
|
| 12 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/misplaced-comment.conllu
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# sent_id = tanl1
|
| 2 |
+
# text = LONDRA .
|
| 3 |
+
1 LONDRA Londra NOUN SP _ 0 root _ _
|
| 4 |
+
# mispaced comment
|
| 5 |
+
2 . . PUNCT FS _ 1 punct _ _
|
| 6 |
+
|
| 7 |
+
# sent_id = tanl2
|
| 8 |
+
# text = Gas dalla statua .
|
| 9 |
+
# this is comment
|
| 10 |
+
1 Gas gas NOUN S Gender=Masc|Number=Sing 0 root _ _
|
| 11 |
+
2-3 dalla _ _ _ _ _ _ _ _
|
| 12 |
+
2 da da ADP _ _ 4 case _ _
|
| 13 |
+
3 la la DET RD Gender=Fem|Number=Sing|PronType=Art 4 det _ _
|
| 14 |
+
4 statua statua NOUN S Gender=Fem|Number=Sing 1 nmod _ _
|
| 15 |
+
5 . . PUNCT FS _ 1 punct _ _
|
| 16 |
+
|
| 17 |
+
# sent_id = tanl3
|
| 18 |
+
# text = Evacuata la Tate Gallery .
|
| 19 |
+
1 Evacuata evacuare VERB V Gender=Fem|Number=Sing 0 root _ _
|
| 20 |
+
2 la il DET RD Gender=Fem|Number=Sing|PronType=Art 3 det _ _
|
| 21 |
+
3 Tate Tate PROPN SP _ 1 nsubj _ _
|
| 22 |
+
4 Gallery Gallery PROPN SP _ 3 flat _ _
|
| 23 |
+
5 . . PUNCT FS _ 1 punct _ _
|
| 24 |
+
|
ud-tools/udtools/tests/test-cases/invalid-level1/misplaced-empty-node-2.conllu
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# not valid: multiword tokens must appear before the first word in their
|
| 2 |
+
# range
|
| 3 |
+
# sent_id = misordered-empty
|
| 4 |
+
# text = I have haven't a clue PUNCT
|
| 5 |
+
1 I I PRON PRN Num=Sing|Per=1 2 nsubj _ _
|
| 6 |
+
2-3 haven't _ _ _ _ _ _ _ _
|
| 7 |
+
1.1 _ _ _ _ _ _ _ _ _
|
| 8 |
+
2 have have VERB VB Tens=Pres 0 root _ _
|
| 9 |
+
3 not not ADV RB _ 2 aux _ _
|
| 10 |
+
4 a a DET DT _ 5 det _ _
|
| 11 |
+
5 clue clue NOUN NN Num=Sing 2 obj _ _
|
| 12 |
+
6 PUNCT . PUNCT PUNCT _ 2 punct _ _
|
| 13 |
+
|