# Source: UDD-1 repository, src/ls_import_ws.py
# Commit bbc68b5 (rain1024): "Restructure technical report to ACL format,
# add Phase 0 gold eval methodology"
# /// script
# requires-python = ">=3.9"
# dependencies = ["python-crfsuite"]
# ///
"""Convert AL Cycle 0 top-500 TSV + BIO files to Label Studio JSON.
Reads al_cycle0_top500.tsv to identify which sentences to annotate,
looks up their silver BIO tags from udd-ws-v1.1-{dev,test}.txt,
and produces a Label Studio import JSON with pre-annotations.
When --model is provided, computes per-span confidence scores from CRF
marginal probabilities. Each word span gets score = min confidence across
its syllables, so words with any uncertain boundary get a low score.
When --dict is provided (standalone or auto-detected from model dir),
adds meta.dict to each span (shown in Region Details when selected).
Usage:
uv run src/ls_import_ws.py [--validate]
uv run src/ls_import_ws.py --dict path/to/dictionary.txt
uv run src/ls_import_ws.py --model path/to/model.crfsuite
"""
import argparse
import json
import sys
import unicodedata
from pathlib import Path
def parse_bio_file(filepath):
    """Parse a BIO-format file into a list of sentence records.

    Each record is a dict with keys: sent_id, text, syllables, tags.
    ``# sent_id = `` and ``# text = `` comment lines fill the metadata
    fields, two-column (syllable<TAB>tag) lines fill the token lists,
    and a blank line terminates the current sentence.
    Reused from fix_ws_errors.py.
    """
    def _fresh():
        # New, empty sentence accumulator.
        return {"sent_id": "", "text": "", "syllables": [], "tags": []}

    parsed = []
    sent = _fresh()
    with open(filepath, "r", encoding="utf-8") as fh:
        for raw in fh:
            raw = raw.rstrip("\n")
            if raw.startswith("# sent_id = "):
                sent["sent_id"] = raw.split("= ", 1)[1]
            elif raw.startswith("# text = "):
                sent["text"] = raw.split("= ", 1)[1]
            elif raw.startswith("#"):
                pass  # other comment lines are ignored
            elif not raw:
                # Blank line: flush the sentence if it has any tokens.
                if sent["syllables"]:
                    parsed.append(sent)
                sent = _fresh()
            else:
                cols = raw.split("\t")
                if len(cols) == 2:
                    sent["syllables"].append(cols[0])
                    sent["tags"].append(cols[1])
    # A file need not end with a blank line; flush any trailing sentence.
    if sent["syllables"]:
        parsed.append(sent)
    return parsed
def parse_tsv(filepath):
    """Parse al_cycle0_top500.tsv into ranked-sentence records.

    The first line is a tab-separated header; every subsequent non-empty
    line is zipped against it. Returns a list of dicts with keys:
    rank (int), file (str), sent_idx (int), score (float), text (str).
    """
    records = []
    with open(filepath, "r", encoding="utf-8") as fh:
        columns = fh.readline().rstrip("\n").split("\t")
        for raw in fh:
            raw = raw.rstrip("\n")
            if not raw:
                continue  # tolerate blank lines (e.g. a trailing newline)
            values = dict(zip(columns, raw.split("\t")))
            records.append({
                "rank": int(values["rank"]),
                "file": values["file"],
                "sent_idx": int(values["sent_idx"]),
                "score": float(values["score"]),
                "text": values["text"],
            })
    return records
def nfc(text):
    """Normalize *text* to Unicode NFC.

    Every cross-file comparison in this script (TSV text vs. BIO text,
    dictionary lookups) goes through this so composed and decomposed
    diacritic encodings compare equal.
    """
    return unicodedata.normalize("NFC", text)
def confidence_to_label(conf):
    """Bucket a confidence score into a span label.

    WH for conf >= 0.8, WM for 0.6 <= conf < 0.8, WL below 0.6.
    """
    # Thresholds checked highest-first; first match wins.
    for threshold, label in ((0.8, "WH"), (0.6, "WM")):
        if conf >= threshold:
            return label
    return "WL"
def bio_to_spans(syllables, tags, text, confidences=None):
    """Convert BIO-tagged syllables into Label Studio span annotations.

    Args:
        syllables: list of syllable strings
        tags: list of BIO tags (B-W / I-W)
        text: space-joined text string
        confidences: optional list of per-syllable confidence floats

    Returns:
        list of span dicts for Label Studio predictions

    Raises:
        ValueError: when a syllable does not match *text* at the expected
            character offset, or when a tag other than B-W / I-W occurs.
    """
    result = []
    word_start = None   # char offset where the currently open word begins
    word_syls = []      # syllables of the currently open word
    word_idxs = []      # their indices into *syllables*

    def _emit(start, syls, idxs):
        """Append one Label Studio span dict for a completed word."""
        joined = " ".join(syls)
        if confidences is not None:
            # A word is only as confident as its least-confident syllable.
            worst = min(confidences[k] for k in idxs)
            tag_label = confidence_to_label(worst)
        else:
            tag_label = "W"
        entry = {
            "id": f"s{len(result)}",
            "from_name": "label",
            "to_name": "text",
            "type": "labels",
            "value": {
                "start": start,
                "end": start + len(joined),
                "text": joined,
                "labels": [tag_label],
            },
        }
        if confidences is not None:
            entry["score"] = round(worst, 4)
        result.append(entry)

    offset = 0
    for idx, (syl, tag) in enumerate(zip(syllables, tags)):
        # The BIO file and the TSV text must agree character-for-character.
        window = text[offset:offset + len(syl)]
        if nfc(window) != nfc(syl):
            raise ValueError(
                f"Syllable mismatch at pos {offset}: "
                f"expected {syl!r}, got {window!r} in text {text!r}"
            )
        if tag == "B-W":
            # A new word begins: flush the previous one, if any.
            if word_syls:
                _emit(word_start, word_syls, word_idxs)
            word_start, word_syls, word_idxs = offset, [syl], [idx]
        elif tag == "I-W":
            word_syls.append(syl)
            word_idxs.append(idx)
        else:
            raise ValueError(f"Unknown tag {tag!r} for syllable {syl!r}")
        offset += len(syl)
        if idx < len(syllables) - 1:
            offset += 1  # single space separator between syllables
    # Flush the final word.
    if word_syls:
        _emit(word_start, word_syls, word_idxs)
    return result
def add_dict_meta(spans, dictionary):
    """Attach dictionary-lookup info to each span's ``meta`` field.

    Label Studio shows ``meta`` in the Region Details panel when the
    annotator selects a region, giving instant dictionary feedback.
    Single-syllable spans get a bare found/not-found marker; missing
    multi-syllable words additionally list up to five dictionary entries
    sharing the word's first syllable.
    """
    for region in spans:
        word = region["value"]["text"]
        key = nfc(word.lower().strip())
        multi = " " in word  # more than one space-separated syllable
        found = key in dictionary
        if not multi:
            region["meta"] = {"dict": "✓" if found else "—"}
        elif found:
            region["meta"] = {"dict": "✓ in dict"}
        else:
            # Suggest near-misses: entries beginning with the same first
            # syllable, excluding the (absent) word itself.
            head = key.split()[0]
            neighbors = sorted(
                entry for entry in dictionary
                if entry.startswith(head + " ") and entry != key
            )[:5]
            info = {"dict": "✗ not found"}
            if neighbors:
                info["similar"] = ", ".join(neighbors)
            region["meta"] = info
def validate_spans(spans, text, syllables):
    """Check that *spans* tile *text* exactly, one span per word.

    NOTE: despite its name, the third argument receives the BIO *tag*
    sequence (callers pass ``sent["tags"]``); the number of ``B-W``
    entries is the expected word/span count.

    Raises:
        ValueError: on overlapping spans, an uncovered non-space
            character, or a span count differing from the word count.
    """
    seen = [False] * len(text)
    for region in spans:
        lo = region["value"]["start"]
        hi = region["value"]["end"]
        for pos in range(lo, hi):
            if seen[pos]:
                raise ValueError(f"Overlap at char {pos} in text {text!r}")
            seen[pos] = True
    # Every character except the inter-word spaces must belong to a span.
    for pos, ch in enumerate(text):
        if ch != " " and not seen[pos]:
            raise ValueError(f"Gap at char {pos} ({ch!r}) in text {text!r}")
    # bio_to_spans emits one span per B-W tag, so the counts must agree.
    expected = sum(1 for t in syllables if t == "B-W")
    if len(spans) != expected:
        raise ValueError(
            f"Span count {len(spans)} != word count {expected} in text {text!r}"
        )
def load_dictionary(dict_path):
    """Read a dictionary file into a set of normalized entries.

    Entries are lowercased and NFC-normalized so lookups match the
    normalization applied to span text elsewhere in this script.
    Blank lines are skipped. Prints a one-line load summary.
    """
    entries = set()
    with open(dict_path, encoding="utf-8") as fh:
        for raw in fh:
            term = raw.strip()
            if term:
                entries.add(nfc(term.lower()))
    print(f"Dictionary: {len(entries)} entries from {dict_path}")
    return entries
def load_crf_scorer(model_path):
    """Open a trained CRF model for marginal-probability scoring.

    pycrfsuite is imported lazily so the script still runs without the
    dependency when --model is not given. Returns the opened Tagger.
    """
    import pycrfsuite

    crf = pycrfsuite.Tagger()
    crf.open(str(model_path))
    return crf
def compute_confidences(tagger, syllables, dictionary=None):
    """Compute per-syllable confidence from CRF marginal probabilities.

    Confidence at position i is max(P(B|x,i), P(I|x,i)), i.e. the
    probability mass of whichever tag the model favors there.
    Feature extraction is shared with al_score_ws.py.

    Args:
        tagger: an opened pycrfsuite.Tagger (see load_crf_scorer).
        syllables: list of syllable strings for one sentence.
        dictionary: optional dictionary set forwarded to the feature
            extractor.

    Returns:
        list of floats, one confidence per syllable.
    """
    # Make al_score_ws importable from this script's directory, but only
    # add the path once: this function runs once per TSV row, and the
    # original unconditional insert grew sys.path by one entry per call.
    script_dir = str(Path(__file__).resolve().parent)
    if script_dir not in sys.path:
        sys.path.insert(0, script_dir)
    from al_score_ws import sentence_to_features

    xseq = sentence_to_features(syllables, dictionary)
    tagger.set(xseq)
    confidences = []
    for i in range(len(syllables)):
        p_b = tagger.marginal("B", i)
        p_i = tagger.marginal("I", i)
        confidences.append(max(p_b, p_i))
    return confidences
def main():
    """CLI entry point: produce a Label Studio import JSON.

    Pipeline: parse the ranked TSV, index silver BIO sentences by their
    NFC-normalized surface text, convert each matched sentence's BIO tags
    into pre-annotation spans (optionally confidence-scored via a CRF
    model and enriched with dictionary metadata), then write the task
    list to JSON. Exits with status 1 when any row failed to match,
    convert, or validate.
    """
    parser = argparse.ArgumentParser(
        description="Convert AL top-500 + BIO to Label Studio JSON"
    )
    parser.add_argument(
        "--tsv",
        default="al_cycle0_top500.tsv",
        help="Path to ranked TSV file (default: al_cycle0_top500.tsv)",
    )
    parser.add_argument(
        "--bio-dir",
        default=".",
        help="Directory containing udd-ws-v1.1-{dev,test}.txt (default: .)",
    )
    parser.add_argument(
        "--output",
        default="ls_import_cycle1.json",
        help="Output JSON path (default: ls_import_cycle1.json)",
    )
    parser.add_argument(
        "--validate",
        action="store_true",
        help="Run validation checks on all tasks",
    )
    parser.add_argument(
        "--model",
        default=None,
        help="Path to CRF model (.crfsuite) for per-span confidence scores",
    )
    parser.add_argument(
        "--dict",
        default=None,
        help="Path to dictionary.txt (auto-detected from model dir if not set)",
    )
    args = parser.parse_args()
    root = Path(args.bio_dir)
    # Dictionary resolution: explicit --dict wins; otherwise look for a
    # dictionary.txt sitting next to the model file.
    dictionary = None
    if args.dict:
        dictionary = load_dictionary(args.dict)
    elif args.model:
        auto_dict = Path(args.model).parent / "dictionary.txt"
        if auto_dict.exists():
            dictionary = load_dictionary(auto_dict)
    # Load the CRF scorer only when a model path was provided.
    tagger = None
    if args.model:
        model_path = Path(args.model)
        print(f"Loading CRF model: {model_path}")
        tagger = load_crf_scorer(model_path)
        print("Per-span confidence scoring enabled")
    # Parse the ranked-sentence TSV.
    tsv_rows = parse_tsv(args.tsv)
    print(f"Loaded {len(tsv_rows)} rows from {args.tsv}")
    # Parse BIO files and index sentences by NFC-normalized text so TSV
    # rows can be matched regardless of which split they came from.
    bio_by_text = {}
    for split in ("dev", "test", "train"):
        # "train" is probed too in case that split file exists on disk.
        bio_path = root / f"udd-ws-v1.1-{split}.txt"
        if not bio_path.exists():
            continue
        sentences = parse_bio_file(bio_path)
        print(f"Loaded {len(sentences)} sentences from {bio_path}")
        for sent in sentences:
            key = nfc(" ".join(sent["syllables"]))
            bio_by_text[key] = sent
    print(f"Text index: {len(bio_by_text)} unique sentences")
    # Build one Label Studio task per TSV row; collect per-row errors
    # instead of aborting so a full report can be printed at the end.
    tasks = []
    errors = []
    for row in tsv_rows:
        tsv_text = nfc(row["text"])
        sent = bio_by_text.get(tsv_text)
        if sent is None:
            errors.append(f"Rank {row['rank']}: text not found in BIO files")
            continue
        text = tsv_text
        # Compute per-syllable confidence if a CRF model is available.
        confidences = None
        if tagger is not None:
            confidences = compute_confidences(
                tagger, sent["syllables"], dictionary
            )
        # Convert BIO tags to character-offset spans.
        try:
            spans = bio_to_spans(
                sent["syllables"], sent["tags"], text, confidences
            )
        except ValueError as e:
            errors.append(f"Rank {row['rank']}: span conversion error: {e}")
            continue
        # Optionally check span coverage (no gaps/overlaps, correct count).
        if args.validate:
            try:
                validate_spans(spans, text, sent["tags"])
            except ValueError as e:
                errors.append(f"Rank {row['rank']}: validation error: {e}")
                continue
        # Task-level prediction score = 1 - AL uncertainty score, so
        # high-uncertainty sentences get a low prediction confidence.
        pred_score = round(1.0 - row["score"], 6)
        # Add dictionary metadata shown in the Region Details panel.
        if dictionary:
            add_dict_meta(spans, dictionary)
        task = {
            "data": {
                "text": text,
                "sent_id": sent["sent_id"],
                "rank": row["rank"],
            },
            "predictions": [{
                "model_version": "silver_crf_v1.1",
                "score": pred_score,
                "result": spans,
            }],
        }
        tasks.append(task)
    # Report errors and a generation summary.
    if errors:
        print(f"\n{len(errors)} errors:")
        for e in errors:
            print(f" - {e}")
    print(f"\nGenerated {len(tasks)} tasks")
    if tagger is not None:
        # Report span-confidence statistics across all generated tasks.
        all_scores = []
        for t in tasks:
            for r in t["predictions"][0]["result"]:
                if "score" in r:
                    all_scores.append(r["score"])
        if all_scores:
            low = sum(1 for s in all_scores if s < 0.8)
            print(f"Span confidence: min={min(all_scores):.4f}, "
                  f"mean={sum(all_scores)/len(all_scores):.4f}, "
                  f"<0.8: {low}/{len(all_scores)}")
    # Write the import JSON (ensure_ascii=False keeps non-ASCII readable).
    output_path = Path(args.output)
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(tasks, f, ensure_ascii=False, indent=2)
    print(f"Written to {output_path}")
    if errors:
        sys.exit(1)
# Script entry point: run the converter only when executed directly.
if __name__ == "__main__":
    main()