# UDD-1 / src/ls_export_ws.py
# From commit 3629965 (rain1024): "Add Label Studio annotation tooling and
# dictionary plugins for AL Cycle 1".
"""Convert Label Studio JSON export to BIO format.
Reads a Label Studio JSON export (with annotations), converts span
annotations back to syllable-level BIO tags, and writes BIO format
compatible with tree-1's load_data_vlsp2013().
Usage:
uv run src/ls_export_ws.py --input ls_export_cycle1.json --output gold_ws_cycle1.txt
"""
import argparse
import json
import sys
import unicodedata
from pathlib import Path
def nfc(text):
    """Normalize *text* to NFC (canonical composition) so Unicode
    comparisons and character-offset arithmetic behave consistently."""
    composed = unicodedata.normalize("NFC", text)
    return composed
def text_to_syllable_offsets(text):
    """Compute (start, end) character offsets for each space-delimited syllable.

    ``end`` is exclusive, i.e. ``text[start:end]`` recovers the syllable.
    Splitting on a literal single space means consecutive spaces (and the
    empty string) yield zero-length syllables, mirroring ``text.split(" ")``
    elsewhere in this module.

    Returns list of (start, end) tuples.
    """
    offsets = []
    cursor = 0
    for token in text.split(" "):
        offsets.append((cursor, cursor + len(token)))
        # Advance past this syllable plus the single separating space.
        cursor += len(token) + 1
    return offsets
def spans_to_bio(spans, text):
    """Convert Label Studio span annotations to syllable-level BIO tags.

    A syllable is tagged only when it lies entirely inside a span's
    [start, end] character range; the first covered syllable of a span
    gets "B-W" and the rest "I-W".  Syllables covered by no span are left
    as None (validate_bio reports those).  Spans are processed in start
    order, so a later overlapping span overwrites earlier tags.

    Args:
        spans: list of span annotation dicts (from Label Studio result),
            each carrying span["value"]["start"] / ["end"] offsets.
        text: the original text string (space-delimited syllables).

    Returns:
        (syllables, tags) tuple of parallel lists.
    """
    syllables = text.split(" ")
    syl_offsets = text_to_syllable_offsets(text)
    n = len(syllables)
    tags = [None] * n
    # Sort spans by start offset so overlaps resolve deterministically.
    sorted_spans = sorted(spans, key=lambda s: s["value"]["start"])
    for span in sorted_spans:
        span_start = span["value"]["start"]
        span_end = span["value"]["end"]
        # Find syllables covered by this span
        first_syl = None
        for i, (s, e) in enumerate(syl_offsets):
            if s > span_end:
                # syl_offsets is sorted by start, so once a syllable begins
                # past span_end no later one can satisfy e <= span_end.
                break
            if s >= span_start and e <= span_end:
                if first_syl is None:
                    first_syl = i
                    tags[i] = "B-W"
                else:
                    tags[i] = "I-W"
    return syllables, tags
def validate_bio(syllables, tags, sent_id):
    """Validate BIO tags: all syllables tagged, no gaps.

    Flags untagged syllables, tags outside {"B-W", "I-W"}, a sequence
    opening with "I-W", and any "I-W" directly after an untagged
    syllable.  Returns a list of "[sent_id] ..." error strings, empty
    when the tagging is valid.
    """
    issues = []
    record = issues.append
    for pos, (syl, tag) in enumerate(zip(syllables, tags)):
        if tag is None:
            record(f"Syllable {pos} ({syl!r}) has no tag")
        elif tag not in ("B-W", "I-W"):
            record(f"Syllable {pos} ({syl!r}) has invalid tag {tag!r}")
    if tags and tags[0] == "I-W":
        record("First syllable has I-W tag (should be B-W)")
    for pos in range(1, len(tags)):
        if tags[pos] == "I-W" and tags[pos - 1] is None:
            record(f"I-W at position {pos} follows untagged syllable")
    return [f"[{sent_id}] {issue}" for issue in issues]
def get_annotations(task):
    """Extract the best annotation result from a task.

    Human annotations win over model predictions; among non-cancelled
    human annotations, the most recently updated one (by "updated_at"
    string comparison) is chosen.  Falls back to the first prediction,
    and returns None when the task has neither.
    """
    usable = [
        ann for ann in task.get("annotations", [])
        if not ann.get("was_cancelled", False)
    ]
    if usable:
        newest = max(usable, key=lambda ann: ann.get("updated_at", ""))
        return newest.get("result", [])
    preds = task.get("predictions", [])
    return preds[0].get("result", []) if preds else None
def main():
    """CLI entry point: convert a Label Studio JSON export to BIO format.

    Reads --input (a Label Studio JSON export), turns each task's span
    annotations into syllable-level BIO tags, validates them, and writes
    the surviving sentences (sorted by sent_id) to --output.  Exits with
    status 1 when any task produced errors; valid sentences are still
    written first.
    """
    parser = argparse.ArgumentParser(
        description="Convert Label Studio JSON export to BIO format"
    )
    parser.add_argument(
        "--input",
        required=True,
        help="Path to Label Studio JSON export",
    )
    parser.add_argument(
        "--output",
        required=True,
        help="Output BIO file path",
    )
    parser.add_argument(
        "--require-annotation",
        action="store_true",
        help="Only include tasks with human annotations (skip prediction-only)",
    )
    args = parser.parse_args()
    # Read export
    with open(args.input, "r", encoding="utf-8") as f:
        tasks = json.load(f)
    print(f"Loaded {len(tasks)} tasks from {args.input}")
    sentences = []
    errors = []
    n_annotated = 0
    n_prediction_only = 0
    for task in tasks:
        data = task.get("data", {})
        # NOTE(review): span offsets refer to the text Label Studio served;
        # normalizing to NFC here could shift offsets if the export text was
        # not already NFC — TODO confirm upstream text is NFC-normalized.
        text = nfc(data.get("text", ""))
        sent_id = data.get("sent_id", "unknown")
        if not text:
            errors.append(f"[{sent_id}] Empty text")
            continue
        # Get annotations (counted separately so the report can distinguish
        # human-reviewed sentences from prediction-only ones).
        annotations = task.get("annotations", [])
        completed = [a for a in annotations if not a.get("was_cancelled", False)]
        has_human = len(completed) > 0
        if args.require_annotation and not has_human:
            n_prediction_only += 1
            continue
        # Best available result: latest human annotation, else first prediction.
        result = get_annotations(task)
        if result is None:
            errors.append(f"[{sent_id}] No annotations or predictions")
            continue
        if has_human:
            n_annotated += 1
        else:
            n_prediction_only += 1
        # Filter to label spans only
        spans = [r for r in result if r.get("type") == "labels"]
        # Convert to BIO
        syllables, tags = spans_to_bio(spans, text)
        # Validate
        bio_errors = validate_bio(syllables, tags, sent_id)
        if bio_errors:
            # Invalid sentences are reported but excluded from the output file.
            errors.extend(bio_errors)
            continue
        sentences.append({
            "sent_id": sent_id,
            "text": text,
            "syllables": syllables,
            "tags": tags,
        })
    # Report
    print(f"Human-annotated: {n_annotated}")
    print(f"Prediction-only: {n_prediction_only}")
    if errors:
        print(f"\n{len(errors)} errors:")
        for e in errors:
            print(f" - {e}")
    print(f"\nConverted {len(sentences)} sentences")
    # Sort by sent_id for deterministic output
    sentences.sort(key=lambda s: s["sent_id"])
    # Write BIO: "# sent_id" / "# text" header lines, then one
    # "syllable<TAB>tag" line per syllable, blank line between sentences.
    output_path = Path(args.output)
    with open(output_path, "w", encoding="utf-8") as f:
        for sent in sentences:
            f.write(f"# sent_id = {sent['sent_id']}\n")
            f.write(f"# text = {sent['text']}\n")
            for syl, tag in zip(sent["syllables"], sent["tags"]):
                f.write(f"{syl}\t{tag}\n")
            f.write("\n")
    print(f"Written to {output_path}")
    # Non-zero exit so callers/CI notice conversion problems.
    if errors:
        sys.exit(1)
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()