"""Convert Label Studio JSON export to BIO format.
Reads a Label Studio JSON export (with annotations), converts span
annotations back to syllable-level BIO tags, and writes BIO format
compatible with tree-1's load_data_vlsp2013().
Usage:
uv run src/ls_export_ws.py --input ls_export_cycle1.json --output gold_ws_cycle1.txt
"""
import argparse
import json
import sys
import unicodedata
from pathlib import Path
def nfc(text):
    """Canonically compose *text* (Unicode NFC) so comparisons are consistent."""
    normalized = unicodedata.normalize("NFC", text)
    return normalized
def text_to_syllable_offsets(text):
    """Compute (start, end) character offsets for each space-delimited syllable.

    Returns list of (start, end) tuples, one per token of ``text.split(" ")``.
    """
    offsets = []
    cursor = 0
    for token in text.split(" "):
        offsets.append((cursor, cursor + len(token)))
        # Advance past the token plus the single separating space.
        cursor += len(token) + 1
    return offsets
def spans_to_bio(spans, text):
    """Convert Label Studio span annotations to syllable-level BIO tags.

    Args:
        spans: list of span annotation dicts (from Label Studio result)
        text: the original text string

    Returns:
        (syllables, tags) tuple; a tag is "B-W" for the first syllable fully
        inside a span, "I-W" for the rest, and None when no span covers it.
    """
    syllables = text.split(" ")
    # Offsets for each syllable, computed inline (same layout as
    # text_to_syllable_offsets: tokens separated by single spaces).
    offsets = []
    cursor = 0
    for token in syllables:
        offsets.append((cursor, cursor + len(token)))
        cursor += len(token) + 1
    tags = [None] * len(syllables)
    # Process spans left-to-right so earlier spans are applied first.
    for span in sorted(spans, key=lambda s: s["value"]["start"]):
        lo = span["value"]["start"]
        hi = span["value"]["end"]
        seen_first = False
        for idx, (s, e) in enumerate(offsets):
            # Only syllables fully contained in the span are tagged;
            # partial overlaps are left untagged for validation to catch.
            if lo <= s and e <= hi:
                tags[idx] = "I-W" if seen_first else "B-W"
                seen_first = True
    return syllables, tags
def validate_bio(syllables, tags, sent_id):
    """Validate BIO tags: every syllable tagged, no orphan I-W tags.

    Returns a list of error strings prefixed with ``[sent_id]``, or an
    empty list when the tagging is well-formed.
    """
    problems = []
    for idx, (syl, tag) in enumerate(zip(syllables, tags)):
        if tag is None:
            problems.append(f"Syllable {idx} ({syl!r}) has no tag")
        elif tag not in ("B-W", "I-W"):
            problems.append(f"Syllable {idx} ({syl!r}) has invalid tag {tag!r}")
    # A word can never start with a continuation tag.
    if tags and tags[0] == "I-W":
        problems.append("First syllable has I-W tag (should be B-W)")
    # Continuation tags must follow some tag (B-W or I-W), never a gap.
    problems.extend(
        f"I-W at position {idx} follows untagged syllable"
        for idx in range(1, len(tags))
        if tags[idx] == "I-W" and tags[idx - 1] is None
    )
    return [f"[{sent_id}] {p}" for p in problems] if problems else []
def get_annotations(task):
    """Extract the best annotation result from a task.

    Non-cancelled human annotations take priority; among those, the one
    with the latest ``updated_at`` wins. Otherwise the first prediction's
    result is returned. Returns None when neither is present.
    """
    kept = [
        ann
        for ann in task.get("annotations", [])
        if not ann.get("was_cancelled", False)
    ]
    if kept:
        newest = max(kept, key=lambda ann: ann.get("updated_at", ""))
        return newest.get("result", [])
    preds = task.get("predictions", [])
    if not preds:
        return None
    return preds[0].get("result", [])
def main() -> None:
    """CLI entry point: convert a Label Studio JSON export to BIO format.

    Reads the export named by --input, converts each task's span
    annotations to syllable-level BIO tags, validates them, and writes
    the valid sentences to --output. Exits with status 1 when any task
    produced an error; valid sentences are still written first.
    """
    parser = argparse.ArgumentParser(
        description="Convert Label Studio JSON export to BIO format"
    )
    parser.add_argument(
        "--input",
        required=True,
        help="Path to Label Studio JSON export",
    )
    parser.add_argument(
        "--output",
        required=True,
        help="Output BIO file path",
    )
    parser.add_argument(
        "--require-annotation",
        action="store_true",
        help="Only include tasks with human annotations (skip prediction-only)",
    )
    args = parser.parse_args()
    # Read export — assumed to be a JSON list of task dicts (Label Studio
    # JSON export format) — TODO confirm against the export used.
    with open(args.input, "r", encoding="utf-8") as f:
        tasks = json.load(f)
    print(f"Loaded {len(tasks)} tasks from {args.input}")
    sentences = []          # successfully converted sentences
    errors = []             # error strings, reported at the end
    n_annotated = 0         # tasks with a non-cancelled human annotation
    n_prediction_only = 0   # tasks handled (or skipped) via predictions only
    for task in tasks:
        data = task.get("data", {})
        # NFC-normalize so syllable offsets computed here line up with
        # the annotation offsets.
        text = nfc(data.get("text", ""))
        sent_id = data.get("sent_id", "unknown")
        if not text:
            errors.append(f"[{sent_id}] Empty text")
            continue
        # Get annotations; cancelled ones do not count as human work.
        annotations = task.get("annotations", [])
        completed = [a for a in annotations if not a.get("was_cancelled", False)]
        has_human = len(completed) > 0
        if args.require_annotation and not has_human:
            n_prediction_only += 1
            continue
        result = get_annotations(task)
        if result is None:
            errors.append(f"[{sent_id}] No annotations or predictions")
            continue
        if has_human:
            n_annotated += 1
        else:
            n_prediction_only += 1
        # Filter to label spans only (results may carry other entry types).
        spans = [r for r in result if r.get("type") == "labels"]
        # Convert to BIO
        syllables, tags = spans_to_bio(spans, text)
        # Validate; invalid sentences are reported and excluded from output.
        bio_errors = validate_bio(syllables, tags, sent_id)
        if bio_errors:
            errors.extend(bio_errors)
            continue
        sentences.append({
            "sent_id": sent_id,
            "text": text,
            "syllables": syllables,
            "tags": tags,
        })
    # Report
    print(f"Human-annotated: {n_annotated}")
    print(f"Prediction-only: {n_prediction_only}")
    if errors:
        print(f"\n{len(errors)} errors:")
        for e in errors:
            print(f" - {e}")
    print(f"\nConverted {len(sentences)} sentences")
    # Sort by sent_id for deterministic output
    sentences.sort(key=lambda s: s["sent_id"])
    # Write BIO: per sentence, two comment headers then one syllable<TAB>tag
    # line per token, terminated by a blank line.
    output_path = Path(args.output)
    with open(output_path, "w", encoding="utf-8") as f:
        for sent in sentences:
            f.write(f"# sent_id = {sent['sent_id']}\n")
            f.write(f"# text = {sent['text']}\n")
            for syl, tag in zip(sent["syllables"], sent["tags"]):
                f.write(f"{syl}\t{tag}\n")
            f.write("\n")
    print(f"Written to {output_path}")
    # Non-zero exit signals that some tasks failed conversion/validation.
    if errors:
        sys.exit(1)
# Script entry point: run the converter when executed directly.
if __name__ == "__main__":
    main()