File size: 4,131 Bytes
1f4dedc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
"""
Convert Brat-format annotations to JSONL format for NER training.

Author: Amir Safari
Date: 17.10.2025

This script processes Brat annotation files (.ann and .txt) from the
train/dev/test directories and converts them into JSONL format suitable
for NER model training.
"""
import json
import re
from pathlib import Path

print("Starting data conversion from Brat format to JSON Lines...")

# The full BIO tag inventory for this corpus; list index == integer label ID.
NER_TAGS = [
    "O", "B-Taxon", "I-Taxon", "B-Geographical_Location", "I-Geographical_Location",
    "B-Habitat", "I-Habitat", "B-Temporal_Expression", "I-Temporal_Expression",
    "B-Person", "I-Person",
]

# Mapping from tag name to integer ID (model-friendly encoding).
tag2id = {tag: i for i, tag in enumerate(NER_TAGS)}

# Hoisted out of the per-file loop: a "token" is any maximal run of
# non-whitespace characters.
TOKEN_RE = re.compile(r"\S+")


def tokenize_with_spans(text):
    """Split *text* into whitespace-delimited tokens with character offsets.

    Returns a list of dicts ``{"text": str, "start": int, "end": int}``,
    where start/end are character offsets into *text* (end exclusive).
    """
    return [
        {"text": m.group(0), "start": m.start(), "end": m.end()}
        for m in TOKEN_RE.finditer(text)
    ]


def parse_annotations(lines):
    """Extract entity annotations from Brat .ann lines.

    Only text-bound annotations (lines whose ID starts with "T") are
    parsed. Each result is ``{"label": str, "spans": [(start, end), ...]}``;
    spans may be discontinuous (Brat separates fragments with ";").
    Malformed span fragments are silently skipped, matching the original
    best-effort behaviour.
    """
    annotations = []
    for line in lines:
        if not line.startswith("T"):
            continue
        parts = line.strip().split("\t")
        if len(parts) < 2:
            continue
        # parts[1] looks like "Label start end" or "Label s1 e1;s2 e2".
        tag_parts = parts[1].split(" ")
        label = tag_parts[0]
        spans_str = " ".join(tag_parts[1:])
        char_spans = []
        for span_part in spans_str.split(";"):
            try:
                start, end = map(int, span_part.split(" "))
            except ValueError:
                # Non-numeric or oddly shaped fragment -- ignore it.
                continue
            char_spans.append((start, end))
        if char_spans:
            annotations.append({"label": label, "spans": char_spans})
    return annotations


def apply_bio_tags(tokens_with_spans, annotations):
    """Assign BIO tags to tokens by character-span overlap.

    The first token overlapping any fragment of an annotation gets
    "B-<label>"; every later overlapping token of the same annotation gets
    "I-<label>". Later annotations overwrite earlier ones on conflict,
    matching the original script's behaviour.
    """
    ner_tags = ["O"] * len(tokens_with_spans)
    for ann in annotations:
        is_first_token = True
        for start_char, end_char in ann["spans"]:
            for i, token in enumerate(tokens_with_spans):
                # Half-open interval overlap test on character offsets.
                if token["start"] < end_char and token["end"] > start_char:
                    ner_tags[i] = f"B-{ann['label']}" if is_first_token else f"I-{ann['label']}"
                    is_first_token = False
    return ner_tags


def convert_split(split):
    """Convert one Brat split directory into <split>.jsonl.

    Reads every .ann/.txt pair under ./<split>/ and writes one JSON line
    per document with keys: id, tokens, ner_tags (integer IDs).
    Prints a skip notice and returns early when the directory is missing.
    """
    input_dir = Path(split)
    output_file = f"{split}.jsonl"

    if not input_dir.exists():
        print(f"Directory not found: {input_dir}. Skipping split.")
        return

    with open(output_file, "w", encoding="utf-8") as outfile:
        for ann_file in sorted(input_dir.glob("*.ann")):
            txt_file = ann_file.with_suffix(".txt")
            if not txt_file.exists():
                # Orphan .ann with no source text -- nothing to tokenize.
                continue

            with open(txt_file, "r", encoding="utf-8") as f:
                text = f.read()

            tokens_with_spans = tokenize_with_spans(text)
            if not tokens_with_spans:
                continue

            with open(ann_file, "r", encoding="utf-8") as f:
                annotations = parse_annotations(f)

            ner_tags = apply_bio_tags(tokens_with_spans, annotations)
            # Labels outside NER_TAGS fall back to the "O" id.
            ner_tag_ids = [tag2id.get(tag, tag2id["O"]) for tag in ner_tags]

            # One JSON object per document, one document per line.
            outfile.write(json.dumps({
                "id": txt_file.stem,
                "tokens": [t["text"] for t in tokens_with_spans],
                "ner_tags": ner_tag_ids,
            }) + "\n")

    print(f"Successfully created {output_file}")


# Process each split directory (train, dev, test).
for split in ["train", "dev", "test"]:
    print(f"\nProcessing '{split}' split...")
    convert_split(split)

print("\nConversion complete! ✨")