# simord_hf_loader.py
# HF Datasets loader that uses mediqa_oe.data.process_data.attach_transcript_section
# to merge upstream transcripts into the SIMORD annotations, exposed as the splits
# train, test1, and test2.
#
# Dataset license: CDLA-2.0-permissive (for SIMORD). Respect upstream source licenses.
#
# Requirements:
#   pip install datasets nltk requests
#   pip install -e /path/to/mediqa-oe   # or add the repo to PYTHONPATH
#
# Usage:
#   from datasets import load_dataset
#   ds = load_dataset("path/to/simord_hf_loader.py")
#   print(ds)   # splits: train, test1 (from dev.json), test2 (from test.json)
#
# The annotation splits are downloaded from the SIMORD Hub repository (see DATA_URLS
# below), and the loader auto-downloads ACI-Bench and PriMock57 from GitHub to build
# transcript_dict.
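#
# Illustrative shape of one generated example (placeholder values only, matching the
# features declared in _info below; not real SIMORD data):
#   {
#       "id": "acibench_D2N068_1_train",
#       "transcript": [{"turn_id": 0, "speaker": "[doctor]", "transcript": "..."}],
#       "expected_orders": [{"order_type": "...", "description": "...",
#                            "reason": "...", "provenance": [0]}],
#   }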

import json
import os
import re
from typing import Dict, Iterable, List, Tuple

import datasets

# ---- Import the exact merge function from your repo ----
# (Relies on your local mediqa-oe being importable)
from mediqa_oe.data.process_data import attach_transcript_section  # noqa: E402

# ---- Sources (same as your script) ----
ACI_BENCH_URL = "https://github.com/wyim/aci-bench/archive/refs/heads/main.zip"
PRIMOCK_URL = "https://github.com/babylonhealth/primock57/archive/refs/heads/main.zip"

_DESCRIPTION = """\
SIMORD loader that merges transcripts from ACI-Bench and PriMock57 into the SIMORD annotations
using mediqa_oe.data.process_data.attach_transcript_section.
"""
_CITATION = r"""@article{corbeil2025empowering,
  title={Empowering Healthcare Practitioners with Language Models: Structuring Speech Transcripts in Two Real-World Clinical Applications},
  author={Corbeil, Jean-Philippe and Ben Abacha, Asma and Michalopoulos, George and Swazinna, Patrick and Del-Agua, Miguel and Tremblay, Julien and Jeeson Daniel, Aju and Bader, Corey and Cho, Yoon-Chan and Krishnan, Parvathi and Bodenstab, Nathan and Lin, Tony and Teng, Wen and Beaulieu, Francois and Vozila, Paul},
  journal={arXiv preprint arXiv:2507.05517},
  year={2025}
}"""
_LICENSE = "CDLA-2.0-permissive"
_HOMEPAGE = "https://huggingface.co/datasets/microsoft/SIMORD"

DATA_URLS = {
    "train": "https://huggingface.co/datasets/microsoft/SIMORD/resolve/main/data/train.json",
    "dev":   "https://huggingface.co/datasets/microsoft/SIMORD/resolve/main/data/dev.json",
    "test":  "https://huggingface.co/datasets/microsoft/SIMORD/resolve/main/data/test.json",
}

# ----------------------- helpers to read upstream transcripts -----------------------

def _walk_json_files(directory: str) -> List[str]:
    out = []
    for d, _, files in os.walk(directory):
        for fn in files:
            if fn.lower().endswith(".json"):
                out.append(os.path.join(d, fn))
    out.sort()
    return out

def _read_json_records(path: str) -> Iterable[dict]:
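    """Yield records from a JSON file.

    Accepted layouts (illustrative sketches of the shapes this helper handles):
      {"data": [{...}, {...}]}  -> yields each element of "data"
      [{...}, {...}]            -> yields each list element
      {...}                     -> yields the single dict as one record
    """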
    with open(path, "r", encoding="utf-8") as f:
        data = json.load(f)
    if isinstance(data, dict) and "data" in data and isinstance(data["data"], list):
        for r in data["data"]:
            yield r
    elif isinstance(data, list):
        for r in data:
            yield r
    else:
        # single dict record
        yield data

def _normalize_id_from_aci(file_field: str, basename: str) -> str:
    # matches your script:
    # file_id = "_".join(d.get("file", "").split("-")[0:2])
    # transcript_id = "acibench_" + file_id + "_" + basename
    file_id = "_".join((file_field or "").split("-")[0:2])
    return f"acibench_{file_id}_{basename}"

def _build_aci_transcript_dict(root: str) -> Dict[str, dict]:
    """
    Mirror of read_aci_bench_data + walk_aci_bench_directory:
      looks in .../aci-bench-main/data/challenge_data_json and .../src_experiment_data_json
      and builds {transcript_id: {"transcript": <src_text>}}
    """
    tdict: Dict[str, dict] = {}
    base = None
    # Find the 'aci-bench-main' folder inside root
    for name in os.listdir(root):
        if name.startswith("aci-bench"):
            base = os.path.join(root, name)
            break
    if not base:
        return tdict

    for sub in ("data/challenge_data_json", "data/src_experiment_data_json"):
        p = os.path.join(base, sub)
        if not os.path.isdir(p):
            continue
        for fp in _walk_json_files(p):
            basename = os.path.splitext(os.path.basename(fp))[0]
            for rec in _read_json_records(fp):
                src = rec.get("src")
                file_field = rec.get("file", "")
                tid = _normalize_id_from_aci(file_field, basename)
                if src:
                    tdict[tid] = {"transcript": src}
    return tdict

def _read_text(path: str) -> str:
    with open(path, "r", encoding="utf-8") as f:
        return f.read()

def _normalize_primock_id(stem: str) -> str:
    # replicate replacements used in your script:
    # primock_id = filename.replace("day", "primock57_")
    # primock_id = primock_id.replace("consultation0", "")
    # primock_id = primock_id.replace("consultation", "")
    s = stem
    s = s.replace("day", "primock57_")
    s = s.replace("consultation0", "")
    s = s.replace("consultation", "")
    return s
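
# Worked example of the PriMock57 ID normalization above (hypothetical stem,
# chosen only to illustrate the replacements):
#   _normalize_primock_id("day1_consultation01") == "primock57_1_1"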

def _build_primock_transcript_dict(primock_path: str) -> Dict[str, dict]:
    # Run PriMock57's textgrid_to_transcript.py to convert the TextGrid files to text.
    script_path = os.path.join(primock_path, "primock57-main", "scripts", "textgrid_to_transcript.py")
    if not os.path.exists(script_path):
        print(f"Script {script_path} does not exist. Skipping PriMock57 data.")
        return {}
    transcript_path = os.path.join(primock_path, "primock57-main", "transcripts")
    primock_transcript_path = os.path.join(primock_path, "primock_transcript")
    os.system(f"python {script_path} --transcript_path {transcript_path} --output_path {primock_transcript_path}")
    print(f"PriMock57 transcripts saved to {primock_transcript_path}")

    # Walk the output directory and collect all generated .txt transcripts.
    transcript_data: Dict[str, dict] = {}
    for dirpath, _, files in os.walk(primock_transcript_path):
        for filename in files:
            if not filename.endswith(".txt"):
                continue
            file_path = os.path.join(dirpath, filename)
            primock_id = _normalize_primock_id(filename[: -len(".txt")])
            with open(file_path, "r", encoding="utf-8") as f:
                lines = [line.strip() for line in f if line.strip()]
            transcript_lines = []
            for line in lines:
                line = line.replace("Doctor:", "[doctor]")
                line = line.replace("Patient:", "[patient]")
                transcript_lines.append(line)
            transcript_data[primock_id] = {"transcript": "\n".join(transcript_lines)}

    print(f"Found {len(transcript_data)} transcripts in PriMock57 data")
    return transcript_data

def _load_annotations(path: str) -> List[dict]:
    # Expect a JSON array (list of dicts). If JSONL, we also handle it gracefully.
    if path.lower().endswith((".jsonl", ".ndjson")):
        out = []
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                    out.append(json.loads(line))
        return out
    with open(path, "r", encoding="utf-8") as f:
        data = json.load(f)
    if isinstance(data, list):
        return data
    raise ValueError(f"{path} must be a JSON list (or JSONL).")

# ----------------------- HF builder -----------------------

class SimordMergeConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)

class SimordMerge(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        SimordMergeConfig(
            name="default",
            description="SIMORD with transcripts merged via attach_transcript_section from mediqa-oe.",
        )
    ]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self) -> datasets.DatasetInfo:
        # We expose a compact schema:
        # - id: record identifier
        # - transcript: sequence of {turn_id, speaker, transcript}
        # - expected_orders: sequence of {order_type, description, reason, provenance}
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "transcript": datasets.Sequence(
                    {
                        "turn_id": datasets.Value("int32"),
                        "speaker": datasets.Value("string"),
                        "transcript": datasets.Value("string"),
                    }
                ),
                "expected_orders": datasets.Sequence(
                    {
                        "order_type": datasets.Value("string"),
                        "description": datasets.Value("string"),
                        "reason": datasets.Value("string"),
                        "provenance": datasets.Sequence(datasets.Value("int32")),
                    }
                )
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the SIMORD annotation splits and the upstream transcript sources,
        # then build the transcript lookup once and reuse it for every split.
        data_files = dl_manager.download_and_extract(DATA_URLS)

        # Map the downloaded split keys (see DATA_URLS) to the split names exposed
        # by this loader.
        split_map = {
            "train": "train",
            "dev": "test1",
            "test": "test2",
        }

        aci_root = dl_manager.download_and_extract(ACI_BENCH_URL)
        primock_root = dl_manager.download_and_extract(PRIMOCK_URL)

        self._transcript_dict = {}
        self._transcript_dict.update(_build_aci_transcript_dict(aci_root))
        self._transcript_dict.update(_build_primock_transcript_dict(primock_root))

        splits = []
        missing = []
        for key, exposed in split_map.items():
            path = data_files.get(key)
            if path and os.path.isfile(path):
                splits.append(
                    datasets.SplitGenerator(
                        name=exposed,
                        gen_kwargs={"ann_path": path},
                    )
                )
            else:
                missing.append(key)

        if not splits:
            raise FileNotFoundError(
                f"No annotation files could be downloaded from {list(DATA_URLS.values())}. "
                f"Missing splits: {missing}"
            )

        return splits

    def _generate_examples(self, ann_path: str):
        # Load one annotation split, attach transcripts in place via
        # attach_transcript_section, then yield one example per record.
        section = _load_annotations(ann_path)
        attach_transcript_section(section, self._transcript_dict)
        for idx, rec in enumerate(section):
            rid = str(rec.get("id", idx))
            turns = rec.get("transcript") or []
            # Only the fields declared in _info are emitted; anything else in the
            # record is dropped so the example matches the features schema.
            yield idx, {
                "id": rid,
                "transcript": turns,
                "expected_orders": rec.get("expected_orders") or [],
            }
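

# Minimal local smoke test (a sketch, not part of the loader API). Assumes network
# access, an importable mediqa_oe package, and a datasets version that accepts
# trust_remote_code for script-based loaders.
if __name__ == "__main__":
    ds = datasets.load_dataset(os.path.abspath(__file__), trust_remote_code=True)
    for split_name, split in ds.items():
        print(split_name, len(split))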