import re
import gradio as gr
from collections import defaultdict
import html
from typing import List

import os
import subprocess
import sys

# # Ensure correct hub version before anything else
# subprocess.run([sys.executable, "-m", "pip", "install", "--upgrade", "huggingface_hub==0.24.5"], check=True)

# # Optional: safety patch
# import huggingface_hub.constants as constants
# if not hasattr(constants, "HF_HUB_ENABLE_HF_TRANSFER"):
#     constants.HF_HUB_ENABLE_HF_TRANSFER = False

GH_TOKEN = os.getenv("GH_TOKEN")
if GH_TOKEN:
    subprocess.run(
        [
            "pip", "install",
            f"git+https://{GH_TOKEN}:x-oauth-basic@github.com/rafmacalaba/ai4data_use.git"
        ],
        check=True
    )
else:
    print("GH_TOKEN not found. Private ai4data_use will NOT be installed.")

from ai4data import extract_from_text, deduplicate_mentions

DATA_MODEL_ID = "rafmacalaba/datause-extraction-v3-finetuned"

def tokenize_text(text: str) -> List[str]:
    """Tokenize the input text into a list of tokens (words, punctuation)."""
    return re.findall(r'\w+(?:[-_]\w+)*|\S', text)

def truncate_text(text: str, max_tokens: int = 300) -> str:
    """
    Tokenize the text and truncate to the first `max_tokens` tokens,
    rejoining them into a valid string.
    """
    tokens = tokenize_text(text)
    if len(tokens) <= max_tokens:
        return text.strip()
    truncated = tokens[:max_tokens]
    return " ".join(truncated).strip() + " ..."

def process_text(text, distance_threshold=1000, short_threshold=100, debug=False, precomputed_results=None):
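    """
    Convert raw extraction results into (text, label) spans for gr.HighlightedText.

    Dataset spans are highlighted first (vague dataset mentions absorbed by the
    supersession rule are skipped). Relation spans are linked to dataset mentions
    within `distance_threshold` characters (`short_threshold` for short-range
    relations such as "data description" and "data type") and are only highlighted
    when they do not overlap a dataset span. Returns (highlight_output, results).
    """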
    if not text.strip():
        return [], []

    # Use precomputed results if provided
    results = precomputed_results if precomputed_results is not None else extract_from_text(text)
    if not results:
        return [], []

    base_text = results[0]["text"].strip()

    entity_map = defaultdict(list)
    dataset_spans = []  # (start, end, text, label)
    all_relations = []
    absorbed_vague_datasets = set()

    # --- 1️⃣ Collect dataset spans ---
    for item in results:
        ds = item.get("datasets")
        if ds:
            start, end = ds["start"], ds["end"]
            ds_label = ds["label"]
            ds_text = ds["text"]
            dataset_spans.append((start, end, ds_text, ds_label))

    # --- 2️⃣ Collect relations (keep for tree even if overlapping) ---
    seen_descriptions = set()
    for item in results:
        for rel in item.get("relations", []):
            start, end = rel["start"], rel["end"]
            src = rel["source"]
            relation = rel["relation"].strip().lower()
            target_text = base_text[start:end].strip()

            # Skip implausible year relations
            if relation in {"reference year", "publication year"} and not re.search(r"\b\d{4}\b", target_text):
                continue

            limit = short_threshold if relation in {"data description", "data type"} else distance_threshold
            linked_datasets = []

            # --- Find nearby datasets within threshold ---
            if dataset_spans:
                for ds_start, ds_end, ds_text, _ in dataset_spans:
                    dist = abs(ds_start - start)
                    if dist <= limit:
                        linked_datasets.append(ds_text)

            # --- If none found, apply shared-relation logic ---
            if not linked_datasets:
                # Shared or global relations — always apply to all datasets
                if relation in {"publisher", "reference year", "publication year", "usage context"} and dataset_spans:
                    linked_datasets = [d[2] for d in dataset_spans]
                else:
                    continue  # skip too-distant or unrelated relation

            # --- Supersession rule: vague dataset near named one ---
            if relation in {"data type"}:
                for ds_start, ds_end, ds_text, ds_label in dataset_spans:
                    # only absorb if relation's source explicitly mentions this dataset
                    if ds_label == "vague" and ds_text in rel["source"] and abs(ds_start - start) <= short_threshold:
                        absorbed_vague_datasets.add(ds_text)

            # --- Deduplication for data description ---
            if relation == "data description":
                key = target_text.lower()
                if key in seen_descriptions:
                    continue
                seen_descriptions.add(key)

            # --- Store relation for all linked datasets ---
            for ds_text in linked_datasets:
                relation_entry = {
                    "start": start,
                    "end": end,
                    "relation": relation,
                    "src": ds_text,
                }

                # only store text value for usage context
                if relation == "usage context":
                    relation_entry["value"] = target_text or ""

                all_relations.append(relation_entry)
                if debug:
                    print(f"Linked {relation}{ds_text}")

    # --- 3️⃣ Add dataset highlights (priority layer) ---
    for ds_start, ds_end, ds_text, ds_label in dataset_spans:
        if ds_label == "vague" and ds_text in absorbed_vague_datasets:
            continue
        entity_map[(ds_start, ds_end)].append(f"dataset:{ds_label}")

    # --- 4️⃣ Add relation spans, skip those overlapping datasets ---
    relation_priority = [
        "acronym", "publisher", "data type", "data description",
        "reference year", "publication year", "data geography", "usage context"
    ]

    relation_map = defaultdict(list)
    for rel in all_relations:
        key = (rel["start"], rel["end"])
        relation_map[key].append(rel["relation"])

    for (start, end), rels in relation_map.items():
        # --- improved overlap: skip if relation is fully or partially inside a dataset span ---
        overlaps_dataset = False
        for ds_start, ds_end, _, _ in dataset_spans:
            if (start >= ds_start and end <= ds_end) or (start < ds_end and end > ds_start):
                overlaps_dataset = True
                break

        if overlaps_dataset:
            continue  # keep for tree, not highlight

        # choose top relation if multiple overlap on same span
        if len(rels) > 1:
            rels.sort(key=lambda r: relation_priority.index(r) if r in relation_priority else len(relation_priority))
        top_relation = rels[0]
        entity_map[(start, end)].append(top_relation)

    # --- 5️⃣ Rebuild final text output for Gradio highlight ---
    sorted_spans = sorted(entity_map.items(), key=lambda x: x[0][0])
    output = []
    last_idx = 0
    for (start, end), labels in sorted_spans:
        if start > last_idx:
            output.append((base_text[last_idx:start], None))
        substring = base_text[start:end]
        label = ", ".join(labels)
        output.append((substring, label))
        last_idx = end

    if last_idx < len(base_text):
        output.append((base_text[last_idx:], None))

    return output, results



# --- 🌍 Long, realistic research examples ---
example_texts = [
    # 1️⃣ Ghana mining + household surveys
    """Introduction The mining sector in Africa is growing rapidly and remains the main recipient of foreign direct investment ( World Bank 2011 ). The welfare effects of this sector are not well understood, although a literature has recently developed around this question. The main contribution of this paper is to shed light on the welfare effects of gold mining in a detailed, in-depth country study of Ghana, a country with a long tradition of gold mining and a recent, large expansion in capital-intensive and industrial-scale production. We use two complementary geocoded household data sets to analyze outcomes in Ghana: the Demographic and Health Survey ( DHS ) and the Ghana Living Standard Survey ( GLSS ), both of which provide information on household welfare, education, and demographic outcomes. The empirical analysis relies on district-level spatial models and difference-in-differences estimation to capture local spillover effects. We also examine how outcomes vary by proximity to large mines and across survey years from 2005 to 2015, controlling for baseline characteristics and mining expansion phases.""",

    # 2️⃣ Iran Labor Force Survey (LFS) and HEIS: Afghan refugees in Iran
    """This study analyzes labor and household data in the context of Afghan refugees in Iran. The national household surveys in Iran have good coverage of them. 
The Labor Force Survey (LFS) has been conducted in the middle of each quarter since spring 2005 and is the primary source of employment statistics in Iran. 
The LFS covers a wide range of topics including household members’ demographic and employment status, such as education, migration, working hours, industry, occupation, and experience (but not wage and income). 
Importantly for this research, the nationality of each household member is also inquired in this survey. The sampling of LFS is on a rotating panel basis in the sense that each household is sampled in two consecutive seasons of two consecutive years. 
This feature enables us to observe each individual’s change in employment status and compare it between two communities. Table 1 lists the LFS rounds with the available sample of Afghan refugees. 
HEIS (Household Expenditure and Income Survey) has been conducted annually since 1963, but its raw data is available from 1984 onwards.""",

    # 3️⃣ LAPOP opinion survey in Peru
    """Observations are also spatially identified at the municipality level, but here we focus on variation in the Venezuelan share of the population at the province level, of which there are 196, as these are best representative of local labor markets. The Latin American Public Opinion Project (LAPOP) is an opinion survey conducted bi-annually in all countries in Latin America and designed to be representative of urban populations. This was fielded in Peru in 2010, 2012, 2014, 2017 and 2019 and consists of about 2,000 observations from mostly urban areas.""",

    # 4️⃣ ENPOVE survey of Venezuelan immigrants in Peru
    """Such null effects of refugee migration on native attitudes in host communities are also identified by Zhou et al. (2021) for the case of Sudanese refugee immigration to Uganda. Our analysis relies on data from the following sources: the Encuesta Dirigida a la Población Venezolana que Reside en el País (ENPOVE), which is a specialized survey of Venezuelans living in Peru conducted by the National Institute of Statistics (INEI) in December 2018. The survey covers five main urban areas in the country where Venezuelan immigrants were most likely to be present.""",
]

def make_tree_from_highlight(highlight_output, max_distance=250):
    """
    Build dataset–relation tree using simplified labels from process_text().
    Proximity-based grouping: attach relations to nearest dataset mention.
    """
    if not highlight_output:
        return "<i>No relations found.</i>"

    # Collect dataset mentions and relation mentions
    datasets = []
    relations = []
    for i, (segment, label) in enumerate(highlight_output):
        if not label:
            continue
        labels = [lbl.strip() for lbl in label.split(",")]
        for lbl in labels:
            if lbl.startswith("dataset:"):
                datasets.append({
                    "idx": i,
                    "name": segment.strip(),
                    "label": lbl.split(":")[1].strip()
                })
            elif lbl in {
                "acronym", "publisher", "data type", "data description",
                "reference year", "publication year", "data geography", "usage context"
            }:
                # ✅ Store 'target' and 'value' separately — only for usage context
                rel_entry = {"idx": i, "type": lbl, "target": segment.strip()}
                if lbl == "usage context":
                    rel_entry["value"] = segment.strip()  # target text = value
                relations.append(rel_entry)

    if not datasets and not relations:
        return "<i>No relations found.</i>"

    # Build a dict to hold dataset → {relation_type → [targets]}
    tree = {ds["name"]: {"_dataset_label": ds["label"]} for ds in datasets}

    # Attach relations to nearest dataset mention (proximity-based)
    for rel in relations:
        if not datasets:
            continue
        nearest = min(datasets, key=lambda ds: abs(ds["idx"] - rel["idx"]))
        if abs(nearest["idx"] - rel["idx"]) <= max_distance:
            tree.setdefault(nearest["name"], {"_dataset_label": nearest["label"]})
            rel_type = rel["type"]
            # ✅ store 'value' separately for usage context
            if rel_type == "usage context":
                tree[nearest["name"]].setdefault(rel_type, set()).add(rel.get("value", ""))
            else:
                tree[nearest["name"]].setdefault(rel_type, set()).add(rel["target"])

    # --- Render HTML ---
    html_out = ["<div style='font-family:monospace; font-size:0.9em; line-height:1.5;'>"]
    for ds_name, rels in sorted(tree.items()):
        ds_label = rels.get("_dataset_label")
        ds_badge = f" <span style='color:gray;'>[{html.escape(ds_label)}]</span>" if ds_label else ""
        html_out.append(
            f"<details open><summary><b style='color:#4da6ff;'>{html.escape(ds_name)}</b>{ds_badge}</summary><ul>"
        )
        for rel_type, targets in sorted(rels.items()):
            if rel_type == "_dataset_label":
                continue
            for t in sorted(targets):
                # ✅ Conditional rendering for usage context
                if rel_type == "usage context":
                    html_out.append(
                        f"<li><span style='color:#80c904;'>{html.escape(rel_type)}</span>: "
                        f"<i>{html.escape(t)}</i></li>"
                    )
                else:
                    html_out.append(
                        f"<li><span style='color:#80c904;'>{html.escape(rel_type)}</span>: {html.escape(t)}</li>"
                    )
        html_out.append("</ul></details>")
    html_out.append("</div>")

    return "\n".join(html_out)


def make_relation_tree_from_results(highlight_output, results):
    """
    Build relation tree directly from model results,
    ensuring all relations (even those not highlighted) are captured.
    """
    if not results:
        return "<i>No extracted relations found.</i>"

    # Collect dataset mentions and their relations
    datasets = []
    tree = {}

    for item in results:
        ds = item.get("datasets")
        rels = item.get("relations", [])
        if not ds:
            continue

        ds_name = ds.get("text", "").strip()
        ds_label = ds.get("label", "")
        if not ds_name:
            continue

        # Initialize dataset node
        if ds_name not in tree:
            tree[ds_name] = {"_dataset_label": ds_label}

        # Collect all relations
        for rel in rels:
            rel_type = rel.get("relation", "").strip().lower()
            target = rel.get("target", "").strip()

            # Skip empty or malformed relations
            if not rel_type or not target:
                continue

            # Special handling for usage context (value-only)
            if rel_type == "usage context" and "value" in rel:
                target = rel["value"]

            # Attach relation under this dataset
            tree[ds_name].setdefault(rel_type, set()).add(target)

    # Render HTML
    html_out = ["<div style='font-family:monospace; font-size:0.9em; line-height:1.5;'>"]
    for ds_name, rels in sorted(tree.items()):
        ds_label = rels.get("_dataset_label", "")
        ds_badge = f" <span style='color:gray;'>[{html.escape(ds_label)}]</span>" if ds_label else ""
        html_out.append(
            f"<details open><summary><b style='color:#4da6ff;'>{html.escape(ds_name)}</b>{ds_badge}</summary><ul>"
        )
        for rel_type, targets in sorted(rels.items()):
            if rel_type == "_dataset_label":
                continue
            for t in sorted(targets):
                html_out.append(
                    f"<li><span style='color:#80c904;'>{html.escape(rel_type)}</span>: {html.escape(t)}</li>"
                )
        html_out.append("</ul></details>")
    html_out.append("</div>")

    return "\n".join(html_out)

# ========================================
# Build Tree from Deduplicated Mentions
# ========================================

def make_relation_tree_from_dedup(dedup_results):
    """
    Build a cleaned, deduplicated relation tree with prefiltering.
    - Publishers that are dataset names are removed
    - Years validated with regex \b\d{4}\b
    - Empty/noisy values removed
    """

    if not dedup_results or not isinstance(dedup_results, list):
        return "<i>No deduplicated results found.</i>"

    # --- prepare dataset name list for filtering publisher noise ---
    dataset_names = {d.get("text", "").lower() for d in dedup_results if d.get("text")}
    relation_priority = [
        "acronym", "author", "publisher", "data type", "data description",
        "reference year", "publication year", "data geography",
        "reference population", "usage context"
    ]

    def _filter_values(rel_type, values):
        """relation-specific filtering logic"""
        if not values:
            return []

        # ensure list
        if isinstance(values, str):
            values = [values]
        elif not isinstance(values, (list, set, tuple)):
            return []

        clean = []
        for v in values:
            if not v or not isinstance(v, str):
                continue
            val = v.strip()
            if not val:
                continue

            # publisher should not be dataset name
            if rel_type == "publisher":
                if val.lower() in dataset_names:
                    continue
                # also skip if it's a dataset acronym
                if any(val.lower() in str(a).lower() for d in dedup_results for a in d.get("acronym", [])):
                    continue

            # year fields must contain 4-digit year
            if rel_type in {"reference year", "publication year"}:
                if not re.search(r"\b\d{4}\b", val):
                    continue

            # simple length sanity check
            if len(val) > 150:
                continue

            clean.append(val)
        return sorted(set(clean))

    # --- Build HTML output ---
    html_out = ["<div style='font-family:monospace; font-size:0.9em; line-height:1.5;'>"]

    for entry in dedup_results:
        ds_name = entry.get("text", "").strip()
        ds_label = entry.get("label", "")
        if not ds_name:
            continue

        ds_badge = f" <span style='color:gray;'>[{html.escape(ds_label)}]</span>" if ds_label else ""
        html_out.append(
            f"<details open><summary><b style='color:#4da6ff;'>{html.escape(ds_name)}</b>{ds_badge}</summary><ul>"
        )

        rel_keys = [k for k in entry.keys() if k not in {
            "text", "label", "score", "count", "form_counts", "other_datasets",
            "mentioned_in_list", "mentioned_in_sentence_list", "pages",
            "sources", "start_indices", "end_indices", "raw_contexts"
        }]

        rel_keys = sorted(
            rel_keys,
            key=lambda r: relation_priority.index(r) if r in relation_priority else len(relation_priority)
        )

        for rel_key in rel_keys:
            vals = _filter_values(rel_key, entry.get(rel_key))
            if not vals:
                continue
            html_out.append(
                f"<li><span style='color:#80c904;'>{html.escape(rel_key)}</span>: "
                f"{html.escape('; '.join(vals))}</li>"
            )

        html_out.append("</ul></details>")

    html_out.append("</div>")
    return "\n".join(html_out)


with gr.Blocks() as demo:
    gr.Markdown("## 🧠 AI for Data Use: Dataset Extraction")

    inp = gr.Textbox(
        label="Input Text",
        lines=10,
        placeholder="Paste or type a paragraph here..."
    )

    run_btn = gr.Button("🚀 Run Extraction")
    out = gr.HighlightedText(label="Highlighted Datasets and Relations")

    tree_btn = gr.Button("🧭 Show / Refresh Relation Tree")
    tree_html = gr.HTML(label="Relation Tree", visible=False)

    model_state = gr.State()      # raw extract_from_text() results
    highlight_state = gr.State()  # processed highlights

    gr.Examples(
        examples=example_texts,
        inputs=inp,
        label="🧪 Try these examples"
    )

    # --- main pipeline ---
    def run_pipeline(text):
        text = truncate_text(text, max_tokens=300)
        highlights, results = process_text(text)
        return highlights, results, highlights
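
    # Click chain: first render the highlights and cache the raw model results,
    # then build the deduplicated relation tree from those cached results.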

    run_btn.click(
        fn=run_pipeline,
        inputs=inp,
        outputs=[out, model_state, highlight_state]
    ).then(
        fn=lambda h, m: gr.update(
            visible=True,
            value=make_relation_tree_from_dedup(deduplicate_mentions(m))
        ),
        inputs=[highlight_state, model_state],
        outputs=tree_html
    )

    # --- manual refresh ---
    def refresh_relation_tree(h, m):
        if not h or not m:
            return gr.update(visible=True, value="<i>No extracted data yet — run extraction first.</i>")
        html_tree = make_relation_tree_from_dedup(deduplicate_mentions(m))
        return gr.update(visible=True, value=html_tree)

    tree_btn.click(fn=refresh_relation_tree, inputs=[highlight_state, model_state], outputs=tree_html)

    gr.Markdown(f"""
<hr style='border: none; border-top: 1px solid #333; margin: 20px 0;'>

### AI for Data Use: Dataset Extraction

This tool identifies **dataset mentions** (e.g., *Demographic and Health Survey*, *Living Standards Measurement Study*, etc.) and extracts **contextual metadata** such as:
- publisher  
- publication year  
- reference year
- geography  
- acronym  
- reference population  
- data description
- data type
- usage context

**Usage Context Definitions**
- **Primary mention** – the dataset is the **main source of analysis or results** in the study.  
- **Supporting mention** – the dataset is **used alongside other data** to complement or validate findings.  
- **Background mention** – the dataset is **mentioned for context or comparison** but **not used** in the actual analysis.

#### How to Use
1. Paste or type text into the input box (left), or select one of the provided examples.  
2. Click **🚀 Run Extraction** to process the text.  
3. The model will **highlight all detected dataset mentions** and related entities (e.g., publisher, geography, year, usage context) directly in the text.  
4. Below the highlights, a **deduplicated relation tree** will automatically appear, showing each dataset with its extracted metadata and filtered attributes.  
5. You can click **🧭 Show / Refresh Relation Tree** anytime to rebuild or inspect the deduplicated metadata view.


<hr style='border: none; border-top: 1px solid #333; margin: 20px 0;'>

**Resources**
- Model: https://huggingface.co/{DATA_MODEL_ID}
- Paper (ArXiv): https://arxiv.org/pdf/2502.10263
- GLiNER Repo: https://github.com/urchade/GLiNER
- Project Docs: https://worldbank.github.io/ai4data-use/docs/introduction.html
""")


demo.launch()