rafmacalaba committed on
Commit
82eb38c
·
1 Parent(s): 9540b6f
Files changed (2) hide show
  1. app.py +348 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from ai4data import extract_from_text

# Hugging Face model ID; shown in the "Resources" links of the UI.
DATA_MODEL_ID = "rafmacalaba/datause-extraction-v3-finetuned"

# Highlight colors for dataset-mention spans, keyed by lowercased label.
DATASET_COLORS = {
    "named": "#ff6b6b",
    "unnamed": "#51cf66",
    "vague": "#fcc419",
    "invalid": "#868e96"
}

# Highlight colors for relation spans, keyed by lowercased relation name.
RELATION_COLORS = {
    "acronym": "#4dabf7",               # REL_ACR
    "author": "#f06595",                # REL_AUT
    "data description": "#e599f7",      # REL_DSC
    "data geography": "#20c997",        # REL_GEO
    "data type": "#339af0",             # REL_TYP
    "publication year": "#f783ac",      # REL_PY
    "publisher": "#4dabf7",             # REL_PUB
    "reference population": "#8e44ad",  # REL_POP
    "reference year": "#ffd43b",        # REL_RY
    # Removed on purpose — we are filtering these out
    # "usage context": "#e8590c"
}

# Shorter display names for a few relation labels (used in the inline badge).
RELATION_DISPLAY = {
    "data geography": "geography",
    "data description": "description",
}
30
+
31
def overlaps(a, b):
    """Return True when spans *a* and *b* share at least one character position.

    Spans are dicts with half-open ``start``/``end`` character offsets.
    """
    return a["start"] < b["end"] and b["start"] < a["end"]
34
+
35
+
36
+ # def collect_spans(res):
37
+ base = res[0]["text"]
38
+
39
+ dataset_spans = []
40
+ relation_spans = []
41
+ usage_context_map = {}
42
+ # --- Collect DATASETS (always keep) ---
43
+ for item in res:
44
+ ds = item.get("datasets")
45
+ if ds:
46
+ dataset_spans.append({
47
+ "start": ds["start"],
48
+ "end": ds["end"],
49
+ "label": ds["label"].lower(),
50
+ "kind": "dataset",
51
+ "text": base[ds["start"]:ds["end"]]
52
+ })
53
+
54
+ # --- Collect RELATIONS (but skip usage context) ---
55
+ for item in res:
56
+ for r in item.get("relations", []):
57
+ if r["relation"].lower() == "usage context": # remove usage context entirely
58
+ continue
59
+ if r.get("score", 0) < 0.6:
60
+ continue # ignore low-confidence relations
61
+ relation_spans.append({
62
+ "start": r["start"],
63
+ "end": r["end"],
64
+ "label": r["relation"].lower(),
65
+ "kind": "relation",
66
+ "text": base[r["start"]:r["end"]],
67
+ "source": r['source']
68
+ })
69
+
70
+ # --- Priority Rule: Dataset spans win over relation overlaps ---
71
+ def overlaps(a, b):
72
+ return not (a["end"] <= b["start"] or a["start"] >= b["end"])
73
+
74
+ dataset_spans = sorted(dataset_spans, key=lambda s: s["start"])
75
+
76
+ filtered_relations = []
77
+ for rel in relation_spans:
78
+ if any(overlaps(rel, ds) for ds in dataset_spans):
79
+ continue
80
+ filtered_relations.append(rel)
81
+
82
+ # --- Deduplicate spans ---
83
+ uniq = {}
84
+ for span in dataset_spans + filtered_relations:
85
+ key = (span["start"], span["end"], span["label"], span["kind"])
86
+ uniq[key] = span # last wins (order doesn't matter because datasets sorted first)
87
+
88
+ spans = list(uniq.values())
89
+ spans = sorted(spans, key=lambda s: s["start"])
90
+
91
+ return spans
92
+
93
def collect_spans(res):
    """Turn extraction output into a flat, sorted list of highlight spans.

    Parameters
    ----------
    res : list[dict]
        Extraction items that all share the same source text
        (``res[0]["text"]``).  Each item may carry a ``"datasets"`` span and a
        list of ``"relations"``.

    Returns
    -------
    list[dict]
        Deduplicated spans sorted by start offset.  Dataset spans carry a
        ``"usage"`` key (the item's usage-context string, or None); relation
        spans carry ``"source"`` and ``"source_item"``.  Usage-context
        relations, relations scoring below 0.6, and relations overlapping a
        dataset span are all dropped — dataset highlights always win.
    """
    base = res[0]["text"]

    dataset_spans = []
    relation_spans = []

    # Usage context is indexed by item position, not by string matching, so it
    # stays aligned with the dataset extracted from the same item.
    usage_context_for_item = {}

    # --- First: capture usage context by item index ---
    for idx, item in enumerate(res):
        for r in item.get("relations", []):
            if r["relation"].lower() == "usage context":
                # `or ""` also guards against an explicit None target, which
                # would otherwise crash on .lower().
                usage_context_for_item[idx] = (r.get("target") or "").lower()

    # --- Collect DATASETS and attach usage context correctly ---
    for idx, item in enumerate(res):
        ds = item.get("datasets")
        if ds:
            dataset_spans.append({
                "start": ds["start"],
                "end": ds["end"],
                "label": ds["label"].lower(),
                "kind": "dataset",
                "text": base[ds["start"]:ds["end"]],
                "usage": usage_context_for_item.get(idx)  # aligned by index — no string matching
            })

    # --- Collect RELATIONS (excluding usage context & low confidence) ---
    for idx, item in enumerate(res):
        for r in item.get("relations", []):
            rel_label = r["relation"].lower()
            if rel_label == "usage context":
                continue
            if r.get("score", 0) < 0.6:
                continue  # ignore low-confidence relations
            relation_spans.append({
                "start": r["start"],
                "end": r["end"],
                "label": rel_label,
                "kind": "relation",
                "text": base[r["start"]:r["end"]],
                "source": r["source"],
                "source_item": idx  # align with dataset via index
            })

    # --- Priority rule: dataset spans win over overlapping relations ---
    dataset_spans.sort(key=lambda s: s["start"])
    filtered_relations = [
        rel for rel in relation_spans
        if not any(rel["start"] < ds["end"] and ds["start"] < rel["end"]
                   for ds in dataset_spans)
    ]

    # --- Deduplicate (same offsets + label + kind) ---
    uniq = {(s["start"], s["end"], s["label"], s["kind"]): s
            for s in dataset_spans + filtered_relations}

    return sorted(uniq.values(), key=lambda s: s["start"])
158
+
159
+
160
def render_highlight(text):
    """Run extraction on *text* and return an HTML string with inline highlights.

    Dataset mentions and their (filtered) relations are wrapped in styled
    ``<span>`` elements with a CSS hover tooltip; the remaining text is
    HTML-escaped and rendered as-is.
    """
    # Local import: the function builds its output in a local variable named
    # `html`, so a module-level `import html` would be shadowed.
    from html import escape

    res = extract_from_text(text, use_classifier_gate=False)
    base = res[0]["text"]

    spans = collect_spans(res)

    if not spans:
        # Escape so arbitrary input text cannot inject markup into the panel.
        return f"<div style='white-space:pre-wrap;'>{escape(base)}</div>"

    html = """<style>
    .hl {
    position: relative;
    cursor: pointer;
    display: inline-block;
    }

    .hl:hover::after {
    content: attr(data-tip);
    position: absolute;
    top: -2.2em;
    left: 0;
    background: #fff; /* white tooltip */
    color: #222; /* dark text */
    padding: 4px 8px;
    font-size: 0.78em;
    border-radius: 6px;
    border: 1px solid #ccc; /* subtle border */
    box-shadow: 0px 2px 6px rgba(0,0,0,0.15); /* soft shadow */
    white-space: nowrap;
    z-index: 9999;
    opacity: 1;
    }

    .hl::after {
    opacity: 0;
    pointer-events: none;
    transition: opacity 0.05s; /* still instant, but fades cleanly */
    }
    </style>
    <div style='white-space:pre-wrap; font-family:monospace;'>
    """

    last = 0

    for span in spans:
        # Plain text between highlights (escaped for safe HTML rendering).
        html += escape(base[last:span["start"]])

        if span["kind"] == "dataset":
            color = DATASET_COLORS.get(span["label"], "#999")
        else:
            color = RELATION_COLORS.get(span["label"], "#74c0fc")

        if span["kind"] == "dataset":
            # Dataset tooltip shows the usage context when the model produced one.
            tooltip = span["label"] if not span.get("usage") else f"usage context: {span['usage']}"
        else:
            # `source` already names the dataset this relation corresponds to.
            tooltip = f"{span['label']} for {span['source']}"

        display_label = RELATION_DISPLAY.get(span["label"], span["label"])
        html += (
            # escape() (quote=True by default) keeps quotes in the tooltip from
            # breaking out of the single-quoted data-tip attribute.
            f"<span class='hl' data-tip='{escape(tooltip)}' "
            f"style='background:{color}22; border:1px solid {color}; "
            f"border-radius:4px; padding:0px 3px; margin:0 2px;'>"
            f"{escape(span['text'])}"
            f"<span style='border:1px solid {color}; color:{color}; "
            f"font-size:0.7em; border-radius:3px; padding:1px 3px; margin-left:5px;'>"
            f"{display_label}</span></span>"
        )
        last = span["end"]

    html += escape(base[last:])
    html += "</div>"

    return html
240
+
241
# ----------------------------------------------------------------------------
# Gradio UI: dark-themed two-column layout (input text -> highlighted HTML).
# ----------------------------------------------------------------------------
with gr.Blocks(
    title="AI for Data Use: Dataset Extraction",
    css="""
    body, .gradio-container { background-color: #111 !important; color: #e6e6e6 !important; }

    textarea, input, .gr-textbox {
    background: #1a1a1a !important;
    color: #f2f2f2 !important;
    border: 1px solid #444 !important;
    border-radius: 6px !important;
    }
    textarea:focus, input:focus {
    border-color: #888 !important;
    box-shadow: 0 0 0 1px #888 !important;
    }

    button {
    background: #333 !important;
    color: #eee !important;
    border-radius: 6px !important;
    border: 1px solid #444 !important;
    padding: 8px 14px !important;
    }
    button:hover {
    background: #444 !important;
    border-color: #666 !important;
    }

    .gr-html {
    background: #1a1a1a !important;
    color: #fff !important;
    border-radius: 6px !important;
    border: 1px solid #333 !important;
    padding: 14px !important;
    min-height: 200px;
    }
    """
) as demo:

    gr.Markdown("# AI for Data Use: Dataset Extraction")

    with gr.Row():
        with gr.Column(scale=1):
            input_box = gr.Textbox(lines=14, label="Input Text")

        with gr.Column(scale=1):
            output_box = gr.HTML(
                label="Highlighted Output",
                value="<div style='min-height:200px; border:1px solid #444; border-radius:8px; padding:10px; opacity:0.6; text-align:center;'>Waiting for input…</div>"
            )

    submit_btn = gr.Button("Submit")

    submit_btn.click(
        fn=render_highlight,
        inputs=input_box,
        outputs=output_box
    )

    # Canned inputs demonstrating named, unnamed, and vague dataset mentions.
    gr.Examples(
        examples=[
            ["We examine early childhood nutrition and health inequality across Sub-Saharan Africa by combining information from the Demographic and Health Surveys (DHS, various years) and the Multiple Indicator Cluster Surveys (MICS). Anthropometric outcomes (height-for-age, weight-for-age) are standardized using WHO Child Growth Standards. To account for environmental exposure, we align survey cluster coordinates with gridded temperature and precipitation data from the CRU TS 4.06 dataset at the nearest spatial cell."],
            ["Introduction The mining sector in Africa is growing rapidly and is the main recipient of foreign direct investment ( World Bank 2011 ). The welfare effects of this sector are not well understood, although a literature has recently developed around this question. The main contribution of this paper is to shed light on the welfare effects of gold mining in a detailed, in-depth country study of Ghana, a country with a long tradition of gold mining and a recent, large expansion in capital - intensive and industrial-scale production. A second contribution of this paper is to show the importance of decomposing the effects with respect to distance from the mines. Given the spatial heterogeneity of the results, we explore the effects in an individual-level, difference-in-differences analysis by using spatial lag models to allow for nonlinear effects with distance from mine. We also allow for spillovers across districts, in a district-level analysis. We use two complementary geocoded household data sets to analyze outcomes in Ghana: the Demographic and Health Survey ( DHS ) and the Ghana Living Standard Survey ( GLSS ), which provide information on a wide range of welfare outcomes."],
            ["The main mining data is a dataset from InterraRMG covering all large-scale mines in Ghana, explained in more detail in section 3. 1. This dataset is linked to survey data from the DHS and GLSS, using spatial information. Geographical coordinates of enumeration areas in GLSS are from Ghana Statistical Services ( GSS ). 2 Point coordinates ( global positioning system [ GPS ] ) for the surveyed DHS clusters3 allow us to match all individuals to one or several mineral mines. We do this in two ways. First, we calculate distance spans from an exact mine location given by its GPS coordinates, and match surveyed individuals to mines. "],
            ["We study learning outcomes by linking standardized test score data from the Programme for International Student Assessment (PISA) and the Trends in International Mathematics and Science Study (TIMSS) to school resource indicators compiled by the UNESCO Institute for Statistics (UIS). Household socioeconomic background is proxied using parental education information extracted from the Demographic and Health Surveys (DHS) female and household datasets. To examine the role of inequality, we calculate within-school and between-school variance components and correlate these with school funding data from national Ministry of Education financial reports. The goal is to understand how unequal resource allocation contributes to learning gaps across income groups and geographic regions."],
            ["Patterns of forced displacement are analyzed using the UNHCR Refugee Population Statistics Database, which provides annual country-to-country asylum flows and refugee stock counts. To understand displacement drivers, we integrate conflict event data from the ACLED conflict monitoring system and national political stability indices from the World Governance Indicators (WGI). We additionally incorporate bilateral migration dyads from the World Bank Global Bilateral Migration Database to capture historical migration ties. Geographic exposure to violence is assigned using region-level coordinates and spatial joining procedures. This dataset allows us to estimate the relationship between escalating conflict intensity and subsequent cross-border population movements."],
        ],
        inputs=input_box
    )

    # NOTE: f-string so {DATA_MODEL_ID} is interpolated into the model link —
    # the original plain string rendered the literal placeholder in the UI.
    gr.Markdown(f"""
    <hr style='border: none; border-top: 1px solid #333; margin: 20px 0;'>

    ### AI for Data Use: Dataset Extraction

    This tool identifies **dataset mentions** (e.g., *Demographic and Health Survey*, *Living Standards and Measurement Survey*, etc.) and extracts **contextual metadata** such as:
    - publisher
    - publication year
    - reference year
    - geography
    - acronym
    - reference population
    - data description
    - data type
    Usage context:
    primary, background, supporting mention - hover over each dataset to see its usage context.


    The detected spans are **highlighted inline** with labels so you can quickly review how datasets are being referenced in text.

    #### How to Use
    1. Paste or type text into the input box (left) or use any of the examples.
    2. Click **Submit**.
    3. Review highlighted dataset mentions and relation tags (right).
    4. Hover over highlights to see details.

    <hr style='border: none; border-top: 1px solid #333; margin: 20px 0;'>

    **Resources**
    - Model: https://huggingface.co/{DATA_MODEL_ID}
    - Paper (ArXiv): https://arxiv.org/pdf/2502.10263
    - GLiNER Repo: https://github.com/urchade/GLiNER
    - Project Docs: https://worldbank.github.io/ai4data-use/docs/introduction.html
    """)

demo.launch()
347
+
348
+
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Runtime dependencies for the dataset-extraction demo Space.
gradio>=4.0.0    # UI framework
datasets
transformers
accelerate
sentencepiece
gliner           # GLiNER span-extraction models
huggingface_hub
# Local package providing `extract_from_text` (imported as `ai4data`).
-e ./ai4data_use