import re
import gradio as gr
from collections import defaultdict
import html
from typing import List
import os
import subprocess
import sys
# # Ensure correct hub version before anything else
# subprocess.run([sys.executable, "-m", "pip", "install", "--upgrade", "huggingface_hub==0.24.5"], check=True)
# # Optional: safety patch
# import huggingface_hub.constants as constants
# if not hasattr(constants, "HF_HUB_ENABLE_HF_TRANSFER"):
# constants.HF_HUB_ENABLE_HF_TRANSFER = False
GH_TOKEN = os.getenv("GH_TOKEN")
if GH_TOKEN:
subprocess.run(
[
"pip", "install",
f"git+https://{GH_TOKEN}:x-oauth-basic@github.com/rafmacalaba/ai4data_use.git"
],
check=True
)
else:
print("GH_TOKEN not found. Private ai4data_use will NOT be installed.")
from ai4data import extract_from_text, deduplicate_mentions
DATA_MODEL_ID = "rafmacalaba/datause-extraction-v3-finetuned"
def tokenize_text(text: str) -> List[str]:
"""Tokenize the input text into a list of tokens (words, punctuation)."""
return re.findall(r'\w+(?:[-_]\w+)*|\S', text)
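# Illustrative (not executed) sketch of the tokenizer's behaviour: hyphen- or
# underscore-joined words stay intact, while punctuation becomes its own token.
#
#   >>> tokenize_text("We use the DHS (2014) and panel-data methods.")
#   ['We', 'use', 'the', 'DHS', '(', '2014', ')', 'and', 'panel-data', 'methods', '.']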
def truncate_text(text: str, max_tokens: int = 300) -> str:
"""
Tokenize the text and truncate to the first `max_tokens` tokens,
rejoining them into a valid string.
"""
tokens = tokenize_text(text)
if len(tokens) <= max_tokens:
return text.strip()
truncated = tokens[:max_tokens]
return " ".join(truncated).strip() + " ..."
def process_text(text, distance_threshold=1000, short_threshold=100, debug=False, precomputed_results=None):
if not text.strip():
return [], []
# Use precomputed results if provided
results = precomputed_results if precomputed_results is not None else extract_from_text(text)
if not results:
return [], []
base_text = results[0]["text"].strip()
entity_map = defaultdict(list)
dataset_spans = [] # (start, end, text, label)
all_relations = []
absorbed_vague_datasets = set()
# --- 1️⃣ Collect dataset spans ---
for item in results:
ds = item.get("datasets")
if ds:
start, end = ds["start"], ds["end"]
ds_label = ds["label"]
ds_text = ds["text"]
dataset_spans.append((start, end, ds_text, ds_label))
# --- 2️⃣ Collect relations (keep for tree even if overlapping) ---
seen_descriptions = set()
for item in results:
for rel in item.get("relations", []):
start, end = rel["start"], rel["end"]
src = rel["source"]
relation = rel["relation"].strip().lower()
target_text = base_text[start:end].strip()
# Skip implausible year relations
if relation in {"reference year", "publication year"} and not re.search(r"\b\d{4}\b", target_text):
continue
limit = short_threshold if relation in {"data description", "data type"} else distance_threshold
linked_datasets = []
# --- Find nearby datasets within threshold ---
if dataset_spans:
for ds_start, ds_end, ds_text, _ in dataset_spans:
dist = abs(ds_start - start)
if dist <= limit:
linked_datasets.append(ds_text)
# --- If none found, apply shared-relation logic ---
if not linked_datasets:
                # Shared or global relations: always apply to all datasets
if relation in {"publisher", "reference year", "publication year", "usage context"} and dataset_spans:
linked_datasets = [d[2] for d in dataset_spans]
else:
continue # skip too-distant or unrelated relation
# --- Supersession rule: vague dataset near named one ---
if relation in {"data type"}:
for ds_start, ds_end, ds_text, ds_label in dataset_spans:
# only absorb if relation's source explicitly mentions this dataset
if ds_label == "vague" and ds_text in rel["source"] and abs(ds_start - start) <= short_threshold:
absorbed_vague_datasets.add(ds_text)
# --- Deduplication for data description ---
if relation == "data description":
key = target_text.lower()
if key in seen_descriptions:
continue
seen_descriptions.add(key)
# --- Store relation for all linked datasets ---
for ds_text in linked_datasets:
relation_entry = {
"start": start,
"end": end,
"relation": relation,
"src": ds_text,
}
# only store text value for usage context
if relation == "usage context":
relation_entry["value"] = target_text or ""
all_relations.append(relation_entry)
if debug:
print(f"Linked {relation} β†’ {ds_text}")
# --- 3️⃣ Add dataset highlights (priority layer) ---
for ds_start, ds_end, ds_text, ds_label in dataset_spans:
if ds_label == "vague" and ds_text in absorbed_vague_datasets:
continue
entity_map[(ds_start, ds_end)].append(f"dataset:{ds_label}")
# --- 4️⃣ Add relation spans, skip those overlapping datasets ---
relation_priority = [
"acronym", "publisher", "data type", "data description",
"reference year", "publication year", "data geography", "usage context"
]
relation_map = defaultdict(list)
for rel in all_relations:
key = (rel["start"], rel["end"])
relation_map[key].append(rel["relation"])
for (start, end), rels in relation_map.items():
# --- improved overlap: skip if relation is fully or partially inside a dataset span ---
overlaps_dataset = False
for ds_start, ds_end, _, _ in dataset_spans:
if (start >= ds_start and end <= ds_end) or (start < ds_end and end > ds_start):
overlaps_dataset = True
break
if overlaps_dataset:
continue # keep for tree, not highlight
# choose top relation if multiple overlap on same span
if len(rels) > 1:
rels.sort(key=lambda r: relation_priority.index(r) if r in relation_priority else len(relation_priority))
top_relation = rels[0]
entity_map[(start, end)].append(top_relation)
# --- 5️⃣ Rebuild final text output for Gradio highlight ---
sorted_spans = sorted(entity_map.items(), key=lambda x: x[0][0])
output = []
last_idx = 0
for (start, end), labels in sorted_spans:
if start > last_idx:
output.append((base_text[last_idx:start], None))
substring = base_text[start:end]
label = ", ".join(labels)
output.append((substring, label))
last_idx = end
if last_idx < len(base_text):
output.append((base_text[last_idx:], None))
return output, results
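# The pair returned by process_text() feeds both the HighlightedText widget and
# the relation-tree builders below. A minimal sketch of the expected shape
# (segment texts and labels are illustrative, not real model output):
#
#   highlights = [("We use the ", None), ("DHS", "dataset:<label>"), (" to ...", None)]
#   results    = raw extract_from_text() output: a list of dicts carrying the base
#                "text", an optional "datasets" span, and a "relations" list.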
# --- 🌍 Long, realistic research examples ---
example_texts = [
# 1️⃣ Ghana mining + household surveys
"""Introduction The mining sector in Africa is growing rapidly and remains the main recipient of foreign direct investment ( World Bank 2011 ). The welfare effects of this sector are not well understood, although a literature has recently developed around this question. The main contribution of this paper is to shed light on the welfare effects of gold mining in a detailed, in-depth country study of Ghana, a country with a long tradition of gold mining and a recent, large expansion in capital-intensive and industrial-scale production. We use two complementary geocoded household data sets to analyze outcomes in Ghana: the Demographic and Health Survey ( DHS ) and the Ghana Living Standard Survey ( GLSS ), both of which provide information on household welfare, education, and demographic outcomes. The empirical analysis relies on district-level spatial models and difference-in-differences estimation to capture local spillover effects. We also examine how outcomes vary by proximity to large mines and across survey years from 2005 to 2015, controlling for baseline characteristics and mining expansion phases.""",
    # 2️⃣ Iran Labor Force Survey (LFS) and HEIS, Afghan refugees
"""This study analyzes labor and household data in the context of Afghan refugees in Iran. The national household surveys in Iran have good coverage of them.
The Labor Force Survey (LFS) has been conducted in the middle of each quarter since spring 2005 and is the primary source of employment statistics in Iran.
The LFS covers a wide range of topics including household members' demographic and employment status, such as education, migration, working hours, industry, occupation, and experience (but not wage and income).
Importantly for this research, the nationality of each household member is also inquired in this survey. The sampling of LFS is on a rotating panel basis in the sense that each household is sampled in two consecutive seasons of two consecutive years.
This feature enables us to observe each individual's change in employment status and compare it between two communities. Table 1 lists the LFS rounds with the available sample of Afghan refugees.
HEIS (Household Expenditure and Income Survey) has been conducted annually since 1963, but its raw data is available from 1984 onwards.""",
    # 3️⃣ LAPOP opinion survey and Venezuelan migration in Peru
"""Observations are also spatially identified at the municipality level, but here we focus on variation in the Venezuelan share of the population at the province level, of which there are 196, as these are best representative of local labor markets. The Latin American Public Opinion Project (LAPOP) is an opinion survey conducted bi-annually in all countries in Latin America and designed to be representative of urban populations. This was fielded in Peru in 2010, 2012, 2014, 2017 and 2019 and consists of about 2,000 observations from mostly urban areas."""
,
    # 4️⃣ ENPOVE survey of Venezuelans in Peru
"""Such null effects of refugee migration on native attitudes in host communities are also identified by Zhou et al. (2021) for the case of Sudanese refugee immigration to Uganda. Our analysis relies on data from the following sources: the Encuesta Dirigida a la PoblaciΓ³n Venezolana que Reside en el PaΓ­s (ENPOVE), which is a specialized survey of Venezuelans living in Peru conducted by the National Institute of Statistics (INEI) in December 2018. The survey covers five main urban areas in the country where Venezuelan immigrants were most likely to be present.""",
]
def make_tree_from_highlight(highlight_output, max_distance=250):
"""
    Build dataset-relation tree using simplified labels from process_text().
Proximity-based grouping: attach relations to nearest dataset mention.
"""
if not highlight_output:
return "<i>No relations found.</i>"
# Collect dataset mentions and relation mentions
datasets = []
relations = []
for i, (segment, label) in enumerate(highlight_output):
if not label:
continue
labels = [lbl.strip() for lbl in label.split(",")]
for lbl in labels:
if lbl.startswith("dataset:"):
datasets.append({
"idx": i,
"name": segment.strip(),
"label": lbl.split(":")[1].strip()
})
elif lbl in {
"acronym", "publisher", "data type", "data description",
"reference year", "publication year", "data geography", "usage context"
}:
                # ✅ Store 'target' and 'value' separately, only for usage context
rel_entry = {"idx": i, "type": lbl, "target": segment.strip()}
if lbl == "usage context":
rel_entry["value"] = segment.strip() # target text = value
relations.append(rel_entry)
if not datasets and not relations:
return "<i>No relations found.</i>"
    # Build a dict mapping dataset -> {relation_type -> [targets]}
tree = {ds["name"]: {"_dataset_label": ds["label"]} for ds in datasets}
# Attach relations to nearest dataset mention (proximity-based)
for rel in relations:
if not datasets:
continue
nearest = min(datasets, key=lambda ds: abs(ds["idx"] - rel["idx"]))
if abs(nearest["idx"] - rel["idx"]) <= max_distance:
tree.setdefault(nearest["name"], {"_dataset_label": nearest["label"]})
rel_type = rel["type"]
            # ✅ store 'value' separately for usage context
if rel_type == "usage context":
tree[nearest["name"]].setdefault(rel_type, set()).add(rel.get("value", ""))
else:
tree[nearest["name"]].setdefault(rel_type, set()).add(rel["target"])
# --- Render HTML ---
html_out = ["<div style='font-family:monospace; font-size:0.9em; line-height:1.5;'>"]
for ds_name, rels in sorted(tree.items()):
ds_label = rels.get("_dataset_label")
ds_badge = f" <span style='color:gray;'>[{html.escape(ds_label)}]</span>" if ds_label else ""
html_out.append(
f"<details open><summary><b style='color:#4da6ff;'>{html.escape(ds_name)}</b>{ds_badge}</summary><ul>"
)
for rel_type, targets in sorted(rels.items()):
if rel_type == "_dataset_label":
continue
for t in sorted(targets):
                # ✅ Conditional rendering for usage context
if rel_type == "usage context":
html_out.append(
f"<li><span style='color:#80c904;'>{html.escape(rel_type)}</span>: "
f"<i>{html.escape(t)}</i></li>"
)
else:
html_out.append(
f"<li><span style='color:#80c904;'>{html.escape(rel_type)}</span>: {html.escape(t)}</li>"
)
html_out.append("</ul></details>")
html_out.append("</div>")
return "\n".join(html_out)
def make_relation_tree_from_results(highlight_output, results):
"""
Build relation tree directly from model results,
ensuring all relations (even those not highlighted) are captured.
"""
if not results:
return "<i>No extracted relations found.</i>"
# Collect dataset mentions and their relations
datasets = []
tree = {}
for item in results:
ds = item.get("datasets")
rels = item.get("relations", [])
if not ds:
continue
ds_name = ds.get("text", "").strip()
ds_label = ds.get("label", "")
if not ds_name:
continue
# Initialize dataset node
if ds_name not in tree:
tree[ds_name] = {"_dataset_label": ds_label}
# Collect all relations
for rel in rels:
rel_type = rel.get("relation", "").strip().lower()
target = rel.get("target", "").strip()
# Skip empty or malformed relations
if not rel_type or not target:
continue
# Special handling for usage context (value-only)
if rel_type == "usage context" and "value" in rel:
target = rel["value"]
# Attach relation under this dataset
tree[ds_name].setdefault(rel_type, set()).add(target)
# Render HTML
html_out = ["<div style='font-family:monospace; font-size:0.9em; line-height:1.5;'>"]
for ds_name, rels in sorted(tree.items()):
ds_label = rels.get("_dataset_label", "")
ds_badge = f" <span style='color:gray;'>[{html.escape(ds_label)}]</span>" if ds_label else ""
html_out.append(
f"<details open><summary><b style='color:#4da6ff;'>{html.escape(ds_name)}</b>{ds_badge}</summary><ul>"
)
for rel_type, targets in sorted(rels.items()):
if rel_type == "_dataset_label":
continue
for t in sorted(targets):
html_out.append(
f"<li><span style='color:#80c904;'>{html.escape(rel_type)}</span>: {html.escape(t)}</li>"
)
html_out.append("</ul></details>")
html_out.append("</div>")
return "\n".join(html_out)
# ========================================
# Build Tree from Deduplicated Mentions
# ========================================
def make_relation_tree_from_dedup(dedup_results):
"""
Build a cleaned, deduplicated relation tree with prefiltering.
- Publishers that are dataset names are removed
- Years validated with regex \b\d{4}\b
- Empty/noisy values removed
"""
if not dedup_results or not isinstance(dedup_results, list):
return "<i>No deduplicated results found.</i>"
# --- prepare dataset name list for filtering publisher noise ---
dataset_names = {d.get("text", "").lower() for d in dedup_results if d.get("text")}
relation_priority = [
"acronym", "author", "publisher", "data type", "data description",
"reference year", "publication year", "data geography",
"reference population", "usage context"
]
def _filter_values(rel_type, values):
"""relation-specific filtering logic"""
if not values:
return []
# ensure list
if isinstance(values, str):
values = [values]
elif not isinstance(values, (list, set, tuple)):
return []
clean = []
for v in values:
if not v or not isinstance(v, str):
continue
val = v.strip()
if not val:
continue
# publisher should not be dataset name
if rel_type == "publisher":
if val.lower() in dataset_names:
continue
# also skip if it's a dataset acronym
if any(val.lower() in str(a).lower() for d in dedup_results for a in d.get("acronym", [])):
continue
# year fields must contain 4-digit year
if rel_type in {"reference year", "publication year"}:
if not re.search(r"\b\d{4}\b", val):
continue
# simple length sanity check
if len(val) > 150:
continue
clean.append(val)
return sorted(set(clean))
# --- Build HTML output ---
html_out = ["<div style='font-family:monospace; font-size:0.9em; line-height:1.5;'>"]
for entry in dedup_results:
ds_name = entry.get("text", "").strip()
ds_label = entry.get("label", "")
if not ds_name:
continue
ds_badge = f" <span style='color:gray;'>[{html.escape(ds_label)}]</span>" if ds_label else ""
html_out.append(
f"<details open><summary><b style='color:#4da6ff;'>{html.escape(ds_name)}</b>{ds_badge}</summary><ul>"
)
rel_keys = [k for k in entry.keys() if k not in {
"text", "label", "score", "count", "form_counts", "other_datasets",
"mentioned_in_list", "mentioned_in_sentence_list", "pages",
"sources", "start_indices", "end_indices", "raw_contexts"
}]
rel_keys = sorted(
rel_keys,
key=lambda r: relation_priority.index(r) if r in relation_priority else len(relation_priority)
)
for rel_key in rel_keys:
vals = _filter_values(rel_key, entry.get(rel_key))
if not vals:
continue
html_out.append(
f"<li><span style='color:#80c904;'>{html.escape(rel_key)}</span>: "
f"{html.escape('; '.join(vals))}</li>"
)
html_out.append("</ul></details>")
html_out.append("</div>")
return "\n".join(html_out)
with gr.Blocks() as demo:
gr.Markdown("## 🧠 AI for Data Use: Dataset Extraction")
inp = gr.Textbox(
label="Input Text",
lines=10,
placeholder="Paste or type a paragraph here..."
)
run_btn = gr.Button("πŸš€ Run Extraction")
out = gr.HighlightedText(label="Highlighted Datasets and Relations")
tree_btn = gr.Button("🧭 Show / Refresh Relation Tree")
tree_html = gr.HTML(label="Relation Tree", visible=False)
model_state = gr.State() # raw extract_from_text() results
highlight_state = gr.State() # processed highlights
gr.Examples(
examples=example_texts,
inputs=inp,
label="πŸ§ͺ Try these examples"
)
# --- main pipeline ---
def run_pipeline(text):
text = truncate_text(text, max_tokens=300)
highlights, results = process_text(text)
return highlights, results, highlights
run_btn.click(
fn=run_pipeline,
inputs=inp,
outputs=[out, model_state, highlight_state]
).then(
fn=lambda h, m: gr.update(
visible=True,
value=make_relation_tree_from_dedup(deduplicate_mentions(m))
),
inputs=[highlight_state, model_state],
outputs=tree_html
)
# --- manual refresh ---
def refresh_relation_tree(h, m):
if not h or not m:
            return gr.update(visible=True, value="<i>No extracted data yet. Run extraction first.</i>")
html_tree = make_relation_tree_from_dedup(deduplicate_mentions(m))
return gr.update(visible=True, value=html_tree)
tree_btn.click(fn=refresh_relation_tree, inputs=[highlight_state, model_state], outputs=tree_html)
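    # Wiring note: run_btn.click() stores the raw model output in model_state and
    # the highlight tuples in highlight_state; the chained .then() rebuilds the
    # deduplicated relation tree from model_state, and tree_btn re-runs the same
    # rebuild on demand.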
gr.Markdown(f"""
<hr style='border: none; border-top: 1px solid #333; margin: 20px 0;'>
### AI for Data Use: Dataset Extraction
This tool identifies **dataset mentions** (e.g., *Demographic and Health Survey*, *Living Standards Measurement Study*, etc.) and extracts **contextual metadata** such as:
- publisher
- publication year
- reference year
- geography
- acronym
- reference population
- data description
- data type
- usage context
**Usage Context Definitions**
- **Primary mention** – the dataset is the **main source of analysis or results** in the study.
- **Supporting mention** – the dataset is **used alongside other data** to complement or validate findings.
- **Background mention** – the dataset is **mentioned for context or comparison** but **not used** in the actual analysis.
#### How to Use
1. Paste or type text into the input box (left), or select one of the provided examples.
2. Click **🚀 Run Extraction** to process the text.
3. The model will **highlight all detected dataset mentions** and related entities (e.g., publisher, geography, year, usage context) directly in the text.
4. Below the highlights, a **deduplicated relation tree** will automatically appear, showing each dataset with its extracted metadata and filtered attributes.
5. You can click **🧭 Show / Refresh Relation Tree** at any time to rebuild or inspect the deduplicated metadata view.
<hr style='border: none; border-top: 1px solid #333; margin: 20px 0;'>
**Resources**
- Model: https://huggingface.co/{DATA_MODEL_ID}
- Paper (ArXiv): https://arxiv.org/pdf/2502.10263
- GLiNER Repo: https://github.com/urchade/GLiNER
- Project Docs: https://worldbank.github.io/ai4data-use/docs/introduction.html
""")
demo.launch()