# ConceptCloudv1 / entity_extractor.py
# Uploaded by nabin2004 via huggingface_hub (revision 5803853, verified)
#!/usr/bin/env python3
"""
vocab_extractor_agent.py
------------------------
Agentic entity recognizer for PDF vocabulary extraction.
Pipeline
--------
1. Extract text from PDF → tokenise → count frequencies
2. Auto-classify obvious stopwords/noise (no AI needed)
3. Send only AMBIGUOUS words to AI in compact batches
AI decides: entity type or REMOVE
4. Save results in modular KG-ready files
Output structure (all in --output-dir)
---------------------------------------
entities.json — final entity list with type + freq (KG nodes input)
entities.txt — plain word list (one per line)
removed.json — removed words with reason (audit trail)
removed.txt — plain removed list
entity_types.json — grouped by entity type {CONCEPT: [...], METHOD: [...], ...}
session.json — full session state (resume support)
kg_nodes.jsonl — one JSON object per line, ready for Neo4j/NetworkX import
stats.json — run statistics
AI providers
------------
--provider gemini Uses google-generativeai (gemini-2.0-flash)
--provider groq Uses groq (llama-3.3-70b-versatile or mixtral)
--provider openrouter Uses OpenRouter (any model via openai-compatible API)
Entity types used (KG-aligned)
--------------------------------
CONCEPT — abstract ideas: gradient, loss, attention, entropy
METHOD — algorithms/techniques: backpropagation, dropout, adam
ARCHITECTURE — model structures: transformer, resnet, lstm, vit
MATH — mathematical objects: jacobian, eigenvector, softmax
METRIC — evaluation measures: accuracy, bleu, perplexity, f1
DATASET — datasets/benchmarks: imagenet, cifar, squad, glue
TOOL — frameworks/libraries: pytorch, tensorflow, cuda
TASK — ML tasks: classification, translation, segmentation
OTHER — technical but doesn't fit above
REMOVE — not an entity (filler, generic, noise)
Usage
-----
export GEMINI_API_KEY=... # or GROQ_API_KEY
python vocab_extractor_agent.py d2l.pdf --provider gemini
python vocab_extractor_agent.py d2l.pdf --provider groq --model llama-3.3-70b-versatile
python vocab_extractor_agent.py d2l.pdf --provider gemini --min-freq 3 --batch-size 80
python vocab_extractor_agent.py d2l.pdf --provider gemini --resume # resume session
"""
import argparse
import json
import os
import re
import sys
import time
from collections import Counter, defaultdict
from pathlib import Path
from typing import Optional
# pdfplumber is the only hard third-party dependency for the non-AI path;
# fail fast with the exact install command if it is missing.
try:
    import pdfplumber
except ImportError:
    sys.exit("pip install pdfplumber")
# ─────────────────────────────────────────────────────────────────
# STOPWORDS — never sent to AI, always removed
# ─────────────────────────────────────────────────────────────────
# Words never sent to the AI and always classified REMOVE.
# Fix: the original literal listed "however", "therefore" and "following"
# twice each — harmless in a set, but noise for maintainers; deduplicated.
STOPWORDS = {
    "is","am","are","was","were","be","been","being",
    "do","does","did","doing","have","has","had","having",
    "will","would","shall","should","may","might","must","can","could",
    "a","an","the",
    "in","on","at","to","for","of","with","by","from","into","onto","upon",
    "about","above","below","between","through","during","before","after",
    "under","over","out","off","up","down","around","along","across",
    "and","or","but","nor","so","yet","both","either","neither","not",
    "as","if","than","that","which","while","although","because","since",
    "unless","until",
    "i","me","my","we","our","you","your","he","him","his","she","her",
    "it","its","they","them","their","who","whom","whose","what",
    "this","these","those","such","itself","themselves","himself","herself",
    # PDF/book noise
    "section","chapter","fig","figure","table","equation","example",
    "note","see","use","used","using","given","let","show","shown",
    "result","results","paper","work","approach","method","methods",
    "page","pp","ibid","arxiv","doi","http","https","www","com","org",
    # generic adverbs/adjectives
    "also","just","only","very","more","most","some","any","all","each",
    "every","no","here","there","when","where","how","why","then","thus",
    "now","new","one","two","three","however","therefore","following",
    "moreover","furthermore","nevertheless",
    "essentially","generally","typically","usually","often","sometimes",
    "first","second","third","last","next","several","many","various",
    "same","different","other","another","additional","further","related",
    "certain","specific","particular","important","simple",
}
# ─────────────────────────────────────────────────────────────────
# PRE-CLASSIFIED seeds — skip AI for these
# ─────────────────────────────────────────────────────────────────
# Pre-classified words — these skip the AI entirely and are tagged "seed".
# Fix: "pretraining" and "finetuning" were listed twice (METHOD, then TASK);
# in a dict literal the later key silently wins, so the effective value was
# always TASK. The shadowed METHOD entries are removed to make that explicit.
SEED_ENTITIES: dict[str, str] = {
    # CONCEPT
    "gradient":"CONCEPT","loss":"CONCEPT","objective":"CONCEPT","cost":"CONCEPT",
    "activation":"CONCEPT","representation":"CONCEPT","latent":"CONCEPT",
    "embedding":"CONCEPT","attention":"CONCEPT","context":"CONCEPT",
    "entropy":"CONCEPT","information":"CONCEPT","probability":"CONCEPT",
    "distribution":"CONCEPT","expectation":"CONCEPT","variance":"CONCEPT",
    "regularization":"CONCEPT","generalization":"CONCEPT","overfitting":"CONCEPT",
    "underfitting":"CONCEPT","convergence":"CONCEPT","capacity":"CONCEPT",
    "inductive":"CONCEPT","bias":"CONCEPT","complexity":"CONCEPT",
    # METHOD
    "backpropagation":"METHOD","dropout":"METHOD","normalization":"METHOD",
    "augmentation":"METHOD","quantization":"METHOD","pruning":"METHOD",
    "distillation":"METHOD",
    "tokenization":"METHOD","bpe":"METHOD","wordpiece":"METHOD",
    "attention-mechanism":"METHOD","self-attention":"METHOD",
    "cross-attention":"METHOD","weight-decay":"METHOD",
    "batch-normalization":"METHOD","layer-normalization":"METHOD",
    "gradient-clipping":"METHOD","warmup":"METHOD",
    # ARCHITECTURE
    "transformer":"ARCHITECTURE","resnet":"ARCHITECTURE","lstm":"ARCHITECTURE",
    "gru":"ARCHITECTURE","cnn":"ARCHITECTURE","rnn":"ARCHITECTURE",
    "vit":"ARCHITECTURE","bert":"ARCHITECTURE","gpt":"ARCHITECTURE",
    "autoencoder":"ARCHITECTURE","vae":"ARCHITECTURE","gan":"ARCHITECTURE",
    "diffusion":"ARCHITECTURE","mlp":"ARCHITECTURE","feedforward":"ARCHITECTURE",
    "encoder":"ARCHITECTURE","decoder":"ARCHITECTURE","backbone":"ARCHITECTURE",
    "llm":"ARCHITECTURE","convolution":"ARCHITECTURE","pooling":"ARCHITECTURE",
    # MATH
    "jacobian":"MATH","hessian":"MATH","eigenvector":"MATH","eigenvalue":"MATH",
    "softmax":"MATH","sigmoid":"MATH","relu":"MATH","tanh":"MATH","gelu":"MATH",
    "tensor":"MATH","matrix":"MATH","vector":"MATH","scalar":"MATH",
    "svd":"MATH","pca":"MATH","norm":"MATH","derivative":"MATH",
    "integral":"MATH","gradient-descent":"MATH","stochastic":"MATH",
    # METRIC
    "accuracy":"METRIC","precision":"METRIC","recall":"METRIC","f1":"METRIC",
    "bleu":"METRIC","rouge":"METRIC","perplexity":"METRIC","auc":"METRIC",
    "mse":"METRIC","mae":"METRIC","cross-entropy":"METRIC","kl-divergence":"METRIC",
    # DATASET
    "imagenet":"DATASET","cifar":"DATASET","mnist":"DATASET","squad":"DATASET",
    "glue":"DATASET","superglue":"DATASET","coco":"DATASET","wmt":"DATASET",
    # TOOL
    "pytorch":"TOOL","tensorflow":"TOOL","jax":"TOOL","cuda":"TOOL",
    "numpy":"TOOL","pandas":"TOOL","sklearn":"TOOL","huggingface":"TOOL",
    "transformers":"TOOL","keras":"TOOL","triton":"TOOL","tpu":"TOOL",
    # TASK
    "classification":"TASK","regression":"TASK","segmentation":"TASK",
    "detection":"TASK","translation":"TASK","summarization":"TASK",
    "generation":"TASK","retrieval":"TASK","ranking":"TASK",
    "reinforcement":"TASK","pretraining":"TASK","finetuning":"TASK",
}
# ─────────────────────────────────────────────────────────────────
# VALID ENTITY TYPES
# ─────────────────────────────────────────────────────────────────
ENTITY_TYPES = {"CONCEPT","METHOD","ARCHITECTURE","MATH","METRIC","DATASET","TOOL","TASK","OTHER"}
# ─────────────────────────────────────────────────────────────────
# PDF EXTRACTION
# ─────────────────────────────────────────────────────────────────
def extract_text(pdf_path: str, page_range: tuple | None = None) -> str:
    """Extract plain text from a PDF with pdfplumber.

    Parameters
    ----------
    pdf_path : path to the PDF file.
    page_range : optional (start, end) 1-based inclusive page range;
        None means all pages. Values are clamped to the document size.

    Returns the per-page texts joined by newlines; pages with no
    extractable text contribute an empty string.
    """
    all_text = []
    with pdfplumber.open(pdf_path) as pdf:
        total = len(pdf.pages)
        start, end = (1, total) if page_range is None else page_range
        start, end = max(1, start), min(total, end)  # clamp to real page count
        # BUG FIX: the message printed "{start}{end}" with no separator,
        # e.g. "Extracting pages 1100 of 500" for the range 1-100.
        print(f" → Extracting pages {start}-{end} of {total} …")
        for i, page in enumerate(pdf.pages[start-1:end], start=start):
            all_text.append(page.extract_text() or "")
            if i % 50 == 0:  # lightweight progress heartbeat
                print(f" page {i}/{end}")
    return "\n".join(all_text)
def tokenise(text: str) -> list[str]:
    """Lowercase *text* and return candidate vocabulary tokens.

    Punctuation (except hyphens/apostrophes) becomes whitespace; each token
    is stripped of edge hyphens/apostrophes; pure digit strings, one-char
    tokens, and stopwords are dropped.
    """
    cleaned = re.sub(r"[^\w\s'-]", " ", text.lower())
    stripped = (raw.strip("-'") for raw in cleaned.split())
    return [
        word
        for word in stripped
        if len(word) > 1
        and not re.fullmatch(r"\d+", word)
        and word not in STOPWORDS
    ]
# ─────────────────────────────────────────────────────────────────
# PRE-CLASSIFICATION (no AI)
# ─────────────────────────────────────────────────────────────────
def pre_classify(freq: Counter, min_freq: int) -> tuple[dict, list]:
    """Classify words without AI where possible.

    Returns:
        decided: {word: {"type": ..., "freq": ..., "source": "seed"|"heuristic"|"stopword"}}
        ambiguous: [(word, count), ...] — sent to AI
    """
    decided = {}
    ambiguous = []
    for word, count in freq.most_common():
        if count < min_freq:
            # most_common() yields counts in descending order, so every
            # remaining word is also below the threshold — stop here.
            # (Was `continue`, which needlessly scanned the whole tail.)
            break
        if word in STOPWORDS:
            decided[word] = {"type": "REMOVE", "freq": count, "source": "stopword"}
        elif word in SEED_ENTITIES:
            decided[word] = {"type": SEED_ENTITIES[word], "freq": count, "source": "seed"}
        elif len(word) <= 2:
            # Too short to be a meaningful domain term.
            decided[word] = {"type": "REMOVE", "freq": count, "source": "heuristic_short"}
        elif re.match(r'^[^a-z]', word):
            # Starts with a digit/punctuation — likely PDF artefact.
            decided[word] = {"type": "REMOVE", "freq": count, "source": "heuristic_nonalpha"}
        else:
            ambiguous.append((word, count))
    return decided, ambiguous
# ─────────────────────────────────────────────────────────────────
# AI PROVIDER WRAPPERS
# ─────────────────────────────────────────────────────────────────
# Shared system prompt for every provider: defines the label set and forces
# a compact JSON-array reply of {"w": <word>, "t": <TYPE>} objects, which
# parse_ai_response then validates.
SYSTEM_PROMPT = """You are an entity classifier for a Deep Learning textbook vocabulary.
Classify each word as one of these types:
CONCEPT - abstract DL/ML ideas (gradient, loss, attention, entropy)
METHOD - algorithms/techniques (dropout, backpropagation, adam)
ARCHITECTURE - model structures (transformer, resnet, lstm)
MATH - math objects (jacobian, softmax, eigenvector)
METRIC - evaluation measures (accuracy, bleu, perplexity)
DATASET - datasets/benchmarks (imagenet, squad)
TOOL - frameworks/software (pytorch, cuda)
TASK - ML tasks (classification, translation)
OTHER - technical but doesn't fit above
REMOVE - not a domain entity (generic word, noise, filler)
Rules:
- Reply ONLY with a compact JSON array, nothing else
- Each element: {"w": "<word>", "t": "<TYPE>"}
- No explanation, no preamble, no markdown fences
- When unsure between technical types, use OTHER
- When clearly not a domain term, use REMOVE
"""
def make_user_prompt(batch: list[tuple[str, int]]) -> str:
    """Render a batch as the ultra-compact 'word(freq)' list the AI expects."""
    rendered = [f"{word}({count})" for word, count in batch]
    return "Classify these words (format: word(frequency)):\n" + ",".join(rendered)
def call_gemini(batch: list[tuple], model: str, api_key: str) -> list[dict]:
    """Classify one batch of words via the Gemini API; returns parsed decisions."""
    try:
        import google.generativeai as genai
    except ImportError:
        sys.exit("pip install google-generativeai")
    genai.configure(api_key=api_key)
    gen_config = {"temperature": 0.1, "max_output_tokens": 4096}
    gemini = genai.GenerativeModel(
        model_name=model,
        system_instruction=SYSTEM_PROMPT,
        generation_config=gen_config,
    )
    reply = gemini.generate_content(make_user_prompt(batch))
    return parse_ai_response(reply.text, batch)
def call_groq(batch: list[tuple], model: str, api_key: str) -> list[dict]:
    """Classify one batch of words via the Groq chat-completions API."""
    try:
        from groq import Groq
    except ImportError:
        sys.exit("pip install groq")
    chat_messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": make_user_prompt(batch)},
    ]
    reply = Groq(api_key=api_key).chat.completions.create(
        model=model,
        messages=chat_messages,
        temperature=0.1,
        max_tokens=4096,
    )
    return parse_ai_response(reply.choices[0].message.content, batch)
def call_openrouter(batch: list[tuple], model: str, api_key: str) -> list[dict]:
    """Classify one batch of words via OpenRouter's OpenAI-compatible API."""
    try:
        from openai import OpenAI
    except ImportError:
        sys.exit("pip install openai")
    # OpenRouter asks for attribution headers on every request.
    attribution = {
        "HTTP-Referer": "https://github.com/underdogRE/vocab-extractor",
        "X-Title": "vocab-extractor-agent",
    }
    router = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=api_key)
    reply = router.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": make_user_prompt(batch)},
        ],
        temperature=0.1,
        max_tokens=4096,
        extra_headers=attribution,
    )
    return parse_ai_response(reply.choices[0].message.content, batch)
def parse_ai_response(text: str, batch: list[tuple]) -> list[dict]:
    """Parse the AI's JSON reply into [{"word": ..., "type": ...}, ...].

    Robustness:
    - Strips markdown code fences the model may add despite instructions.
    - Skips individual malformed elements instead of discarding the batch
      (previously a single non-dict element raised inside the loop and
      forced REMOVE for every word in the batch).
    - Falls back to REMOVE for the whole batch only when the reply is not
      valid JSON at all.
    Unknown type labels are coerced to OTHER, matching the prompt contract.
    """
    text = text.strip()
    # Strip markdown fences if AI ignores instructions
    text = re.sub(r"^```(?:json)?|```$", "", text, flags=re.MULTILINE).strip()
    try:
        items = json.loads(text)
    except Exception as e:
        print(f" ⚠ AI parse error: {e} — falling back to REMOVE for batch")
        return [{"word": w, "type": "REMOVE"} for w, _ in batch]
    result = []
    for item in items:
        if not isinstance(item, dict):
            continue  # skip malformed elements; keep the rest of the batch
        # str() guards against non-string values (e.g. null/int from the AI)
        w = str(item.get("w", "")).strip().lower()
        t = str(item.get("t", "REMOVE")).strip().upper()
        if t not in ENTITY_TYPES and t != "REMOVE":
            t = "OTHER"
        result.append({"word": w, "type": t})
    return result
def run_ai_batches(
    ambiguous: list[tuple],
    provider: str,
    model: str,
    api_key: str,
    batch_size: int,
    freq: Counter,  # currently unused — kept for interface compatibility
    session: dict,
    session_path: Path,
) -> dict:
    """Send ambiguous words to AI in batches.

    Resumability: every decision is mirrored into session["ai_decisions"]
    and the session file is rewritten after each batch, so an interrupted
    run skips already-classified words next time.

    Returns decided dict for ambiguous words:
    {word: {"type": ..., "freq": ..., "source": "ai"|"ai_failed"}}.
    """
    decided = {}
    already_done = session.get("ai_decisions", {})
    # Skip words already decided in a previous session
    todo = [(w, c) for w, c in ambiguous if w not in already_done]
    print(f"\n AI will classify {len(todo)} words "
          f"({len(already_done)} already decided from session)")
    total_batches = (len(todo) + batch_size - 1) // batch_size  # ceil division
    for i in range(0, len(todo), batch_size):
        batch = todo[i : i + batch_size]
        batch_num = i // batch_size + 1
        # BUG FIX: the progress message ran the batch counter and the word
        # count together (e.g. "Batch 3/12080 words"); add a separator.
        print(f" 🤖 Batch {batch_num}/{total_batches}: {len(batch)} words …", end=" ", flush=True)
        retries = 3
        for attempt in range(retries):
            try:
                if provider == "gemini":
                    results = call_gemini(batch, model, api_key)
                elif provider == "groq":
                    results = call_groq(batch, model, api_key)
                elif provider == "openrouter":
                    results = call_openrouter(batch, model, api_key)
                else:
                    raise ValueError(f"Unknown provider: {provider}")
                # Map results back (AI may reorder or miss words)
                result_map = {r["word"]: r["type"] for r in results}
                for word, count in batch:
                    etype = result_map.get(word, "OTHER")  # missing → safe bucket
                    decided[word] = {"type": etype, "freq": count, "source": "ai"}
                    already_done[word] = {"type": etype, "freq": count, "source": "ai"}
                kept_in_batch = sum(1 for w, _ in batch if decided.get(w, {}).get("type") != "REMOVE")
                print(f"done ({kept_in_batch}/{len(batch)} kept)")
                break
            except Exception as e:
                if attempt < retries - 1:
                    wait = 2 ** attempt  # exponential backoff: 1s, 2s
                    print(f"\n ⚠ Error: {e}. Retrying in {wait}s …")
                    time.sleep(wait)
                else:
                    print(f"\n ✗ Failed after {retries} attempts. Marking batch as REMOVE.")
                    for word, count in batch:
                        decided[word] = {"type": "REMOVE", "freq": count, "source": "ai_failed"}
                        already_done[word] = decided[word]
        # Save session after every batch — cheap insurance against crashes
        session["ai_decisions"] = already_done
        with open(session_path, "w") as f:
            json.dump(session, f, indent=2)
        # Rate limit buffer
        if provider in ("gemini", "openrouter"):
            time.sleep(0.5)
    return decided
# ─────────────────────────────────────────────────────────────────
# OUTPUT — KG-READY MODULAR FILES
# ─────────────────────────────────────────────────────────────────
def save_outputs(all_decisions: dict, output_dir: Path):
    """Write all KG-ready output files and print a run summary.

    Parameters
    ----------
    all_decisions : {word: {"type": ..., "freq": ..., "source": ...}};
        entries with type REMOVE go to the removed files, everything else
        to the entity files.
    output_dir : directory to create/populate (created if missing).

    Improvement: the frequency-descending orderings were previously
    recomputed with an identical sorted(...) call for each file that
    needed them; they are now computed once and reused.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    entities = {w: d for w, d in all_decisions.items() if d["type"] != "REMOVE"}
    removed = {w: d for w, d in all_decisions.items() if d["type"] == "REMOVE"}

    # Hoisted orderings (freq descending), shared by several outputs below.
    by_freq = sorted(entities.items(), key=lambda x: -x[1]["freq"])
    removed_by_freq = sorted(removed.items(), key=lambda x: -x[1]["freq"])

    by_type: dict[str, list] = defaultdict(list)
    for w, d in entities.items():
        by_type[d["type"]].append({"word": w, "freq": d["freq"]})
    # Sort each type group by freq desc
    for t in by_type:
        by_type[t].sort(key=lambda x: -x["freq"])

    # ── entities.json ────────────────────────────────────
    entities_json = {
        "total": len(entities),
        "entities": {
            w: {"type": d["type"], "freq": d["freq"], "source": d["source"]}
            for w, d in by_freq
        }
    }
    _write(output_dir / "entities.json", entities_json)

    # ── entities.txt ────────────────────────────────────
    with open(output_dir / "entities.txt", "w") as f:
        f.writelines(f"{w}\n" for w, _ in by_freq)

    # ── removed.json ────────────────────────────────────
    removed_json = {
        "total": len(removed),
        "removed": {
            w: {"freq": d["freq"], "reason": d["source"]}
            for w, d in removed_by_freq
        }
    }
    _write(output_dir / "removed.json", removed_json)

    # ── removed.txt ────────────────────────────────────
    with open(output_dir / "removed.txt", "w") as f:
        f.writelines(f"{w}\n" for w, _ in removed_by_freq)

    # ── entity_types.json ───────────────────────────────
    _write(output_dir / "entity_types.json", dict(by_type))

    # ── kg_nodes.jsonl ──────────────────────────────────
    # One JSON object per line — direct input for Neo4j/NetworkX/RDFLib
    with open(output_dir / "kg_nodes.jsonl", "w") as f:
        for w, d in by_freq:
            node = {
                "id": w.replace(" ", "_").replace("-", "_"),
                "label": w,
                "type": d["type"],
                "freq": d["freq"],
                "source": d["source"],
                # KG-ready fields for later phases
                "aliases": [],      # fill in Phase 2: coreference
                "definition": "",   # fill in Phase 2: from book passages
                "relations": [],    # fill in Phase 2: co-occurrence graph
            }
            f.write(json.dumps(node) + "\n")

    # ── stats.json ──────────────────────────────────────
    type_counts = {t: len(ws) for t, ws in by_type.items()}
    source_counts = defaultdict(int)
    for d in all_decisions.values():
        source_counts[d["source"]] += 1
    stats = {
        "total_words_processed": len(all_decisions),
        "entities_kept": len(entities),
        "words_removed": len(removed),
        "by_entity_type": type_counts,
        "by_decision_source": dict(source_counts),
    }
    _write(output_dir / "stats.json", stats)

    # ── Print summary ────────────────────────────────────
    print(f"\n ✅ Output saved to: {output_dir}/")
    print(f" entities.json — {len(entities)} entities (with type + freq + source)")
    print(f" entities.txt — plain word list")
    print(f" removed.json — {len(removed)} removed words (with reason)")
    print(f" removed.txt — plain removed list")
    print(f" entity_types.json — grouped by type")
    print(f" kg_nodes.jsonl — KG-ready node records (Neo4j/NetworkX)")
    print(f" stats.json — run statistics")
    print(f"\n Entity type breakdown:")
    for t, count in sorted(type_counts.items(), key=lambda x: -x[1]):
        bar = "█" * min(30, count)  # cap the bar so huge counts stay readable
        print(f" {t:<14} {count:>5} {bar}")


def _write(path: Path, data):
    """Serialize *data* as pretty-printed UTF-8 JSON at *path*."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
# ─────────────────────────────────────────────────────────────────
# CLI
# ─────────────────────────────────────────────────────────────────
def parse_page_range(s: str) -> tuple[int, int]:
    """argparse type= callable for 'START-END' page ranges, e.g. '1-100'.

    Raises argparse.ArgumentTypeError for malformed input — including
    non-numeric parts (previously escaped as a raw ValueError traceback)
    and reversed/zero ranges (previously accepted and silently extracted
    nothing).
    """
    parts = s.split("-")
    if len(parts) != 2:
        raise argparse.ArgumentTypeError("Use format like '1-100'")
    try:
        start, end = int(parts[0]), int(parts[1])
    except ValueError:
        raise argparse.ArgumentTypeError("Use format like '1-100'") from None
    if start < 1 or end < start:
        raise argparse.ArgumentTypeError("Start must be >= 1 and <= end")
    return start, end
def main():
    """CLI entry point: parse arguments, then run the four-step pipeline
    (extract text → pre-classify → AI classify → save KG-ready outputs)."""
    parser = argparse.ArgumentParser(
        description="Agentic entity recognizer — extracts KG-ready vocabulary from a PDF."
    )
    parser.add_argument("pdf", help="Path to the PDF")
    parser.add_argument(
        "--provider", choices=["gemini", "groq", "openrouter"], default="gemini",
        help="AI provider (default: gemini)"
    )
    parser.add_argument(
        "--model", default=None,
        help="Model name override. Defaults: gemini=gemini-2.0-flash, groq=llama-3.3-70b-versatile"
    )
    parser.add_argument(
        "--pages", type=parse_page_range, default=None, metavar="START-END",
        help="Page range, e.g. 1-100 (default: all)"
    )
    parser.add_argument(
        "--min-freq", type=int, default=3,
        help="Min word frequency to consider (default: 3)"
    )
    parser.add_argument(
        "--batch-size", type=int, default=80,
        help="Words per AI call (default: 80, max ~120 for reliability)"
    )
    parser.add_argument(
        "--output-dir", default="vocab_output",
        help="Output directory (default: vocab_output/)"
    )
    parser.add_argument(
        "--resume", action="store_true",
        help="Resume from existing session.json in output-dir"
    )
    args = parser.parse_args()
    # ── Resolve model defaults ────────────────────────────
    # Each provider gets a sensible default model unless --model overrides it.
    if args.model is None:
        args.model = {
            "gemini": "gemini-2.0-flash",
            "groq": "llama-3.3-70b-versatile",
            "openrouter": "meta-llama/llama-3.3-70b-instruct",
        }[args.provider]
    # ── Resolve API key ───────────────────────────────────
    # The key comes only from the provider-specific environment variable.
    key_env = {
        "gemini": "GEMINI_API_KEY",
        "groq": "GROQ_API_KEY",
        "openrouter": "OPENROUTER_API_KEY",
    }[args.provider]
    api_key = os.environ.get(key_env, "")
    if not api_key:
        sys.exit(f"Set {key_env} environment variable first.")
    pdf_path = Path(args.pdf)
    if not pdf_path.exists():
        sys.exit(f"File not found: {pdf_path}")
    output_dir = Path(args.output_dir)
    session_path = output_dir / "session.json"
    output_dir.mkdir(parents=True, exist_ok=True)
    # ── Load or init session ──────────────────────────────
    # The session carries cached token frequencies and prior AI decisions.
    session = {}
    if args.resume and session_path.exists():
        with open(session_path) as f:
            session = json.load(f)
        print(f" ↺ Resumed session from {session_path}")
    # ── Step 1: Extract ───────────────────────────────────
    print(f"\n[1/4] Extracting text …")
    if "raw_freq" in session:
        # Resume path: skip the (slow) PDF extraction entirely.
        print(" Using cached token frequencies from session.")
        freq = Counter(session["raw_freq"])
    else:
        raw_text = extract_text(str(pdf_path), args.pages)
        tokens = tokenise(raw_text)
        freq = Counter(tokens)
        # Persist frequencies immediately so a later --resume can skip Step 1.
        session["raw_freq"] = dict(freq)
        print(f" {len(tokens):,} tokens → {len(freq):,} unique")
        with open(session_path, "w") as f:
            json.dump(session, f, indent=2)
    # ── Step 2: Pre-classify ──────────────────────────────
    print(f"\n[2/4] Pre-classifying (no AI) …")
    pre_decided, ambiguous = pre_classify(freq, args.min_freq)
    auto_keep = sum(1 for d in pre_decided.values() if d["type"] != "REMOVE")
    auto_remove = sum(1 for d in pre_decided.values() if d["type"] == "REMOVE")
    print(f" {auto_keep} auto-kept (seeds/heuristics)")
    print(f" {auto_remove} auto-removed (stopwords/heuristics)")
    print(f" {len(ambiguous)} ambiguous → sending to {args.provider} AI")
    # ── Step 3: AI classification ─────────────────────────
    print(f"\n[3/4] AI entity classification ({args.provider} / {args.model}) …")
    ai_decided = run_ai_batches(
        ambiguous, args.provider, args.model, api_key,
        args.batch_size, freq, session, session_path
    )
    # ── Merge all decisions ───────────────────────────────
    # AI decisions win on any (unexpected) overlap with pre-classification.
    all_decisions = {**pre_decided, **ai_decided}
    # ── Step 4: Save outputs ──────────────────────────────
    print(f"\n[4/4] Saving KG-ready outputs …")
    save_outputs(all_decisions, output_dir)


if __name__ == "__main__":
    main()