nabin2004 committed on
Commit
7fff173
·
verified ·
1 Parent(s): 5803853

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. README.md +2 -0
  2. pyproject.toml +1 -0
  3. relationship_extractor.py +744 -0
  4. uv.lock +0 -0
README.md CHANGED
@@ -1,5 +1,7 @@
1
  # ConceptCloud
2
 
 
 
3
 
4
  ```
5
  pip install openai
 
1
  # ConceptCloud
2
 
3
+ 1. entity_extractor.py
4
+ 2. relationship_extractor.py
5
 
6
  ```
7
  pip install openai
pyproject.toml CHANGED
@@ -8,4 +8,5 @@ dependencies = [
8
  "google-generativeai>=0.8.6",
9
  "openai>=2.28.0",
10
  "pdfplumber>=0.11.9",
 
11
  ]
 
8
  "google-generativeai>=0.8.6",
9
  "openai>=2.28.0",
10
  "pdfplumber>=0.11.9",
11
+ "vllm>=0.16.0",
12
  ]
relationship_extractor.py ADDED
@@ -0,0 +1,744 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ relationship_extractor.py
4
+ --------------------------
5
+ Builds typed, directed relationship edges between entities extracted from
6
+ a Deep Learning textbook (d2l-en.pdf + kg_nodes.jsonl from vocab_extractor_agent.py).
7
+
8
+ This is Layer 1 of the AlphaManimator concept ontology — the prerequisite
9
+ and dependency graph that drives scene planning and temporal ordering.
10
+
11
+ Pipeline
12
+ --------
13
+ 1. Load entities from kg_nodes.jsonl (output of vocab_extractor_agent.py)
14
+ 2. Extract paragraph-level co-occurrence from PDF → candidate pairs
15
+ 3. Filter pairs by valid type combinations → prune search space
16
+ 4. Send batches to local vLLM server (Qwen2.5-72B or Llama-3.3-70B)
17
+ to classify relationship type
18
+ 5. Post-process: canonicalize direction, deduplicate, confidence filter
19
+ 6. Save kg_edges.jsonl + ontology_graph.json + prerequisite_order.json
20
+
21
+ Output files (all in --output-dir)
22
+ ------------------------------------
23
+ kg_edges.jsonl — one edge per line, Neo4j/NetworkX ready
24
+ ontology_graph.json — full graph: nodes + edges merged
25
+ prerequisite_order.json — topological sort of REQUIRES edges (scene order)
26
+ relation_stats.json — breakdown by relation type
27
+ session.json — resume support (batch checkpointing)
28
+
29
+ Local model setup (RTX 5090, 32GB VRAM)
30
+ -----------------------------------------
31
+ pip install vllm
32
+ vllm serve Qwen/Qwen2.5-72B-Instruct-GPTQ-Int4 \\
33
+ --gpu-memory-utilization 0.90 \\
34
+ --max-model-len 8192
35
+
36
+ Then run:
37
+ python relationship_extractor.py d2l-en.pdf vocab_output/kg_nodes.jsonl
38
+
39
+ Or with Llama:
40
+ vllm serve meta-llama/Llama-3.3-70B-Instruct-AWQ \\
41
+ --gpu-memory-utilization 0.90
42
+ python relationship_extractor.py d2l-en.pdf vocab_output/kg_nodes.jsonl \\
43
+ --model meta-llama/Llama-3.3-70B-Instruct-AWQ
44
+
45
+ AlphaManimator Layer 1 relation types → scene planner semantics
46
+ ----------------------------------------------------------------
47
+ IS_A → zoom-in / hierarchy animation
48
+ USES → head scene visually contains tail component
49
+ REQUIRES → tail must be explained BEFORE head (prerequisite ordering)
50
+ PREVENTS → contrastive animation: show problem, then solution
51
+ MEASURES → link metric scene to task/concept scene
52
+ TRAINS → optimization arc in animation
53
+ PRODUCES → method → output object animation
54
+ EXTENDS → "builds on" narrative bridge
55
+ APPLIED_TO → domain context frame
56
+ """
57
+
58
+ import argparse
59
+ import json
60
+ import re
61
+ import sys
62
+ import time
63
+ from collections import defaultdict
64
+ from itertools import combinations
65
+ from pathlib import Path
66
+ from typing import Optional
67
+
68
+ try:
69
+ import pdfplumber
70
+ except ImportError:
71
+ sys.exit("pip install pdfplumber")
72
+
73
+ try:
74
+ from openai import OpenAI # vLLM exposes OpenAI-compatible API
75
+ except ImportError:
76
+ sys.exit("pip install openai")
77
+
78
# ─────────────────────────────────────────────────────────────────
# RELATION TYPES
# ─────────────────────────────────────────────────────────────────

# Closed vocabulary of edge labels the model may emit. "NONE" is a
# sentinel meaning "no relationship" and is filtered out downstream.
RELATION_TYPES: set[str] = {
    "IS_A",        # backprop IS_A optimization_method
    "USES",        # transformer USES attention
    "REQUIRES",    # backprop REQUIRES chain_rule ← KEY for scene ordering
    "PREVENTS",    # dropout PREVENTS overfitting
    "MEASURES",    # bleu MEASURES translation_quality
    "TRAINS",      # sgd TRAINS mlp
    "PRODUCES",    # softmax PRODUCES distribution
    "EXTENDS",     # adam EXTENDS sgd
    "APPLIED_TO",  # classification APPLIED_TO imagenet
    "NONE",        # no meaningful relationship → discard
}

# ─────────────────────────────────────────────────────────────────
# VALID TYPE PAIR COMBINATIONS
# Only these combinations are sent to the model.
# Cuts the O(N²) pair space by ~80%.
# ─────────────────────────────────────────────────────────────────

# Maps a directed (head_type, tail_type) combination to the relations that
# are plausible for it. A pair whose types appear in neither direction is
# never sent to the model.
VALID_TYPE_PAIRS: dict[tuple[str, str], list[str]] = {
    # (head_type, tail_type): [plausible relations]
    ("METHOD", "CONCEPT"): ["USES", "REQUIRES", "PREVENTS", "PRODUCES"],
    ("METHOD", "MATH"): ["USES", "REQUIRES", "PRODUCES"],
    ("METHOD", "METHOD"): ["EXTENDS", "REQUIRES", "USES", "IS_A"],
    ("ARCHITECTURE", "METHOD"): ["USES", "REQUIRES"],
    ("ARCHITECTURE", "CONCEPT"): ["USES", "PRODUCES", "REQUIRES"],
    ("ARCHITECTURE", "ARCHITECTURE"): ["EXTENDS", "USES", "IS_A"],
    ("CONCEPT", "CONCEPT"): ["REQUIRES", "IS_A", "PRODUCES"],
    ("CONCEPT", "MATH"): ["USES", "IS_A"],
    ("TASK", "ARCHITECTURE"): ["USES"],
    ("TASK", "DATASET"): ["APPLIED_TO"],
    ("TASK", "METRIC"): ["MEASURES"],
    ("METRIC", "TASK"): ["MEASURES"],
    ("METRIC", "CONCEPT"): ["MEASURES"],
    ("METHOD", "ARCHITECTURE"): ["TRAINS", "USES"],
    ("TOOL", "ARCHITECTURE"): ["TRAINS"],
    ("TOOL", "METHOD"): ["USES"],
    ("MATH", "CONCEPT"): ["IS_A", "PRODUCES"],
    ("MATH", "MATH"): ["REQUIRES", "IS_A", "USES"],
}

# ─────────────────────────────────────────────────────────────────
# SYSTEM PROMPT
# ─────────────────────────────────────────────────────────────────

SYSTEM_PROMPT: str = """You are building a concept ontology for a Deep Learning textbook.
Your job is to classify the semantic relationship between pairs of ML/DL entities.

Relationship types:
  IS_A — subtype or instance (adam IS_A optimizer)
  USES — method/arch uses a component (transformer USES attention)
  REQUIRES — strict prerequisite (backpropagation REQUIRES chain_rule)
  PREVENTS — regularization or mitigation (dropout PREVENTS overfitting)
  MEASURES — metric evaluates concept/task (bleu MEASURES translation)
  TRAINS — optimization method trains architecture (sgd TRAINS mlp)
  PRODUCES — method produces an output object (softmax PRODUCES distribution)
  EXTENDS — builds upon or generalizes (adam EXTENDS sgd)
  APPLIED_TO — task applied to dataset/domain (classification APPLIED_TO imagenet)
  NONE — no clear meaningful relationship

Rules:
- Reply ONLY with a JSON array. No explanation, no markdown, no preamble.
- Each element: {"h": "<head>", "r": "<RELATION>", "t": "<tail>"}
- Direction matters: head → tail (the arrow goes from head to tail)
- REQUIRES means: to understand head, you must first understand tail
- Use NONE if no clear relationship exists in this domain
- Be conservative: prefer NONE over a weak guess
"""
150
+
151
+
152
def make_batch_prompt(pairs: list[tuple[str, str]]) -> str:
    """Render a batch of (head, tail) entity pairs as the user prompt."""
    rendered = [f'("{head}", "{tail}")' for head, tail in pairs]
    return "Classify the relationship for each entity pair (head, tail):\n" + "\n".join(rendered)
155
+
156
+
157
+ # ─────────────────────────────────────────────────────────────────
158
+ # PDF CO-OCCURRENCE EXTRACTION
159
+ # ─────────────────────────────────────────────────────────────────
160
+
161
def extract_paragraphs(pdf_path: str, page_range: Optional[tuple] = None) -> list[str]:
    """Pull paragraphs from a PDF; a paragraph is a run of non-blank lines on one page."""
    out: list[str] = []
    with pdfplumber.open(pdf_path) as pdf:
        total = len(pdf.pages)
        if page_range is None:
            start, end = 1, total
        else:
            start, end = page_range
        # Clamp the requested range to the pages that actually exist.
        start, end = max(1, start), min(total, end)
        print(f" → Extracting paragraphs from pages {start}–{end} of {total} …")

        for page_no, page in enumerate(pdf.pages[start - 1:end], start=start):
            buf: list[str] = []
            for raw_line in (page.extract_text() or "").split("\n"):
                piece = raw_line.strip()
                if piece:
                    buf.append(piece)
                elif buf:
                    # Blank line closes the current paragraph.
                    out.append(" ".join(buf))
                    buf = []
            if buf:
                # Flush a paragraph left open at the bottom of the page.
                out.append(" ".join(buf))

            if page_no % 100 == 0:
                print(f" page {page_no}/{end} — {len(out)} paragraphs so far")

    print(f" → Extracted {len(out):,} paragraphs")
    return out
190
+
191
+
192
def build_cooccurrence_index(
    paragraphs: list[str],
    entity_set: set[str],
    window: int = 1,  # entities co-occurring within ±window paragraphs
) -> dict[frozenset, int]:
    """
    For each paragraph (±window), count entity pairs that appear together.

    Returns: {frozenset({e1, e2}): count}

    Performance fix vs. the original: each paragraph was re-scanned against
    every entity pattern once per window position (the joined window text was
    regex-searched from scratch for every i). Now every paragraph is scanned
    exactly once with pre-compiled patterns and the per-window entity set is
    a cheap union of the cached per-paragraph sets. This also avoids spurious
    matches that could span a paragraph boundary in the joined text.
    """
    print(f" → Building co-occurrence index (window={window}) …")
    cooccurrence: dict[frozenset, int] = defaultdict(int)

    # Normalized form (separators → spaces, lowercase) → canonical entity name.
    # NOTE(review): entities differing only by '-'/'_' collapse to one key —
    # same as the original behavior.
    norm_map = {e.replace("_", " ").replace("-", " ").lower(): e
                for e in sorted(entity_set)}
    # Pre-compile one whole-word pattern per entity (hoisted out of the loop).
    compiled = [(re.compile(r"\b" + re.escape(norm) + r"\b"), canonical)
                for norm, canonical in norm_map.items()]

    # Scan each paragraph once; cache its entity set.
    per_para: list[set[str]] = []
    for para in paragraphs:
        lowered = para.lower()
        per_para.append({canon for pat, canon in compiled if pat.search(lowered)})

    n = len(paragraphs)
    for i in range(n):
        # Union of entity sets over the ±window neighborhood.
        entities_here: set[str] = set()
        for found in per_para[max(0, i - window):min(n, i + window + 1)]:
            entities_here |= found

        if len(entities_here) >= 2:
            for e1, e2 in combinations(sorted(entities_here), 2):
                cooccurrence[frozenset({e1, e2})] += 1

        if i % 500 == 0 and i > 0:
            print(f" paragraph {i}/{n}")

    print(f" → Found {len(cooccurrence):,} co-occurring pairs")
    return cooccurrence
235
+
236
+
237
+ # ─────────────────────────────────────────────────────────────────
238
+ # CANDIDATE PAIR FILTERING
239
+ # ─────────────────────────────────────────────────────────────────
240
+
241
def generate_candidate_pairs(
    entities: dict[str, dict],
    cooccurrence: dict[frozenset, int],
    min_cooccurrence: int = 2,
    top_n_per_entity: int = 15,
) -> list[tuple[str, str]]:
    """
    Filter co-occurring pairs down to model-worthy candidates:
      1. the (head_type, tail_type) combination must be in VALID_TYPE_PAIRS
      2. the pair must co-occur at least min_cooccurrence times
      3. each entity keeps only its top_n_per_entity most frequent partners

    Returns sorted directed (head, tail) pairs.
    """
    print(f" → Generating candidate pairs (min_cooccurrence={min_cooccurrence}) …")

    def type_of(name: str) -> str:
        return entities.get(name, {}).get("type", "")

    # Partner lists per entity, keeping only type-valid directions.
    partners_of: dict[str, list[tuple[str, int]]] = defaultdict(list)
    for pair, count in cooccurrence.items():
        if count < min_cooccurrence:
            continue
        a, b = tuple(pair)
        if (type_of(a), type_of(b)) in VALID_TYPE_PAIRS:
            partners_of[a].append((b, count))
        if (type_of(b), type_of(a)) in VALID_TYPE_PAIRS:
            partners_of[b].append((a, count))

    # Keep each entity's N most frequent partners; re-check direction validity.
    chosen: set[tuple[str, str]] = set()
    for head, partners in partners_of.items():
        head_type = type_of(head)
        for tail, _count in sorted(partners, key=lambda p: -p[1])[:top_n_per_entity]:
            if (head_type, type_of(tail)) in VALID_TYPE_PAIRS:
                chosen.add((head, tail))

    result = sorted(chosen)
    print(f" → {len(result):,} candidate directed pairs after filtering")
    return result
285
+
286
+
287
+ # ─────────────────────────────────────────────────────────────────
288
+ # LOCAL MODEL INFERENCE (vLLM OpenAI-compatible)
289
+ # ─────────────────────────────────────────────────────────────────
290
+
291
def call_local_model(
    batch: list[tuple[str, str]],
    client: OpenAI,
    model: str,
) -> list[dict]:
    """Classify one batch of entity pairs via the vLLM OpenAI-compatible endpoint."""
    chat_messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": make_batch_prompt(batch)},
    ]
    # Near-zero temperature: we want deterministic, conservative labels.
    completion = client.chat.completions.create(
        model=model,
        messages=chat_messages,
        temperature=0.05,
        max_tokens=2048,
    )
    reply_text = completion.choices[0].message.content
    return parse_model_response(reply_text, batch)
308
+
309
+
310
def parse_model_response(text: str, batch: list[tuple[str, str]]) -> list[dict]:
    """Parse the model's JSON reply into edge dicts.

    Robustness fixes vs. the original:
    - tolerates prose around the JSON (extracts the outermost [...] span),
      where previously any preamble discarded the whole batch as NONE;
    - rejects a non-list top level explicitly;
    - skips malformed (non-dict) elements instead of aborting the batch;
    - coerces field values to str before normalizing.

    Falls back to NONE for every pair in `batch` when nothing parseable is found.
    """
    text = (text or "").strip()
    # Strip markdown code fences if the model added them despite instructions.
    text = re.sub(r"^```(?:json)?|```$", "", text, flags=re.MULTILINE).strip()
    try:
        try:
            items = json.loads(text)
        except json.JSONDecodeError:
            # Models sometimes wrap the array in prose — grab the outermost
            # bracketed span and parse that instead.
            match = re.search(r"\[.*\]", text, flags=re.DOTALL)
            if match is None:
                raise
            items = json.loads(match.group(0))

        if not isinstance(items, list):
            raise ValueError(f"expected a JSON array, got {type(items).__name__}")

        result = []
        for item in items:
            if not isinstance(item, dict):
                continue  # ignore malformed elements rather than failing the batch
            h = str(item.get("h", "")).strip().lower().replace(" ", "_")
            r = str(item.get("r", "NONE")).strip().upper()
            t = str(item.get("t", "")).strip().lower().replace(" ", "_")
            if r not in RELATION_TYPES:
                r = "NONE"  # unknown labels are treated as "no relationship"
            result.append({"head": h, "relation": r, "tail": t})
        return result
    except Exception as e:
        print(f" ⚠ Parse error: {e} — marking batch as NONE")
        return [{"head": h, "relation": "NONE", "tail": t} for h, t in batch]
328
+
329
+
330
def run_extraction_batches(
    candidates: list[tuple[str, str]],
    client: OpenAI,
    model: str,
    batch_size: int,
    session: dict,
    session_path: Path,
) -> list[dict]:
    """
    Run relationship extraction in batches with session checkpointing.
    Returns list of edge dicts.

    `session` is mutated in place and flushed to `session_path` after every
    batch, so a crash or Ctrl-C loses at most one batch of model calls.
    """
    # Decisions carried over from a resumed session (may be empty).
    already_done: dict[str, dict] = session.get("edge_decisions", {})

    def pair_key(h, t):
        # "|||" is assumed to never occur inside an entity name — TODO confirm
        # against the vocab extractor's naming rules.
        return f"{h}|||{t}"

    # Only classify pairs that have no recorded decision yet.
    todo = [(h, t) for h, t in candidates if pair_key(h, t) not in already_done]
    print(f"\n Model will classify {len(todo):,} pairs "
          f"({len(already_done):,} already done from session)")

    total_batches = (len(todo) + batch_size - 1) // batch_size  # ceiling division

    for i in range(0, len(todo), batch_size):
        batch = todo[i:i + batch_size]
        batch_num = i // batch_size + 1
        print(f" 🤖 Batch {batch_num}/{total_batches} — {len(batch)} pairs …",
              end=" ", flush=True)

        retries = 3
        for attempt in range(retries):
            try:
                results = call_local_model(batch, client, model)

                # Map results back by (head, tail) — the model may reorder
                # pairs or normalize entity names.
                result_map = {(r["head"], r["tail"]): r["relation"] for r in results}

                kept = 0
                for h, t in batch:
                    rel = result_map.get((h, t), "NONE")
                    # Fallback: retry the lookup with '_'/'-' stripped, in case
                    # the model rewrote separators inside the entity names.
                    if rel == "NONE":
                        h_norm = h.replace("_", "").replace("-", "")
                        t_norm = t.replace("_", "").replace("-", "")
                        for (rh, rt), rrel in result_map.items():
                            if (rh.replace("_","").replace("-","") == h_norm and
                                    rt.replace("_","").replace("-","") == t_norm):
                                rel = rrel
                                break

                    edge = {"head": h, "relation": rel, "tail": t}
                    already_done[pair_key(h, t)] = edge
                    if rel != "NONE":
                        kept += 1

                print(f"done ({kept}/{len(batch)} non-NONE)")
                break  # success — leave the retry loop

            except Exception as e:
                if attempt < retries - 1:
                    wait = 2 ** attempt  # exponential backoff: 1s, 2s
                    print(f"\n ⚠ Error: {e}. Retrying in {wait}s …")
                    time.sleep(wait)
                else:
                    # Give up on this batch: record NONE so it is not retried
                    # on resume (deliberate best-effort behavior).
                    print(f"\n ✗ Failed after {retries} attempts. Marking as NONE.")
                    for h, t in batch:
                        already_done[pair_key(h, t)] = {"head": h, "relation": "NONE", "tail": t}

        # Checkpoint after every batch
        session["edge_decisions"] = already_done
        with open(session_path, "w") as f:
            json.dump(session, f, indent=2)

    # Collect all non-NONE edges
    edges = [
        edge for edge in already_done.values()
        if edge["relation"] != "NONE"
    ]
    return edges
409
+
410
+
411
+ # ─────────────────────────────────────────────────────────────────
412
+ # POST-PROCESSING
413
+ # ─────────────────────────────────────────────────────────────────
414
+
415
def postprocess_edges(
    raw_edges: list[dict],
    entities: dict[str, dict],
) -> list[dict]:
    """
    1. Attach entity types to edges
    2. Deduplicate: if A→B and B→A both exist, keep the one matching
       canonical type-pair direction; when both directions are valid with
       different relations, keep both.

    Bug fix vs. the original: the "keep both" branch keyed the extra edge on
    frozenset({h, t, r}), which mixes entity names and relation labels in one
    set — it could collide with a genuine pair key if a relation name ever
    equals an entity name, and silently merges h/t/r when any two are equal.
    A (pair, relation) tuple key is collision-free.
    """
    print(f" → Post-processing {len(raw_edges)} raw edges …")

    # Keyed by the undirected pair (or (pair, relation) for extra kept edges).
    seen: dict[object, dict] = {}

    for edge in raw_edges:
        h, r, t = edge["head"], edge["relation"], edge["tail"]
        h_type = entities.get(h, {}).get("type", "OTHER")
        t_type = entities.get(t, {}).get("type", "OTHER")

        key = frozenset({h, t})
        enriched = {
            "head": h,
            "relation": r,
            "tail": t,
            "head_type": h_type,
            "tail_type": t_type,
        }

        if key not in seen:
            seen[key] = enriched
        else:
            # Keep the edge whose direction matches VALID_TYPE_PAIRS.
            existing = seen[key]
            existing_valid = (existing["head_type"], existing["tail_type"]) in VALID_TYPE_PAIRS
            new_valid = (h_type, t_type) in VALID_TYPE_PAIRS
            if new_valid and not existing_valid:
                seen[key] = enriched
            # If both directions are valid and carry different relations,
            # keep both as separate edges under a collision-free tuple key.
            elif new_valid and existing_valid and existing["relation"] != r:
                seen[(key, r)] = enriched

    final_edges = list(seen.values())
    print(f" → {len(final_edges)} edges after deduplication")
    return final_edges
461
+
462
+
463
def build_prerequisite_order(edges: list[dict]) -> list[str]:
    """
    Topologically sort the REQUIRES subgraph.

    Returns concept names in teaching order: a concept's prerequisites
    (REQUIRES tails) always precede it. Nodes stuck in a cycle get no
    ordering and are appended alphabetically at the end. This feeds the
    AlphaManimator scene planner.
    """
    from collections import deque

    prereq_pairs = [(e["head"], e["tail"]) for e in edges if e["relation"] == "REQUIRES"]
    if not prereq_pairs:
        return []

    # Edge direction for the sort: tail → head (prerequisite first).
    successors: dict[str, list[str]] = defaultdict(list)
    incoming: dict[str, int] = defaultdict(int)
    nodes: set[str] = set()
    for head, tail in prereq_pairs:
        successors[tail].append(head)
        incoming[head] += 1
        nodes.add(head)
        nodes.add(tail)

    # Kahn's algorithm: repeatedly emit nodes with no unmet prerequisites.
    ready = deque(n for n in nodes if incoming[n] == 0)
    ordered: list[str] = []
    while ready:
        current = ready.popleft()
        ordered.append(current)
        for nxt in successors[current]:
            incoming[nxt] -= 1
            if not incoming[nxt]:
                ready.append(nxt)

    # Anything left over sits on a cycle: no valid ordering, append sorted.
    ordered.extend(sorted(nodes - set(ordered)))
    return ordered
502
+
503
+
504
+ # ─────────────────────────────────────────────────────────────────
505
+ # OUTPUT
506
+ # ─────────────────────────────────────────────────────────────────
507
+
508
def save_outputs(
    edges: list[dict],
    entities: dict[str, dict],
    output_dir: Path,
):
    """Write the four output artifacts to output_dir and print a summary.

    Artifacts: kg_edges.jsonl, ontology_graph.json, prerequisite_order.json,
    relation_stats.json. `edges` must already be enriched by
    postprocess_edges (the stats section reads head_type/tail_type).
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    # ── kg_edges.jsonl ───────────────────────────────────
    # One edge per line; sorted by (relation, head) for stable diffs.
    with open(output_dir / "kg_edges.jsonl", "w") as f:
        for edge in sorted(edges, key=lambda e: (e["relation"], e["head"])):
            f.write(json.dumps(edge) + "\n")

    # ── ontology_graph.json ──────────────────────────────
    # Full graph: nodes (from entities) + edges merged
    graph = {
        "nodes": [
            {
                "id": e_id,
                "type": e_data["type"],
                "freq": e_data.get("freq", 0),
            }
            for e_id, e_data in entities.items()
        ],
        "edges": edges,
        "meta": {
            "total_nodes": len(entities),
            "total_edges": len(edges),
            "relation_types": sorted(RELATION_TYPES - {"NONE"}),
        }
    }
    with open(output_dir / "ontology_graph.json", "w") as f:
        json.dump(graph, f, indent=2, ensure_ascii=False)

    # ── prerequisite_order.json ──────────────────────────
    # Topological teaching order derived from REQUIRES edges only.
    prereq_order = build_prerequisite_order(edges)
    with open(output_dir / "prerequisite_order.json", "w") as f:
        json.dump({
            "description": (
                "Topological ordering of concepts via REQUIRES edges. "
                "AlphaManimator scene planner must explain concepts in this order."
            ),
            "order": prereq_order,
            "total": len(prereq_order),
        }, f, indent=2)

    # ── relation_stats.json ──────────────────────────────
    # Counts per relation type and per (head_type → tail_type) combination.
    rel_counts: dict[str, int] = defaultdict(int)
    type_pair_counts: dict[str, int] = defaultdict(int)
    for edge in edges:
        rel_counts[edge["relation"]] += 1
        pair = f"{edge['head_type']} → {edge['tail_type']}"
        type_pair_counts[pair] += 1

    stats = {
        "total_edges": len(edges),
        "by_relation": dict(sorted(rel_counts.items(), key=lambda x: -x[1])),
        # Only the 30 most frequent type pairs to keep the file readable.
        "by_type_pair": dict(sorted(type_pair_counts.items(), key=lambda x: -x[1])[:30]),
        "prerequisite_chain_length": len(prereq_order),
    }
    with open(output_dir / "relation_stats.json", "w") as f:
        json.dump(stats, f, indent=2)

    # ── Print summary ────────────────────────────────────
    print(f"\n ✅ Output saved to: {output_dir}/")
    print(f" kg_edges.jsonl — {len(edges)} edges (Neo4j/NetworkX ready)")
    print(f" ontology_graph.json — full graph ({len(entities)} nodes + {len(edges)} edges)")
    print(f" prerequisite_order.json — {len(prereq_order)} concepts in scene order")
    print(f" relation_stats.json — breakdown by relation type")
    print(f"\n Relation type breakdown:")
    for rel, count in sorted(rel_counts.items(), key=lambda x: -x[1]):
        # Histogram bar capped at 40 characters.
        bar = "█" * min(40, count)
        print(f" {rel:<14} {count:>5} {bar}")

    if prereq_order:
        print(f"\n First 10 concepts in prerequisite order (AlphaManimator scene sequence):")
        for i, concept in enumerate(prereq_order[:10], 1):
            print(f" {i:>2}. {concept}")
585
+
586
+
587
+ # ─────────────────────────────────────────────────────────────────
588
+ # CLI
589
+ # ─────────────────────────────────────────────────────────────────
590
+
591
def parse_page_range(s: str) -> tuple[int, int]:
    """Parse a CLI page range like '1-100' into (start, end).

    Raises argparse.ArgumentTypeError on malformed input. Fixes vs. the
    original: non-numeric parts previously leaked a raw ValueError (ugly
    traceback instead of a clean argparse message), and a reversed range
    like '100-1' was silently accepted and later produced zero pages.
    """
    parts = s.split("-")
    if len(parts) != 2:
        raise argparse.ArgumentTypeError("Use format like '1-100'")
    try:
        start, end = int(parts[0]), int(parts[1])
    except ValueError:
        raise argparse.ArgumentTypeError("Use format like '1-100'") from None
    if start > end:
        raise argparse.ArgumentTypeError("Start page must not exceed end page")
    return start, end
596
+
597
+
598
def main():
    """CLI entry point: load entities, mine co-occurrence, classify pairs, save graph."""
    parser = argparse.ArgumentParser(
        description=(
            "Build a typed relationship graph (Layer 1 concept ontology) "
            "from d2l-en.pdf + kg_nodes.jsonl using a local vLLM model."
        )
    )
    parser.add_argument("pdf",
                        help="Path to d2l-en.pdf (or any DL textbook PDF)")
    parser.add_argument("kg_nodes",
                        help="Path to kg_nodes.jsonl from vocab_extractor_agent.py")
    parser.add_argument("--model",
                        default="Qwen/Qwen2.5-72B-Instruct-GPTQ-Int4",
                        help="Model name served by vLLM (default: Qwen2.5-72B-Instruct-GPTQ-Int4)")
    parser.add_argument("--vllm-url",
                        default="http://localhost:8000/v1",
                        help="vLLM server base URL (default: http://localhost:8000/v1)")
    parser.add_argument("--pages",
                        type=parse_page_range, default=None, metavar="START-END",
                        help="Page range to extract from PDF, e.g. 1-500 (default: all)")
    parser.add_argument("--batch-size",
                        type=int, default=20,
                        help="Entity pairs per model call (default: 20, max ~30 for reliability)")
    parser.add_argument("--min-cooccurrence",
                        type=int, default=2,
                        help="Min paragraph co-occurrence for a pair to be considered (default: 2)")
    parser.add_argument("--top-n-per-entity",
                        type=int, default=15,
                        help="Max candidate partners per entity, ranked by co-occurrence (default: 15)")
    parser.add_argument("--output-dir",
                        default="ontology_output",
                        help="Output directory (default: ontology_output/)")
    parser.add_argument("--resume",
                        action="store_true",
                        help="Resume from existing session.json in output-dir")
    parser.add_argument("--cooccurrence-window",
                        type=int, default=1,
                        help="Paragraph window for co-occurrence (default: 1 = ±1 paragraph)")
    args = parser.parse_args()

    pdf_path = Path(args.pdf)
    kg_nodes_path = Path(args.kg_nodes)
    output_dir = Path(args.output_dir)
    session_path = output_dir / "session.json"
    output_dir.mkdir(parents=True, exist_ok=True)

    # Fail fast on missing inputs before any expensive work.
    if not pdf_path.exists():
        sys.exit(f"PDF not found: {pdf_path}")
    if not kg_nodes_path.exists():
        sys.exit(f"kg_nodes.jsonl not found: {kg_nodes_path}")

    # ── Load or init session ──────────────────────────────
    session = {}
    if args.resume and session_path.exists():
        with open(session_path) as f:
            session = json.load(f)
        print(f" ↺ Resumed session from {session_path}")

    # ── Step 1: Load entities ─────────────────────────────
    # kg_nodes.jsonl: one JSON object per line; blank lines are skipped.
    print(f"\n[1/5] Loading entities from {kg_nodes_path} …")
    entities: dict[str, dict] = {}
    with open(kg_nodes_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            node = json.loads(line)
            # Fall back to a snake_cased label when the node has no "id".
            entity_id = node.get("id") or node.get("label", "").replace(" ", "_")
            entities[entity_id] = {
                "type": node.get("type", "OTHER"),
                "freq": node.get("freq", 0),
                "label": node.get("label", entity_id),
            }
    print(f" → Loaded {len(entities):,} entities")

    entity_set = set(entities.keys())
    # Also add label forms for matching
    # NOTE(review): label_to_id is built but never read below — dead code,
    # kept as-is; candidate for removal.
    label_to_id = {v["label"]: k for k, v in entities.items()}

    # ── Step 2: Extract paragraphs & co-occurrence ────────
    print(f"\n[2/5] Extracting paragraph co-occurrence from PDF …")
    if "cooccurrence" in session:
        # Resume path: rebuild frozenset keys from the "a|||b" string form.
        print(" Using cached co-occurrence index from session.")
        cooccurrence = {
            frozenset(k.split("|||")): v
            for k, v in session["cooccurrence"].items()
        }
    else:
        paragraphs = extract_paragraphs(str(pdf_path), args.pages)
        cooccurrence = build_cooccurrence_index(
            paragraphs, entity_set, window=args.cooccurrence_window
        )
        # Serialize for session (frozenset → string key)
        session["cooccurrence"] = {
            "|||".join(sorted(k)): v
            for k, v in cooccurrence.items()
        }
        with open(session_path, "w") as f:
            json.dump(session, f, indent=2)

    # ── Step 3: Generate candidate pairs ──────────────────
    print(f"\n[3/5] Generating candidate pairs …")
    candidates = generate_candidate_pairs(
        entities,
        cooccurrence,
        min_cooccurrence=args.min_cooccurrence,
        top_n_per_entity=args.top_n_per_entity,
    )

    # ── Step 4: Run local model ───────────────────────────
    print(f"\n[4/5] Running relationship extraction via local vLLM …")
    print(f" Model : {args.model}")
    print(f" Server: {args.vllm_url}")
    print(f" Batch : {args.batch_size} pairs per call")

    # vLLM's OpenAI-compatible server ignores the API key, but the client
    # requires a non-empty value.
    client = OpenAI(base_url=args.vllm_url, api_key="not-needed")

    # Quick connectivity check
    try:
        models = client.models.list()
        available = [m.id for m in models.data]
        print(f" ✓ vLLM server reachable. Available models: {available}")
    except Exception as e:
        sys.exit(
            f"\n ✗ Cannot reach vLLM server at {args.vllm_url}\n"
            f" Error: {e}\n\n"
            f" Start the server first:\n"
            f" vllm serve {args.model} --gpu-memory-utilization 0.90\n"
        )

    raw_edges = run_extraction_batches(
        candidates,
        client,
        args.model,
        args.batch_size,
        session,
        session_path,
    )

    # ── Step 5: Post-process & save ───────────────────────
    print(f"\n[5/5] Post-processing and saving outputs …")
    final_edges = postprocess_edges(raw_edges, entities)
    save_outputs(final_edges, entities, output_dir)


if __name__ == "__main__":
    main()
uv.lock CHANGED
The diff for this file is too large to render. See raw diff