#!/usr/bin/env python3
"""
graph_reasoning.py
CLI runner for Graph-PRefLexOR-style models:
- Load a user-specified HF model
- Accept a user prompt (arg or stdin)
- Generate with Hugging Face Transformers
- Save prompt, rendered prompt, thinking/content/full output, and graph artifacts
- Extract the tagged graph JSON block, parse JSON, build a NetworkX DiGraph
- Render graph to PNG + SVG (Graphviz dot if available, else spring layout)
- Robust fail-safe crash handling + atomic writes
Example:
python graph_reasoning.py \
--model lamm-mit/Graph-Preflexor-8b_12292025 \
--prompt "Explain dragline silk toughness."
Stdin prompt:
echo "Your prompt here" | python graph_reasoning.py --model ... --prompt -
Notes:
- If the model uses a different thinking end token, pass --think-end-token-id
- If the model doesn't support enable_thinking in apply_chat_template, we fall back safely.
"""
import os
import re
import sys
import json
import math
import time
import argparse
import logging
from datetime import datetime
from typing import Optional, Tuple, Any, Dict
import torch
import networkx as nx
import matplotlib.pyplot as plt
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
# ==============================================================================
# Constants / defaults
# ==============================================================================
GRAPH_JSON_OPEN = ""
GRAPH_JSON_CLOSE = ""
# ==============================================================================
# Helpers: filesystem + parsing
# ==============================================================================
def atomic_write_text(path: str, text: str) -> None:
"""Write text atomically to avoid partial files on crash."""
tmp = path + ".tmp"
with open(tmp, "w", encoding="utf-8") as f:
f.write(text)
os.replace(tmp, path)
def atomic_write_bytes(path: str, data: bytes) -> None:
"""Atomic binary write."""
tmp = path + ".tmp"
with open(tmp, "wb") as f:
f.write(data)
os.replace(tmp, path)
def safe_json_loads(s: str) -> Optional[Any]:
"""Best-effort JSON parsing."""
try:
return json.loads(s)
except Exception:
return None
def now_run_id() -> str:
return datetime.now().strftime("%Y%m%d_%H%M%S")
def resolve_prompt(prompt_arg: str) -> str:
"""
Resolve prompt from:
- literal string
- '-' meaning read stdin fully
- '@path' meaning read prompt from file
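    Example (hypothetical path):
        resolve_prompt("@notes/prompt.txt")  # returns the stripped contents of notes/prompt.txt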
"""
if prompt_arg == "-":
return sys.stdin.read().strip()
if prompt_arg.startswith("@"):
path = prompt_arg[1:]
with open(path, "r", encoding="utf-8") as f:
return f.read().strip()
return prompt_arg
def split_thinking_by_token_id(
output_ids: list,
tokenizer,
think_end_id: Optional[int],
) -> Tuple[str, str]:
"""
Split generated token ids into (thinking, final_content) based on think_end_id.
If think_end_id is None or not found, returns ("", decoded_all) as a safe fallback.
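    Example (hypothetical token ids, with 42 as the end-of-thinking id):
        split_thinking_by_token_id([7, 8, 42, 9], tok, 42)
        -> (tok.decode([7, 8, 42]), tok.decode([9])), both stripped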
"""
if think_end_id is None:
return "", tokenizer.decode(output_ids, skip_special_tokens=True).strip()
try:
# Find first occurrence of think_end_id
idx = output_ids.index(think_end_id) + 1
except ValueError:
idx = 0
thinking = tokenizer.decode(output_ids[:idx], skip_special_tokens=True).strip()
content = tokenizer.decode(output_ids[idx:], skip_special_tokens=True).strip()
return thinking, content
def extract_graph_json_block(text: str) -> Tuple[Optional[str], Optional[dict]]:
"""
    Extract the first GRAPH_JSON_OPEN...GRAPH_JSON_CLOSE block from text.
Returns (raw_json_text, parsed_obj) or (None, None).
Fail-safe recovery:
- try parsing inner content
- else take largest {...} region inside tag block
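    Example (assuming the default "<graph>"/"</graph>" markers):
        >>> extract_graph_json_block('<graph>{"nodes": []}</graph>')
        ('{"nodes": []}', {'nodes': []})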
"""
m = re.search(
rf"{re.escape(GRAPH_JSON_OPEN)}(.*?){re.escape(GRAPH_JSON_CLOSE)}",
text,
flags=re.DOTALL,
)
if not m:
return None, None
inner = m.group(1).strip()
obj = safe_json_loads(inner)
if obj is not None and isinstance(obj, dict):
return inner, obj
i1 = inner.find("{")
i2 = inner.rfind("}")
if i1 != -1 and i2 != -1 and i2 > i1:
candidate = inner[i1 : i2 + 1].strip()
obj2 = safe_json_loads(candidate)
if obj2 is not None and isinstance(obj2, dict):
return candidate, obj2
return inner, None
# ==============================================================================
# Graph utilities
# ==============================================================================
def build_nx_graph(graph_obj: Dict[str, Any]) -> nx.DiGraph:
"""
Build a NetworkX DiGraph from JSON:
graph_obj["nodes"] = [{"id": "...", ...}, ...]
graph_obj["edges"] = [{"source":"...", "target":"...", "relation":"...", ...}, ...]
"""
G = nx.DiGraph()
nodes = graph_obj.get("nodes", []) or []
edges = graph_obj.get("edges", []) or []
for n in nodes:
if not isinstance(n, dict):
continue
nid = n.get("id")
if nid:
attrs = {k: v for k, v in n.items() if k != "id"}
G.add_node(nid, **attrs)
for e in edges:
if not isinstance(e, dict):
continue
src = e.get("source")
tgt = e.get("target")
if not (src and tgt):
continue
rel = e.get("relation", "")
attrs = {k: v for k, v in e.items() if k not in ("source", "target")}
attrs["relation"] = rel
if src not in G:
G.add_node(src)
if tgt not in G:
G.add_node(tgt)
G.add_edge(src, tgt, **attrs)
return G
def layout_graph(G: nx.DiGraph):
"""
Prefer Graphviz 'dot' layout if available; else spring layout.
"""
try:
from networkx.drawing.nx_pydot import graphviz_layout
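        # nx_pydot needs the 'pydot' package and a Graphviz install providing 'dot'.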
pos = graphviz_layout(G, prog="dot")
return pos, "graphviz(dot)"
except Exception:
pos = nx.spring_layout(G, seed=7, k=0.9)
return pos, "spring_layout"
def visualize_and_save_graph(G: nx.DiGraph, out_dir: str, title: str, log: logging.Logger):
"""
Render and save PNG + SVG with edge relation labels.
Fail-safe: saves a minimal plot if something fails.
"""
png_path = os.path.join(out_dir, "graph.png")
svg_path = os.path.join(out_dir, "graph.svg")
if G.number_of_nodes() == 0:
log.warning("Graph has 0 nodes; skipping visualization.")
return None, None
pos, layout_used = layout_graph(G)
log.info(f"Graph layout: {layout_used} | nodes={G.number_of_nodes()} edges={G.number_of_edges()}")
n = G.number_of_nodes()
fig_w = min(22, max(12, 0.9 * math.sqrt(n) * 8))
fig_h = min(12, max(7, 0.6 * math.sqrt(n) * 6))
plt.figure(figsize=(fig_w, fig_h))
try:
nx.draw_networkx_nodes(G, pos, node_size=2200, linewidths=1.2)
nx.draw_networkx_edges(G, pos, arrows=True, arrowstyle="-|>", arrowsize=18, width=1.6)
nx.draw_networkx_labels(G, pos, font_size=10)
edge_labels = {(u, v): (d.get("relation") or "") for u, v, d in G.edges(data=True)}
nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=9, rotate=False)
plt.title(f"{title} ({layout_used})")
plt.axis("off")
plt.tight_layout()
plt.savefig(png_path, dpi=300, bbox_inches="tight")
plt.savefig(svg_path, bbox_inches="tight")
plt.close()
return png_path, svg_path
except Exception as e:
log.exception(f"Visualization failed (attempting minimal save): {e}")
        plt.close()  # discard the partially drawn figure before the minimal retry
        plt.figure(figsize=(12, 7))
nx.draw(G, with_labels=True)
plt.title(f"{title} (minimal)")
plt.axis("off")
plt.tight_layout()
plt.savefig(png_path, dpi=200, bbox_inches="tight")
plt.savefig(svg_path, bbox_inches="tight")
plt.close()
return png_path, svg_path
# ==============================================================================
# Tokenizer / prompt template compatibility
# ==============================================================================
def render_chat_prompt(tokenizer, user_prompt: str, enable_thinking: bool, log: logging.Logger) -> str:
"""
Render prompt using chat template when available.
- Tries enable_thinking=True if requested.
- Falls back to enable_thinking=False.
- Falls back to a minimal plain prompt if apply_chat_template fails.
"""
messages = [{"role": "user", "content": user_prompt}]
if hasattr(tokenizer, "apply_chat_template"):
# Try with enable_thinking if requested
if enable_thinking:
try:
return tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True,
enable_thinking=True,
)
except TypeError as e:
# Some tokenizers don't accept enable_thinking kwarg
log.warning(f"Tokenizer chat template does not support enable_thinking kwarg: {e}")
except Exception as e:
log.warning(f"apply_chat_template(enable_thinking=True) failed; falling back: {e}")
# Try without enable_thinking
try:
return tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True,
)
except Exception as e:
log.warning(f"apply_chat_template failed; falling back to plain prompt: {e}")
# Plain prompt fallback
return user_prompt.strip()
# ==============================================================================
# Main
# ==============================================================================
def parse_args() -> argparse.Namespace:
p = argparse.ArgumentParser(
description="CLI Graph Reasoning Runner (Graph-PRefLexOR style): generate, extract , visualize.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Model/token/auth
p.add_argument("--model", required=True, help="Hugging Face model name or local path")
p.add_argument("--hf-token", default=None, help="HF token (or set HF_TOKEN env var)")
p.add_argument("--revision", default=None, help="Model revision (branch/tag/commit)")
# Prompt
p.add_argument(
"--prompt",
required=True,
help="Prompt text, or '-' for stdin, or '@path' to read from file",
)
p.add_argument(
"--enable-thinking",
action="store_true",
help="Attempt to enable thinking via tokenizer.apply_chat_template(enable_thinking=True)",
)
# Generation
p.add_argument("--max-new-tokens", type=int, default=32768)
p.add_argument("--temperature", type=float, default=0.2)
p.add_argument("--do-sample", action="store_true", help="Enable sampling")
p.add_argument("--top-p", type=float, default=None, help="Optional top_p")
p.add_argument("--top-k", type=int, default=None, help="Optional top_k")
p.add_argument("--repetition-penalty", type=float, default=None, help="Optional repetition penalty")
# Thinking split
p.add_argument(
"--think-end-token-id",
type=int,
default=None,
help="Token id marking end of thinking (e.g., 151668). If unset, no splitting occurs.",
)
# Output
p.add_argument("--out-dir", default=None, help="Output directory (default: ./run_)")
p.add_argument("--run-id", default=None, help="Optional custom run id (default: timestamp)")
p.add_argument("--print-thinking", action="store_true", help="Also print the thinking section to stdout")
p.add_argument("--no-print", action="store_true", help="Do not print model output to stdout")
# Performance/device
p.add_argument("--dtype", default="auto", choices=["auto", "float16", "bfloat16", "float32"], help="torch_dtype")
p.add_argument("--device-map", default="auto", help="Transformers device_map (e.g., auto, cuda:0, cpu)")
p.add_argument("--attn-impl", default=None, help="Optional attn_implementation (e.g., flash_attention_2)")
return p.parse_args()
def setup_outdir(run_id: str, out_dir_arg: Optional[str]) -> str:
if out_dir_arg:
out_dir = os.path.abspath(out_dir_arg)
else:
out_dir = os.path.abspath(f"./run_{run_id}")
os.makedirs(out_dir, exist_ok=True)
return out_dir
def setup_logger(out_dir: str) -> logging.Logger:
log_path = os.path.join(out_dir, "run.log")
logger = logging.getLogger("graph_reasoning")
logger.setLevel(logging.INFO)
logger.handlers = [] # avoid duplicate handlers in repeated runs
fmt = logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
fh = logging.FileHandler(log_path)
fh.setFormatter(fmt)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(fmt)
logger.addHandler(fh)
logger.addHandler(sh)
return logger
def torch_dtype_from_arg(dtype: str):
if dtype == "auto":
return "auto"
if dtype == "float16":
return torch.float16
if dtype == "bfloat16":
return torch.bfloat16
if dtype == "float32":
return torch.float32
return "auto"
def main() -> int:
args = parse_args()
run_id = args.run_id or now_run_id()
out_dir = setup_outdir(run_id, args.out_dir)
log = setup_logger(out_dir)
hf_token = args.hf_token or os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_TOKEN")
# Persist run metadata early
meta = {
"run_id": run_id,
"timestamp": datetime.now().isoformat(),
"model": args.model,
"revision": args.revision,
"max_new_tokens": args.max_new_tokens,
"temperature": args.temperature,
"do_sample": bool(args.do_sample),
"top_p": args.top_p,
"top_k": args.top_k,
"repetition_penalty": args.repetition_penalty,
"think_end_token_id": args.think_end_token_id,
"enable_thinking": bool(args.enable_thinking),
"dtype": args.dtype,
"device_map": args.device_map,
"attn_impl": args.attn_impl,
"python": sys.version,
"torch": getattr(torch, "__version__", None),
}
atomic_write_text(os.path.join(out_dir, "run_meta.json"), json.dumps(meta, indent=2))
# Resolve prompt
prompt = resolve_prompt(args.prompt)
if not prompt:
log.error("Prompt is empty.")
return 2
atomic_write_text(os.path.join(out_dir, "prompt.txt"), prompt)
log.info(f"Output dir: {out_dir}")
log.info(f"Model: {args.model}")
if args.revision:
log.info(f"Revision: {args.revision}")
log.info("Loading tokenizer/model...")
# Load tokenizer/model
tok_kwargs = {"token": hf_token} if hf_token else {}
if args.revision:
tok_kwargs["revision"] = args.revision
tokenizer = AutoTokenizer.from_pretrained(args.model, **tok_kwargs)
model_kwargs = {
"device_map": args.device_map,
"token": hf_token if hf_token else None,
}
if args.revision:
model_kwargs["revision"] = args.revision
    model_kwargs["torch_dtype"] = torch_dtype_from_arg(args.dtype)
if args.attn_impl:
model_kwargs["attn_implementation"] = args.attn_impl
model = AutoModelForCausalLM.from_pretrained(args.model, **model_kwargs)
model.eval()
# Render chat prompt
rendered = render_chat_prompt(tokenizer, prompt, enable_thinking=args.enable_thinking, log=log)
atomic_write_text(os.path.join(out_dir, "prompt_rendered.txt"), rendered)
# Tokenize
model_inputs = tokenizer(rendered, return_tensors="pt")
# Move inputs to model device where possible
try:
model_inputs = {k: v.to(model.device) for k, v in model_inputs.items()}
except Exception:
# In some device_map setups, model.device may not be meaningful; leave as-is.
pass
# Generation config
gen_cfg_kwargs = dict(
max_new_tokens=args.max_new_tokens,
do_sample=bool(args.do_sample),
temperature=float(args.temperature),
)
if args.top_p is not None:
gen_cfg_kwargs["top_p"] = float(args.top_p)
if args.top_k is not None:
gen_cfg_kwargs["top_k"] = int(args.top_k)
if args.repetition_penalty is not None:
gen_cfg_kwargs["repetition_penalty"] = float(args.repetition_penalty)
gen_config = GenerationConfig(**gen_cfg_kwargs)
log.info("Generating...")
t0 = time.time()
with torch.no_grad():
generated = model.generate(**model_inputs, generation_config=gen_config)
t1 = time.time()
log.info(f"Generation done in {t1 - t0:.2f}s")
# Slice off prompt tokens to get only generated continuation
input_len = model_inputs["input_ids"].shape[1]
output_ids = generated[0, input_len:].tolist()
thinking, content = split_thinking_by_token_id(output_ids, tokenizer, args.think_end_token_id)
# Persist outputs (always)
atomic_write_text(os.path.join(out_dir, "thinking.txt"), thinking or "")
atomic_write_text(os.path.join(out_dir, "content.txt"), content or "")
atomic_write_text(os.path.join(out_dir, "full_output.txt"), (thinking + "\n\n" + content).strip())
# Print
if not args.no_print:
if args.print_thinking and thinking:
sys.stdout.write("\n" + "=" * 80 + "\nTHINKING\n" + "=" * 80 + "\n")
sys.stdout.write(thinking + "\n")
sys.stdout.write("\n" + "=" * 80 + "\nFINAL OUTPUT\n" + "=" * 80 + "\n")
sys.stdout.write(content + "\n")
sys.stdout.flush()
# Extract graph json
raw_block, graph_obj = extract_graph_json_block((thinking or "") + "\n" + (content or ""))
if raw_block is None:
log.warning("No ... block found in output.")
atomic_write_text(os.path.join(out_dir, "graph_status.txt"), "not_found")
return 0
atomic_write_text(os.path.join(out_dir, "graph_json_raw.txt"), raw_block)
if graph_obj is None:
log.warning("Found block, but JSON parsing failed. Saved raw block for inspection.")
atomic_write_text(os.path.join(out_dir, "graph_status.txt"), "found_but_parse_failed")
return 0
atomic_write_text(os.path.join(out_dir, "graph.json"), json.dumps(graph_obj, indent=2, ensure_ascii=False))
atomic_write_text(os.path.join(out_dir, "graph_status.txt"), "parsed_ok")
# Build & visualize graph
G = build_nx_graph(graph_obj)
atomic_write_text(
os.path.join(out_dir, "graph_stats.json"),
json.dumps(
{"nodes": G.number_of_nodes(), "edges": G.number_of_edges()},
indent=2,
),
)
png_path, svg_path = visualize_and_save_graph(G, out_dir, title="Graph Reasoning Output Graph", log=log)
if png_path and svg_path:
log.info(f"Saved graph: {png_path}")
log.info(f"Saved graph: {svg_path}")
return 0
if __name__ == "__main__":
# Hard fail-safe: always write CRASH marker if something bubbles up
_run_id = None
_out_dir = None
_log = None
try:
rc = main()
raise SystemExit(rc)
except SystemExit:
raise
except Exception as e:
# Best-effort to write crash marker if we can infer out_dir from args
try:
# Minimal heuristic: if user passed --out-dir use that; else default to latest run_* in cwd
# (We do not attempt to re-parse args fully here to avoid cascading failures.)
candidates = []
for name in os.listdir("."):
if name.startswith("run_") and os.path.isdir(name):
candidates.append(name)
candidates.sort(reverse=True)
fallback_dir = os.path.abspath(candidates[0]) if candidates else os.path.abspath("./")
atomic_write_text(os.path.join(fallback_dir, "CRASH.txt"), repr(e))
except Exception:
pass
raise