{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:run","uri":"program://Program_Conditioned_Adapter/module/run#L1-L395","kind":"module","name":"run","path":"run.py","language":"python","start_line":1,"end_line":395,"context_start_line":1,"context_end_line":395,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport argparse\nfrom pathlib import Path\nimport subprocess\nfrom typing import Dict, Any\nimport json\nimport os as _os\nimport importlib\n\nimport numpy as np\nfrom model.hf_snapshot import ensure_snapshot\nfrom modules.retrieval_policy import RetrievalPolicy # type: ignore\nfrom modules.runner_core import select_region, prepare_citations # type: ignore\n\n\n\n\ndef main() -> None:\n p = argparse.ArgumentParser()\n # Model & sources\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--sources\", default=str(_root()), help=\"Program sources root (path or URI)\")\n p.add_argument(\"--program\", default=None, help=\"Alias for --sources (program root path or URI)\")\n p.add_argument(\"--prompt\", default=(\n \"Explain how generation works for this program. Cite path:line for each claim.\"\n ))\n\n # PCA program options (program-agnostic; backend not enforced here)\n p.add_argument(\"--backend\", default=None, help=\"Program backend id (informational; use --pg-backend to enable ProgramGraph features)\")\n p.add_argument(\"--retrieval-policy\", dest=\"retrieval_policy\", default=None, help='Retrieval mix like \"sim:0.6,struct:0.4\"')\n p.add_argument(\"--retrieval-temp\", dest=\"retrieval_temp\", type=float, default=None)\n p.add_argument(\"--use-cache\", action=\"store_true\", help=\"Prefer caches when available (symbol/windows/facts)\")\n p.add_argument(\"--citations-enforce\", action=\"store_true\", help=\"Require citations policy in PCA layer\")\n p.add_argument(\"--citations-repair\", action=\"store_true\")\n p.add_argument(\"--pg-backend\", default=None, help=\"Dotted path to ProgramGraph factory, e.g. 
'examples.python_repo_grounded_qa.python_repo_graph:PythonRepoGraph'\")\n p.add_argument(\"--program-state\", default=None, help=\"Optional path to .program_state.json (defaults to /.program_state.json if present)\")\n\n # Base adapters generation\n p.add_argument(\"--adapters-dir\", default=None, help=\"Directory to save/find base adapters (defaults under examples/.../artifacts/base_adapters)\")\n p.add_argument(\"--base-rank\", type=int, default=8)\n p.add_argument(\"--embed-dim\", type=int, default=1536)\n p.add_argument(\"--include-text\", action=\"store_true\")\n p.add_argument(\"--text-max-bytes\", type=int, default=0)\n p.add_argument(\"--max-text-tokens\", type=int, default=0)\n p.add_argument(\"--seed\", type=int, default=0)\n\n # Runner mixing and capacity\n p.add_argument(\"--alpha\", type=float, default=20.0)\n p.add_argument(\"--rank\", type=int, default=12)\n p.add_argument(\"--gsub\", type=float, default=0.75)\n p.add_argument(\"--mix-beta\", type=float, default=0.1)\n p.add_argument(\"--target-weights\", default=None, help=\"CSV like q_proj=1,o_proj=1.1,up_proj=1.1,down_proj=1.05\")\n p.add_argument(\"--knowledge-preset\", action=\"store_true\", help=\"Use a preset of target weights tuned for knowledge recall (boost o/up/down; modest q/k/v)\")\n p.add_argument(\"--code-recall-preset\", action=\"store_true\", help=\"Opt-in preset tuned for code recall (o,v,up,down,gate emphasized; light q,k)\")\n p.add_argument(\"--delta-cap\", type=float, default=0.05, help=\"AB-norm clipping cap relative to base weight norm per layer (0 disables)\")\n # Entropy-aware capacity (forward to enhanced runner)\n p.add_argument(\"--entropy-aware\", action=\"store_true\")\n p.add_argument(\"--rank-min\", type=int, default=8)\n p.add_argument(\"--rank-max\", type=int, default=32)\n p.add_argument(\"--gsub-min\", type=float, default=0.6)\n p.add_argument(\"--gsub-max\", type=float, default=0.9)\n p.add_argument(\"--entropy-weights\", default=\"program=0.4,subgraph=0.4,question=0.2\")\n\n # Selection & context packing\n p.add_argument(\"--of-sources\", choices=[\"question\", \"zoom\"], default=\"question\")\n p.add_argument(\"--zoom-symbol\", default=None)\n p.add_argument(\"--zoom-radius\", type=int, default=0)\n p.add_argument(\"--pack-context\", action=\"store_true\")\n p.add_argument(\"--pack-mode\", choices=[\"heads\", \"windows\"], default=\"heads\")\n p.add_argument(\"--context-tokens\", type=int, default=3000)\n p.add_argument(\"--require-citations\", action=\"store_true\")\n p.add_argument(\"--citations-per-paragraph\", action=\"store_true\")\n p.add_argument(\"--function-first\", action=\"store_true\")\n p.add_argument(\"--ff-max-candidates\", type=int, default=24)\n p.add_argument(\"--ff-window-lines\", type=int, default=80)\n p.add_argument(\"--ff-threshold\", type=float, default=0.55)\n p.add_argument(\"--ff-noise-penalty\", type=float, default=0.30)\n # Layer schedule and q-aware weights (opt-in)\n p.add_argument(\"--layer-schedule\", action=\"store_true\", help=\"Enable a light per-layer multiplier rising toward top third (additive; default off)\")\n p.add_argument(\"--q-aware-weights\", action=\"store_true\", help=\"Heuristic reweighting of targets by question intent (additive; default off)\")\n p.add_argument(\"--per-target-rank-schedule\", action=\"store_true\", help=\"Trim per-target effective rank at run-time (additive; default off)\")\n p.add_argument(\"--rank-budget\", type=int, default=0, help=\"Optional per-layer rank budget across targets; rescale keeps to meet budget (0 
disables)\")\n p.add_argument(\"--ablate-attn\", action=\"store_true\", help=\"Ablate attention targets (q/k/v/o) by zeroing their weights\")\n p.add_argument(\"--ablate-mlp\", action=\"store_true\", help=\"Ablate MLP targets (up/down/gate) by zeroing their weights\")\n p.add_argument(\"--layer-rank-tiers\", action=\"store_true\", help=\"Use top/mid/low thirds per-layer rank keeps by target group (opt-in)\")\n # Mixture bank (opt-in)\n p.add_argument(\"--mixture-m\", type=int, default=0, help=\"Top-m subgraph adapters from bank to mix (0 disables)\")\n p.add_argument(\"--adapters-bank\", default=None, help=\"Path to a bank of sub_adapters (from build --per-module)\")\n # Adapter mapping is always enhanced; no CLI toggle\n # Alpha warmup and decoding hooks (opt-in)\n p.add_argument(\"--alpha-warmup\", action=\"store_true\", help=\"Use a lighter alpha on first attempt (or first structured pass), then full alpha on retry/subsequent passes\")\n p.add_argument(\"--adapter-aware-decoding\", action=\"store_true\", help=\"Slightly relax sampling and prompt pointer-first when citations are required\")\n # Telemetry verification (opt-in)\n p.add_argument(\"--telemetry-tests\", action=\"store_true\", help=\"Attempt simple verify_with_tests() on a few selected modules and record results (structured path only)\")\n # Reranking\n p.add_argument(\"--rerank\", action=\"store_true\")\n p.add_argument(\"--self-queries\", default=None, help=\"Path to self_queries.jsonl for retrieval boosts\")\n\n # Sampling controls\n p.add_argument(\"--do-sample\", action=\"store_true\")\n p.add_argument(\"--temperature\", type=float, default=0.7)\n p.add_argument(\"--top-p\", type=float, default=0.9)\n p.add_argument(\"--repetition-penalty\", type=float, default=1.1)\n p.add_argument(\"--min-new-tokens\", type=int, default=64)\n p.add_argument(\"--max-new-tokens\", type=int, default=512)\n # Local generation/perf controls\n p.add_argument(\"--kv-window\", type=int, default=0, help=\"Optional sliding KV window length; 0 disables\")\n p.add_argument(\"--head-device\", choices=[\"same\", \"cpu\", \"auto\"], default=\"same\", help=\"Place lm_head on cpu/auto to save VRAM\")\n\n # Device & telemetry\n p.add_argument(\"--device-map\", default=\"auto\", choices=[\"auto\", \"none\"])\n p.add_argument(\"--gpu-ids\", default=None)\n p.add_argument(\"--max-memory\", default=None)\n p.add_argument(\"--telemetry-out\", default=None)\n # DCPO/Structured controls\n p.add_argument(\"--structured\", action=\"store_true\")\n p.add_argument(\"--lfp-iters\", type=int, default=1)\n p.add_argument(\"--budget-H\", type=float, default=0.0)\n p.add_argument(\"--monotone-selection\", action=\"store_true\")\n p.add_argument(\"--samples\", type=int, default=1)\n p.add_argument(\"--cone-join\", choices=[\"concat\", \"weighted\"], default=\"concat\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\", help=\"Cache directory for HF models/tokenizers\")\n p.add_argument(\"--verbose\", action=\"store_true\")\n p.add_argument(\"--no-adapters\", action=\"store_true\", help=\"Disable applying adapters in the enhanced runner\")\n p.add_argument(\"--commit-footer\", action=\"store_true\", help=\"Append 'answer valid for commit X' footer\")\n args = p.parse_args()\n\n # Normalize program root (prefer --sources; allow --program alias)\n src_arg = args.sources\n if getattr(args, \"program\", None):\n src_arg = args.program if args.program else src_arg\n program_root = Path(src_arg)\n # Example directory for artifacts colocated with 
this run.py\n example_dir = Path(__file__).resolve().parent\n artifacts = example_dir / \"artifacts\"\n base_dir = Path(args.adapters_dir) if args.adapters_dir else (artifacts / \"base_adapters\")\n base_dir.mkdir(parents=True, exist_ok=True)\n cache_dir = Path(args.cache_dir)\n cache_dir.mkdir(parents=True, exist_ok=True)\n adapters_npz = base_dir / \"adapters.npz\"\n manifest_path = base_dir / \"manifest.json\"\n # Resolve program state path\n ps_path = None\n if args.program_state:\n ps_path = Path(args.program_state)\n else:\n default_ps = base_dir / \".program_state.json\"\n if default_ps.exists():\n ps_path = default_ps\n\n # Generate base adapters if not present by delegating to build.py (program-agnostic)\n if not adapters_npz.exists():\n cmd = [\n sys.executable,\n str(Path(__file__).resolve().parent / \"build.py\"),\n \"--sources\", str(program_root),\n \"--model\", str(args.model),\n \"--adapters-dir\", str(base_dir),\n \"--embed-dim\", str(int(args.embed_dim)),\n \"--seed\", str(int(args.seed)),\n ]\n if bool(args.include_text):\n cmd.append(\"--include-text\")\n if int(args.text_max_bytes) > 0:\n cmd += [\"--text-max-bytes\", str(int(args.text_max_bytes))]\n if int(args.max_text_tokens) > 0:\n cmd += [\"--max-text-tokens\", str(int(args.max_text_tokens))]\n if bool(args.knowledge_preset):\n cmd.append(\"--knowledge-preset\")\n if bool(args.code_recall_preset):\n cmd.append(\"--code-recall-preset\")\n subprocess.check_call(cmd)\n\n # Run directly via orchestrator\n from modules.runner import generate_answer\n from modules.runner import generate_answer_structured\n\n if args.gpu_ids and str(args.gpu_ids).strip():\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu_ids).strip()\n os.environ.setdefault(\"PYTORCH_CUDA_ALLOC_CONF\", \"expandable_segments:True\")\n\n # ProgramGraph + RetrievalPolicy selection (seed zoom) if requested\n zoom_seed = None\n try:\n if args.pg_backend:\n # Load PG factory dynamically only when explicitly provided\n def _load_symbol(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n pg_ctor = _load_symbol(args.pg_backend)\n pg = pg_ctor(str(program_root), ignore=None)\n pol = RetrievalPolicy.from_spec(args.retrieval_policy, temp=args.retrieval_temp)\n region_ids = select_region(args.prompt, pg, pol, top_k=8)\n ents_by_id = {e.id: e for e in pg.entities()}\n names = []\n for eid in region_ids[:4]:\n e = ents_by_id.get(eid)\n if e and e.name:\n names.append(e.name)\n if names:\n zoom_seed = \",\".join(names)\n except Exception:\n zoom_seed = None\n\n if bool(args.structured):\n res = generate_answer_structured(\n model_id=args.model,\n adapters_npz=str(adapters_npz),\n program_root=str(program_root),\n delta_cap=float(max(0.0, args.delta_cap)),\n prompt=args.prompt,\n cache_dir=str(cache_dir),\n device_map=str(args.device_map),\n alpha=float(args.alpha),\n rank=int(args.rank),\n gsub=float(args.gsub),\n beta=float(args.mix_beta),\n of_sources=(\"zoom\" if (zoom_seed and not args.zoom_symbol) else args.of_sources),\n zoom_symbol=(args.zoom_symbol or (zoom_seed or None)),\n zoom_radius=int(args.zoom_radius),\n pack_context=bool(args.pack_context),\n pack_mode=args.pack_mode,\n context_tokens=int(args.context_tokens),\n require_citations=bool(args.require_citations),\n citations_per_paragraph=bool(args.citations_per_paragraph),\n function_first=bool(args.function_first),\n ff_max_candidates=int(args.ff_max_candidates),\n ff_window_lines=int(args.ff_window_lines),\n 
ff_threshold=float(args.ff_threshold),\n ff_noise_penalty=float(args.ff_noise_penalty),\n do_sample=bool(args.do_sample),\n temperature=float(args.temperature),\n top_p=float(args.top_p),\n repetition_penalty=float(args.repetition_penalty),\n min_new_tokens=int(args.min_new_tokens),\n max_new_tokens=int(args.max_new_tokens),\n kv_window=int(args.kv_window),\n head_device=str(args.head_device),\n seed=int(args.seed),\n entropy_aware=bool(args.entropy_aware),\n rank_min=int(args.rank_min),\n rank_max=int(args.rank_max),\n gsub_min=float(args.gsub_min),\n gsub_max=float(args.gsub_max),\n entropy_weights=str(args.entropy_weights),\n target_weights=(str(args.target_weights) if args.target_weights else (\n \"o_proj=1.15,v_proj=1.10,up_proj=1.10,down_proj=1.05,gate_proj=1.00,q_proj=0.95,k_proj=0.90\" if args.code_recall_preset else (\n \"q_proj=0.95,k_proj=0.95,v_proj=0.95,o_proj=1.10,up_proj=1.10,down_proj=1.05\" if args.knowledge_preset else None\n )\n )),\n rerank=bool(args.rerank),\n self_queries_path=(str(args.self_queries) if args.self_queries else None),\n commit_footer=bool(args.commit_footer),\n verbose=bool(args.verbose),\n lfp_iters=int(args.lfp_iters),\n budget_H=float(args.budget_H),\n monotone_selection=bool(args.monotone_selection),\n program_state_path=(str(ps_path) if ps_path else None),\n samples=int(args.samples),\n cone_join=str(args.cone_join),\n telemetry_out=(str(args.telemetry_out) if args.telemetry_out else None),\n layer_schedule=bool(args.layer_schedule),\n q_aware_weights=bool(args.q_aware_weights),\n mixture_m=int(args.mixture_m),\n adapters_bank=(str(args.adapters_bank) if args.adapters_bank else None),\n # Forward additional knobs to structured path\n per_target_rank_schedule=bool(args.per_target_rank_schedule),\n rank_budget=int(args.rank_budget),\n ablate_attn=bool(args.ablate_attn),\n ablate_mlp=bool(args.ablate_mlp),\n alpha_warmup=bool(args.alpha_warmup),\n adapter_aware_decoding=bool(args.adapter_aware_decoding),\n layer_rank_tiers=bool(args.layer_rank_tiers),\n telemetry_verify_tests=bool(args.telemetry_tests),\n )\n # PCA evidence/provenance stamping when enabled\n try:\n if (args.citations_enforce or args.citations_per_paragraph or args.citations_repair) and args.pg_backend:\n # Load manifest (for provenance)\n mf_obj: Dict[str, Any] = {}\n try:\n if os.path.exists(manifest_path):\n mf_obj = json.loads(open(manifest_path, \"r\", encoding=\"utf-8\").read())\n except Exception:\n mf_obj = {}\n # Minimal unit\n unit = {\"text\": res.get(\"text\", \"\"), \"evidence\": []}\n citations_policy = {\n \"enforce\": bool(args.citations_enforce),\n \"per_paragraph\": bool(args.citations_per_paragraph),\n \"repair\": bool(args.citations_repair),\n }\n # Build ProgramGraph for stamping when provided\n def _load_symbol2(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n pg_ctor2 = _load_symbol2(args.pg_backend)\n pg2 = pg_ctor2(str(program_root), ignore=None)\n pol2 = RetrievalPolicy.from_spec(args.retrieval_policy, temp=args.retrieval_temp)\n region_ids2 = select_region(args.prompt, pg2, pol2, top_k=8)\n stamped = prepare_citations([unit], region_ids2, pg2, citations_policy=citations_policy, manifest=mf_obj)\n if stamped:\n res[\"evidence\"] = stamped[0].get(\"evidence\", [])\n res[\"provenance\"] = stamped[0].get(\"provenance\", res.get(\"provenance\"))\n except Exception:\n pass\n print(res.get(\"text\", \"\"))\n if args.verbose:\n try:\n import json as _json\n print(_json.dumps({\n \"must\": 
len(res.get(\"must\", [])),\n \"may\": len(res.get(\"may\", [])),\n \"lfp_passes\": res.get(\"lfp_passes\"),\n \"converged\": res.get(\"converged\"),\n \"confidence\": res.get(\"confidence\"),\n }, indent=2))\n except Exception:\n pass\n else:\n text = generate_answer(\n model_id=args.model,\n adapters_npz=str(adapters_npz),\n program_root=str(program_root),\n delta_cap=float(max(0.0, args.delta_cap)),\n prompt=args.prompt,\n cache_dir=str(cache_dir),\n device_map=str(args.device_map),\n alpha=float(args.alpha),\n rank=int(args.rank),\n gsub=float(args.gsub),\n beta=float(args.mix_beta),\n of_sources=(\"zoom\" if (zoom_seed and not args.zoom_symbol) else args.of_sources),\n zoom_symbol=(args.zoom_symbol or (zoom_seed or None)),\n zoom_radius=int(args.zoom_radius),\n pack_context=bool(args.pack_context),\n pack_mode=args.pack_mode,\n context_tokens=int(args.context_tokens),\n require_citations=bool(args.require_citations),\n citations_per_paragraph=bool(args.citations_per_paragraph),\n function_first=bool(args.function_first),\n ff_max_candidates=int(args.ff_max_candidates),\n ff_window_lines=int(args.ff_window_lines),\n ff_threshold=float(args.ff_threshold),\n ff_noise_penalty=float(args.ff_noise_penalty),\n do_sample=bool(args.do_sample),\n temperature=float(args.temperature),\n top_p=float(args.top_p),\n repetition_penalty=float(args.repetition_penalty),\n min_new_tokens=int(args.min_new_tokens),\n max_new_tokens=int(args.max_new_tokens),\n kv_window=int(args.kv_window),\n head_device=str(args.head_device),\n seed=int(args.seed),\n entropy_aware=bool(args.entropy_aware),\n rank_min=int(args.rank_min),\n rank_max=int(args.rank_max),\n gsub_min=float(args.gsub_min),\n gsub_max=float(args.gsub_max),\n entropy_weights=str(args.entropy_weights),\n target_weights=(str(args.target_weights) if args.target_weights else (\n \"o_proj=1.15,v_proj=1.10,up_proj=1.10,down_proj=1.05,gate_proj=1.00,q_proj=0.95,k_proj=0.90\" if args.code_recall_preset else (\n \"q_proj=0.95,k_proj=0.95,v_proj=0.95,o_proj=1.10,up_proj=1.10,down_proj=1.05\" if args.know\n# ... truncated ...","source_hash":"22e0ac2e1ea477b2b98389629652af634a8290bc1a59f96b58a4505a31fbf152","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:run.main","uri":"program://Program_Conditioned_Adapter/function/run.main#L21-L389","kind":"function","name":"main","path":"run.py","language":"python","start_line":21,"end_line":389,"context_start_line":1,"context_end_line":395,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport argparse\nfrom pathlib import Path\nimport subprocess\nfrom typing import Dict, Any\nimport json\nimport os as _os\nimport importlib\n\nimport numpy as np\nfrom model.hf_snapshot import ensure_snapshot\nfrom modules.retrieval_policy import RetrievalPolicy # type: ignore\nfrom modules.runner_core import select_region, prepare_citations # type: ignore\n\n\n\n\ndef main() -> None:\n p = argparse.ArgumentParser()\n # Model & sources\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--sources\", default=str(_root()), help=\"Program sources root (path or URI)\")\n p.add_argument(\"--program\", default=None, help=\"Alias for --sources (program root path or URI)\")\n p.add_argument(\"--prompt\", default=(\n \"Explain how generation works for this program. 
Cite path:line for each claim.\"\n ))\n\n # PCA program options (program-agnostic; backend not enforced here)\n p.add_argument(\"--backend\", default=None, help=\"Program backend id (informational; use --pg-backend to enable ProgramGraph features)\")\n p.add_argument(\"--retrieval-policy\", dest=\"retrieval_policy\", default=None, help='Retrieval mix like \"sim:0.6,struct:0.4\"')\n p.add_argument(\"--retrieval-temp\", dest=\"retrieval_temp\", type=float, default=None)\n p.add_argument(\"--use-cache\", action=\"store_true\", help=\"Prefer caches when available (symbol/windows/facts)\")\n p.add_argument(\"--citations-enforce\", action=\"store_true\", help=\"Require citations policy in PCA layer\")\n p.add_argument(\"--citations-repair\", action=\"store_true\")\n p.add_argument(\"--pg-backend\", default=None, help=\"Dotted path to ProgramGraph factory, e.g. 'examples.python_repo_grounded_qa.python_repo_graph:PythonRepoGraph'\")\n p.add_argument(\"--program-state\", default=None, help=\"Optional path to .program_state.json (defaults to /.program_state.json if present)\")\n\n # Base adapters generation\n p.add_argument(\"--adapters-dir\", default=None, help=\"Directory to save/find base adapters (defaults under examples/.../artifacts/base_adapters)\")\n p.add_argument(\"--base-rank\", type=int, default=8)\n p.add_argument(\"--embed-dim\", type=int, default=1536)\n p.add_argument(\"--include-text\", action=\"store_true\")\n p.add_argument(\"--text-max-bytes\", type=int, default=0)\n p.add_argument(\"--max-text-tokens\", type=int, default=0)\n p.add_argument(\"--seed\", type=int, default=0)\n\n # Runner mixing and capacity\n p.add_argument(\"--alpha\", type=float, default=20.0)\n p.add_argument(\"--rank\", type=int, default=12)\n p.add_argument(\"--gsub\", type=float, default=0.75)\n p.add_argument(\"--mix-beta\", type=float, default=0.1)\n p.add_argument(\"--target-weights\", default=None, help=\"CSV like q_proj=1,o_proj=1.1,up_proj=1.1,down_proj=1.05\")\n p.add_argument(\"--knowledge-preset\", action=\"store_true\", help=\"Use a preset of target weights tuned for knowledge recall (boost o/up/down; modest q/k/v)\")\n p.add_argument(\"--code-recall-preset\", action=\"store_true\", help=\"Opt-in preset tuned for code recall (o,v,up,down,gate emphasized; light q,k)\")\n p.add_argument(\"--delta-cap\", type=float, default=0.05, help=\"AB-norm clipping cap relative to base weight norm per layer (0 disables)\")\n # Entropy-aware capacity (forward to enhanced runner)\n p.add_argument(\"--entropy-aware\", action=\"store_true\")\n p.add_argument(\"--rank-min\", type=int, default=8)\n p.add_argument(\"--rank-max\", type=int, default=32)\n p.add_argument(\"--gsub-min\", type=float, default=0.6)\n p.add_argument(\"--gsub-max\", type=float, default=0.9)\n p.add_argument(\"--entropy-weights\", default=\"program=0.4,subgraph=0.4,question=0.2\")\n\n # Selection & context packing\n p.add_argument(\"--of-sources\", choices=[\"question\", \"zoom\"], default=\"question\")\n p.add_argument(\"--zoom-symbol\", default=None)\n p.add_argument(\"--zoom-radius\", type=int, default=0)\n p.add_argument(\"--pack-context\", action=\"store_true\")\n p.add_argument(\"--pack-mode\", choices=[\"heads\", \"windows\"], default=\"heads\")\n p.add_argument(\"--context-tokens\", type=int, default=3000)\n p.add_argument(\"--require-citations\", action=\"store_true\")\n p.add_argument(\"--citations-per-paragraph\", action=\"store_true\")\n p.add_argument(\"--function-first\", action=\"store_true\")\n 
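# --- Editorial aside (illustrative, not part of run.py) ---------------------
# The --retrieval-policy flag accepts a mix spec such as "sim:0.6,struct:0.4",
# which RetrievalPolicy.from_spec consumes. That implementation is not shown
# in this dump, so the parser below is only a hypothetical sketch of the
# documented "name:weight" CSV format; the sum-to-one normalization is an
# assumption, not confirmed behavior.
def parse_retrieval_spec(spec: str) -> dict:
    weights = {}
    for part in (s.strip() for s in spec.split(",") if s.strip()):
        name, _, value = part.partition(":")
        weights[name.strip()] = float(value)
    total = sum(weights.values()) or 1.0
    return {k: v / total for k, v in weights.items()}

# Example: parse_retrieval_spec("sim:0.6,struct:0.4")
# -> {"sim": 0.6, "struct": 0.4} (already normalized in this case)
# -----------------------------------------------------------------------------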
p.add_argument(\"--ff-max-candidates\", type=int, default=24)\n p.add_argument(\"--ff-window-lines\", type=int, default=80)\n p.add_argument(\"--ff-threshold\", type=float, default=0.55)\n p.add_argument(\"--ff-noise-penalty\", type=float, default=0.30)\n # Layer schedule and q-aware weights (opt-in)\n p.add_argument(\"--layer-schedule\", action=\"store_true\", help=\"Enable a light per-layer multiplier rising toward top third (additive; default off)\")\n p.add_argument(\"--q-aware-weights\", action=\"store_true\", help=\"Heuristic reweighting of targets by question intent (additive; default off)\")\n p.add_argument(\"--per-target-rank-schedule\", action=\"store_true\", help=\"Trim per-target effective rank at run-time (additive; default off)\")\n p.add_argument(\"--rank-budget\", type=int, default=0, help=\"Optional per-layer rank budget across targets; rescale keeps to meet budget (0 disables)\")\n p.add_argument(\"--ablate-attn\", action=\"store_true\", help=\"Ablate attention targets (q/k/v/o) by zeroing their weights\")\n p.add_argument(\"--ablate-mlp\", action=\"store_true\", help=\"Ablate MLP targets (up/down/gate) by zeroing their weights\")\n p.add_argument(\"--layer-rank-tiers\", action=\"store_true\", help=\"Use top/mid/low thirds per-layer rank keeps by target group (opt-in)\")\n # Mixture bank (opt-in)\n p.add_argument(\"--mixture-m\", type=int, default=0, help=\"Top-m subgraph adapters from bank to mix (0 disables)\")\n p.add_argument(\"--adapters-bank\", default=None, help=\"Path to a bank of sub_adapters (from build --per-module)\")\n # Adapter mapping is always enhanced; no CLI toggle\n # Alpha warmup and decoding hooks (opt-in)\n p.add_argument(\"--alpha-warmup\", action=\"store_true\", help=\"Use a lighter alpha on first attempt (or first structured pass), then full alpha on retry/subsequent passes\")\n p.add_argument(\"--adapter-aware-decoding\", action=\"store_true\", help=\"Slightly relax sampling and prompt pointer-first when citations are required\")\n # Telemetry verification (opt-in)\n p.add_argument(\"--telemetry-tests\", action=\"store_true\", help=\"Attempt simple verify_with_tests() on a few selected modules and record results (structured path only)\")\n # Reranking\n p.add_argument(\"--rerank\", action=\"store_true\")\n p.add_argument(\"--self-queries\", default=None, help=\"Path to self_queries.jsonl for retrieval boosts\")\n\n # Sampling controls\n p.add_argument(\"--do-sample\", action=\"store_true\")\n p.add_argument(\"--temperature\", type=float, default=0.7)\n p.add_argument(\"--top-p\", type=float, default=0.9)\n p.add_argument(\"--repetition-penalty\", type=float, default=1.1)\n p.add_argument(\"--min-new-tokens\", type=int, default=64)\n p.add_argument(\"--max-new-tokens\", type=int, default=512)\n # Local generation/perf controls\n p.add_argument(\"--kv-window\", type=int, default=0, help=\"Optional sliding KV window length; 0 disables\")\n p.add_argument(\"--head-device\", choices=[\"same\", \"cpu\", \"auto\"], default=\"same\", help=\"Place lm_head on cpu/auto to save VRAM\")\n\n # Device & telemetry\n p.add_argument(\"--device-map\", default=\"auto\", choices=[\"auto\", \"none\"])\n p.add_argument(\"--gpu-ids\", default=None)\n p.add_argument(\"--max-memory\", default=None)\n p.add_argument(\"--telemetry-out\", default=None)\n # DCPO/Structured controls\n p.add_argument(\"--structured\", action=\"store_true\")\n p.add_argument(\"--lfp-iters\", type=int, default=1)\n p.add_argument(\"--budget-H\", type=float, default=0.0)\n 
p.add_argument(\"--monotone-selection\", action=\"store_true\")\n p.add_argument(\"--samples\", type=int, default=1)\n p.add_argument(\"--cone-join\", choices=[\"concat\", \"weighted\"], default=\"concat\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\", help=\"Cache directory for HF models/tokenizers\")\n p.add_argument(\"--verbose\", action=\"store_true\")\n p.add_argument(\"--no-adapters\", action=\"store_true\", help=\"Disable applying adapters in the enhanced runner\")\n p.add_argument(\"--commit-footer\", action=\"store_true\", help=\"Append 'answer valid for commit X' footer\")\n args = p.parse_args()\n\n # Normalize program root (prefer --sources; allow --program alias)\n src_arg = args.sources\n if getattr(args, \"program\", None):\n src_arg = args.program if args.program else src_arg\n program_root = Path(src_arg)\n # Example directory for artifacts colocated with this run.py\n example_dir = Path(__file__).resolve().parent\n artifacts = example_dir / \"artifacts\"\n base_dir = Path(args.adapters_dir) if args.adapters_dir else (artifacts / \"base_adapters\")\n base_dir.mkdir(parents=True, exist_ok=True)\n cache_dir = Path(args.cache_dir)\n cache_dir.mkdir(parents=True, exist_ok=True)\n adapters_npz = base_dir / \"adapters.npz\"\n manifest_path = base_dir / \"manifest.json\"\n # Resolve program state path\n ps_path = None\n if args.program_state:\n ps_path = Path(args.program_state)\n else:\n default_ps = base_dir / \".program_state.json\"\n if default_ps.exists():\n ps_path = default_ps\n\n # Generate base adapters if not present by delegating to build.py (program-agnostic)\n if not adapters_npz.exists():\n cmd = [\n sys.executable,\n str(Path(__file__).resolve().parent / \"build.py\"),\n \"--sources\", str(program_root),\n \"--model\", str(args.model),\n \"--adapters-dir\", str(base_dir),\n \"--embed-dim\", str(int(args.embed_dim)),\n \"--seed\", str(int(args.seed)),\n ]\n if bool(args.include_text):\n cmd.append(\"--include-text\")\n if int(args.text_max_bytes) > 0:\n cmd += [\"--text-max-bytes\", str(int(args.text_max_bytes))]\n if int(args.max_text_tokens) > 0:\n cmd += [\"--max-text-tokens\", str(int(args.max_text_tokens))]\n if bool(args.knowledge_preset):\n cmd.append(\"--knowledge-preset\")\n if bool(args.code_recall_preset):\n cmd.append(\"--code-recall-preset\")\n subprocess.check_call(cmd)\n\n # Run directly via orchestrator\n from modules.runner import generate_answer\n from modules.runner import generate_answer_structured\n\n if args.gpu_ids and str(args.gpu_ids).strip():\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu_ids).strip()\n os.environ.setdefault(\"PYTORCH_CUDA_ALLOC_CONF\", \"expandable_segments:True\")\n\n # ProgramGraph + RetrievalPolicy selection (seed zoom) if requested\n zoom_seed = None\n try:\n if args.pg_backend:\n # Load PG factory dynamically only when explicitly provided\n def _load_symbol(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n pg_ctor = _load_symbol(args.pg_backend)\n pg = pg_ctor(str(program_root), ignore=None)\n pol = RetrievalPolicy.from_spec(args.retrieval_policy, temp=args.retrieval_temp)\n region_ids = select_region(args.prompt, pg, pol, top_k=8)\n ents_by_id = {e.id: e for e in pg.entities()}\n names = []\n for eid in region_ids[:4]:\n e = ents_by_id.get(eid)\n if e and e.name:\n names.append(e.name)\n if names:\n zoom_seed = \",\".join(names)\n except Exception:\n zoom_seed = None\n\n if bool(args.structured):\n res = 
generate_answer_structured(\n model_id=args.model,\n adapters_npz=str(adapters_npz),\n program_root=str(program_root),\n delta_cap=float(max(0.0, args.delta_cap)),\n prompt=args.prompt,\n cache_dir=str(cache_dir),\n device_map=str(args.device_map),\n alpha=float(args.alpha),\n rank=int(args.rank),\n gsub=float(args.gsub),\n beta=float(args.mix_beta),\n of_sources=(\"zoom\" if (zoom_seed and not args.zoom_symbol) else args.of_sources),\n zoom_symbol=(args.zoom_symbol or (zoom_seed or None)),\n zoom_radius=int(args.zoom_radius),\n pack_context=bool(args.pack_context),\n pack_mode=args.pack_mode,\n context_tokens=int(args.context_tokens),\n require_citations=bool(args.require_citations),\n citations_per_paragraph=bool(args.citations_per_paragraph),\n function_first=bool(args.function_first),\n ff_max_candidates=int(args.ff_max_candidates),\n ff_window_lines=int(args.ff_window_lines),\n ff_threshold=float(args.ff_threshold),\n ff_noise_penalty=float(args.ff_noise_penalty),\n do_sample=bool(args.do_sample),\n temperature=float(args.temperature),\n top_p=float(args.top_p),\n repetition_penalty=float(args.repetition_penalty),\n min_new_tokens=int(args.min_new_tokens),\n max_new_tokens=int(args.max_new_tokens),\n kv_window=int(args.kv_window),\n head_device=str(args.head_device),\n seed=int(args.seed),\n entropy_aware=bool(args.entropy_aware),\n rank_min=int(args.rank_min),\n rank_max=int(args.rank_max),\n gsub_min=float(args.gsub_min),\n gsub_max=float(args.gsub_max),\n entropy_weights=str(args.entropy_weights),\n target_weights=(str(args.target_weights) if args.target_weights else (\n \"o_proj=1.15,v_proj=1.10,up_proj=1.10,down_proj=1.05,gate_proj=1.00,q_proj=0.95,k_proj=0.90\" if args.code_recall_preset else (\n \"q_proj=0.95,k_proj=0.95,v_proj=0.95,o_proj=1.10,up_proj=1.10,down_proj=1.05\" if args.knowledge_preset else None\n )\n )),\n rerank=bool(args.rerank),\n self_queries_path=(str(args.self_queries) if args.self_queries else None),\n commit_footer=bool(args.commit_footer),\n verbose=bool(args.verbose),\n lfp_iters=int(args.lfp_iters),\n budget_H=float(args.budget_H),\n monotone_selection=bool(args.monotone_selection),\n program_state_path=(str(ps_path) if ps_path else None),\n samples=int(args.samples),\n cone_join=str(args.cone_join),\n telemetry_out=(str(args.telemetry_out) if args.telemetry_out else None),\n layer_schedule=bool(args.layer_schedule),\n q_aware_weights=bool(args.q_aware_weights),\n mixture_m=int(args.mixture_m),\n adapters_bank=(str(args.adapters_bank) if args.adapters_bank else None),\n # Forward additional knobs to structured path\n per_target_rank_schedule=bool(args.per_target_rank_schedule),\n rank_budget=int(args.rank_budget),\n ablate_attn=bool(args.ablate_attn),\n ablate_mlp=bool(args.ablate_mlp),\n alpha_warmup=bool(args.alpha_warmup),\n adapter_aware_decoding=bool(args.adapter_aware_decoding),\n layer_rank_tiers=bool(args.layer_rank_tiers),\n telemetry_verify_tests=bool(args.telemetry_tests),\n )\n # PCA evidence/provenance stamping when enabled\n try:\n if (args.citations_enforce or args.citations_per_paragraph or args.citations_repair) and args.pg_backend:\n # Load manifest (for provenance)\n mf_obj: Dict[str, Any] = {}\n try:\n if os.path.exists(manifest_path):\n mf_obj = json.loads(open(manifest_path, \"r\", encoding=\"utf-8\").read())\n except Exception:\n mf_obj = {}\n # Minimal unit\n unit = {\"text\": res.get(\"text\", \"\"), \"evidence\": []}\n citations_policy = {\n \"enforce\": bool(args.citations_enforce),\n \"per_paragraph\": 
bool(args.citations_per_paragraph),\n \"repair\": bool(args.citations_repair),\n }\n # Build ProgramGraph for stamping when provided\n def _load_symbol2(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n pg_ctor2 = _load_symbol2(args.pg_backend)\n pg2 = pg_ctor2(str(program_root), ignore=None)\n pol2 = RetrievalPolicy.from_spec(args.retrieval_policy, temp=args.retrieval_temp)\n region_ids2 = select_region(args.prompt, pg2, pol2, top_k=8)\n stamped = prepare_citations([unit], region_ids2, pg2, citations_policy=citations_policy, manifest=mf_obj)\n if stamped:\n res[\"evidence\"] = stamped[0].get(\"evidence\", [])\n res[\"provenance\"] = stamped[0].get(\"provenance\", res.get(\"provenance\"))\n except Exception:\n pass\n print(res.get(\"text\", \"\"))\n if args.verbose:\n try:\n import json as _json\n print(_json.dumps({\n \"must\": len(res.get(\"must\", [])),\n \"may\": len(res.get(\"may\", [])),\n \"lfp_passes\": res.get(\"lfp_passes\"),\n \"converged\": res.get(\"converged\"),\n \"confidence\": res.get(\"confidence\"),\n }, indent=2))\n except Exception:\n pass\n else:\n text = generate_answer(\n model_id=args.model,\n adapters_npz=str(adapters_npz),\n program_root=str(program_root),\n delta_cap=float(max(0.0, args.delta_cap)),\n prompt=args.prompt,\n cache_dir=str(cache_dir),\n device_map=str(args.device_map),\n alpha=float(args.alpha),\n rank=int(args.rank),\n gsub=float(args.gsub),\n beta=float(args.mix_beta),\n of_sources=(\"zoom\" if (zoom_seed and not args.zoom_symbol) else args.of_sources),\n zoom_symbol=(args.zoom_symbol or (zoom_seed or None)),\n zoom_radius=int(args.zoom_radius),\n pack_context=bool(args.pack_context),\n pack_mode=args.pack_mode,\n context_tokens=int(args.context_tokens),\n require_citations=bool(args.require_citations),\n citations_per_paragraph=bool(args.citations_per_paragraph),\n function_first=bool(args.function_first),\n ff_max_candidates=int(args.ff_max_candidates),\n ff_window_lines=int(args.ff_window_lines),\n ff_threshold=float(args.ff_threshold),\n ff_noise_penalty=float(args.ff_noise_penalty),\n do_sample=bool(args.do_sample),\n temperature=float(args.temperature),\n top_p=float(args.top_p),\n repetition_penalty=float(args.repetition_penalty),\n min_new_tokens=int(args.min_new_tokens),\n max_new_tokens=int(args.max_new_tokens),\n kv_window=int(args.kv_window),\n head_device=str(args.head_device),\n seed=int(args.seed),\n entropy_aware=bool(args.entropy_aware),\n rank_min=int(args.rank_min),\n rank_max=int(args.rank_max),\n gsub_min=float(args.gsub_min),\n gsub_max=float(args.gsub_max),\n entropy_weights=str(args.entropy_weights),\n target_weights=(str(args.target_weights) if args.target_weights else (\n \"o_proj=1.15,v_proj=1.10,up_proj=1.10,down_proj=1.05,gate_proj=1.00,q_proj=0.95,k_proj=0.90\" if args.code_recall_preset else (\n \"q_proj=0.95,k_proj=0.95,v_proj=0.95,o_proj=1.10,up_proj=1.10,down_proj=1.05\" if args.know\n# ... 
truncated ...","source_hash":"22e0ac2e1ea477b2b98389629652af634a8290bc1a59f96b58a4505a31fbf152","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:run._load_symbol","uri":"program://Program_Conditioned_Adapter/function/run._load_symbol#L190-L193","kind":"function","name":"_load_symbol","path":"run.py","language":"python","start_line":190,"end_line":193,"context_start_line":170,"context_end_line":213,"code":" cmd += [\"--max-text-tokens\", str(int(args.max_text_tokens))]\n if bool(args.knowledge_preset):\n cmd.append(\"--knowledge-preset\")\n if bool(args.code_recall_preset):\n cmd.append(\"--code-recall-preset\")\n subprocess.check_call(cmd)\n\n # Run directly via orchestrator\n from modules.runner import generate_answer\n from modules.runner import generate_answer_structured\n\n if args.gpu_ids and str(args.gpu_ids).strip():\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu_ids).strip()\n os.environ.setdefault(\"PYTORCH_CUDA_ALLOC_CONF\", \"expandable_segments:True\")\n\n # ProgramGraph + RetrievalPolicy selection (seed zoom) if requested\n zoom_seed = None\n try:\n if args.pg_backend:\n # Load PG factory dynamically only when explicitly provided\n def _load_symbol(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n pg_ctor = _load_symbol(args.pg_backend)\n pg = pg_ctor(str(program_root), ignore=None)\n pol = RetrievalPolicy.from_spec(args.retrieval_policy, temp=args.retrieval_temp)\n region_ids = select_region(args.prompt, pg, pol, top_k=8)\n ents_by_id = {e.id: e for e in pg.entities()}\n names = []\n for eid in region_ids[:4]:\n e = ents_by_id.get(eid)\n if e and e.name:\n names.append(e.name)\n if names:\n zoom_seed = \",\".join(names)\n except Exception:\n zoom_seed = None\n\n if bool(args.structured):\n res = generate_answer_structured(\n model_id=args.model,\n adapters_npz=str(adapters_npz),\n program_root=str(program_root),","source_hash":"22e0ac2e1ea477b2b98389629652af634a8290bc1a59f96b58a4505a31fbf152","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:run._load_symbol2","uri":"program://Program_Conditioned_Adapter/function/run._load_symbol2#L298-L301","kind":"function","name":"_load_symbol2","path":"run.py","language":"python","start_line":298,"end_line":301,"context_start_line":278,"context_end_line":321,"code":" telemetry_verify_tests=bool(args.telemetry_tests),\n )\n # PCA evidence/provenance stamping when enabled\n try:\n if (args.citations_enforce or args.citations_per_paragraph or args.citations_repair) and args.pg_backend:\n # Load manifest (for provenance)\n mf_obj: Dict[str, Any] = {}\n try:\n if os.path.exists(manifest_path):\n mf_obj = json.loads(open(manifest_path, \"r\", encoding=\"utf-8\").read())\n except Exception:\n mf_obj = {}\n # Minimal unit\n unit = {\"text\": res.get(\"text\", \"\"), \"evidence\": []}\n citations_policy = {\n \"enforce\": bool(args.citations_enforce),\n \"per_paragraph\": bool(args.citations_per_paragraph),\n \"repair\": bool(args.citations_repair),\n }\n # Build ProgramGraph for stamping when provided\n def _load_symbol2(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n pg_ctor2 = _load_symbol2(args.pg_backend)\n pg2 = pg_ctor2(str(program_root), ignore=None)\n pol2 = RetrievalPolicy.from_spec(args.retrieval_policy, temp=args.retrieval_temp)\n region_ids2 = select_region(args.prompt, pg2, pol2, top_k=8)\n stamped = prepare_citations([unit], region_ids2, pg2, 
citations_policy=citations_policy, manifest=mf_obj)\n if stamped:\n res[\"evidence\"] = stamped[0].get(\"evidence\", [])\n res[\"provenance\"] = stamped[0].get(\"provenance\", res.get(\"provenance\"))\n except Exception:\n pass\n print(res.get(\"text\", \"\"))\n if args.verbose:\n try:\n import json as _json\n print(_json.dumps({\n \"must\": len(res.get(\"must\", [])),\n \"may\": len(res.get(\"may\", [])),\n \"lfp_passes\": res.get(\"lfp_passes\"),\n \"converged\": res.get(\"converged\"),\n \"confidence\": res.get(\"confidence\"),","source_hash":"22e0ac2e1ea477b2b98389629652af634a8290bc1a59f96b58a4505a31fbf152","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:build","uri":"program://Program_Conditioned_Adapter/module/build#L1-L1127","kind":"module","name":"build","path":"build.py","language":"python","start_line":1,"end_line":1127,"context_start_line":1,"context_end_line":1127,"code":"import os\nimport argparse\nimport json\nimport hashlib\nimport subprocess\nfrom datetime import datetime, timezone\nfrom typing import Optional, Dict, Tuple, List\nimport sys\nimport random\nimport platform\nimport importlib\n\nimport numpy as np\n\nfrom modules.embedding import ( # type: ignore\n build_program_embedding,\n build_subgraph_embedding_from_program,\n)\nfrom modules.adapter import ( # type: ignore\n generate_lora_from_embedding,\n generate_lora_from_embedding_torch,\n save_npz,\n)\nfrom model.hf_snapshot import ensure_snapshot\nfrom model.inspect import detect_target_shapes_from_model\nfrom modules.capacity import entropy_score # type: ignore\n#\n# NOTE: Build is program-agnostic; no backend-specific imports here.\nfrom modules.program_graph import ProgramGraph # type: ignore\n\ndef _load_symbol(path: Optional[str]):\n if not path:\n return None\n mod, _, attr = path.partition(\":\")\n if not mod or not attr:\n raise ValueError(f\"Invalid symbol path '{path}', expected 'module:attr'\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--sources\", required=False, help=\"Program sources root (path or URI)\")\n p.add_argument(\"--program\", required=False, help=\"Alias for --sources (program root path or URI)\")\n p.add_argument(\"--model\", required=True)\n p.add_argument(\"--adapters-dir\", required=True)\n # Embedding\n p.add_argument(\"--embed-dim\", type=int, default=1536)\n p.add_argument(\"--include-text\", action=\"store_true\")\n p.add_argument(\"--text-max-bytes\", type=int, default=0)\n p.add_argument(\"--max-text-tokens\", type=int, default=0)\n p.add_argument(\"--text-weight\", type=float, default=0.25)\n p.add_argument(\"--graph-prop-hops\", type=int, default=0)\n p.add_argument(\"--graph-prop-damp\", type=float, default=0.85)\n p.add_argument(\"--ignore\", action=\"append\", default=None)\n # Base adapter\n p.add_argument(\"--base-rank\", type=int, default=8)\n p.add_argument(\"--target-weights\", default=None)\n p.add_argument(\"--knowledge-preset\", action=\"store_true\")\n # Adapter mapping is always enhanced; no CLI toggle\n p.add_argument(\"--code-recall-preset\", action=\"store_true\", help=\"Opt-in preset tuned for code recall (o,v,up,down,gate emphasized; light q,k)\")\n # Priors & rounding\n p.add_argument(\"--kbann-priors\", action=\"store_true\")\n p.add_argument(\"--kbann-strong\", action=\"store_true\", help=\"Stronger inhibitory priors: boost o/up/down; damp v,k (slightly q)\")\n p.add_argument(\"--round-lora\", action=\"store_true\")\n 
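# --- Editorial aside (illustrative, not part of build.py) -------------------
# --target-weights takes a CSV such as "q_proj=1,o_proj=1.1,up_proj=1.1".
# Later in build.main the parsed weights are mean-normalized and clipped to
# [0.7, 1.3] before use; the sketch below condenses that _parse_tw +
# normalize/clip pipeline into one self-contained function.
def normalize_target_weights(spec: str) -> dict:
    weights = {}
    for part in (s.strip() for s in spec.split(",") if s.strip()):
        if "=" not in part:
            continue  # _parse_tw silently skips malformed entries
        name, value = part.split("=", 1)
        weights[name.strip()] = float(value)
    if weights:
        mean = sum(weights.values()) / len(weights)
        if mean > 0:
            weights = {k: v / mean for k, v in weights.items()}
    # clip so no single projection's multiplier dominates the adapter
    return {k: min(1.3, max(0.7, v)) for k, v in weights.items()}

# Example: normalize_target_weights("q_proj=1,o_proj=1.1,up_proj=1.1,down_proj=1.05")
# rescales the values around a mean of 1.0 and bounds them to [0.7, 1.3].
# -----------------------------------------------------------------------------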
p.add_argument(\"--round-threshold\", type=float, default=0.5)\n p.add_argument(\"--round-mode\", choices=[\"none\", \"hard\", \"soft\"], default=None, help=\"Rounding/sparsification mode for adapter matrices\")\n p.add_argument(\"--round-soft-kp\", type=float, default=10.0, help=\"Soft mode: percent of entries to keep (top-|.|) per axis\")\n p.add_argument(\"--round-axis\", choices=[\"row\", \"col\", \"global\"], default=\"row\", help=\"Soft mode axis for top-k sparsification\")\n p.add_argument(\"--zero-b\", action=\"store_true\", help=\"After generation, set all B matrices to zero (official LoRA init)\")\n p.add_argument(\"--learn-bias\", action=\"store_true\", help=\"Export zero bias vectors so downstream can fine-tune bias only\")\n # Contracts (optional snapshot into manifest and embedding contracts channel)\n p.add_argument(\"--contracts-require-citations\", action=\"store_true\")\n p.add_argument(\"--contracts-retrieval-policy\", default=None)\n p.add_argument(\"--contracts-retrieval-temp\", type=float, default=None)\n p.add_argument(\"--contracts-weight\", type=float, default=0.10)\n # Per-module export (built by default; use --no-per-module to skip)\n p.add_argument(\"--per-module\", action=\"store_true\", help=\"DEPRECATED: no effect; per-module bank is built by default. Use --no-per-module to disable.\")\n p.add_argument(\"--no-per-module\", action=\"store_true\", help=\"Disable building per-module sub_adapters bank (built by default)\")\n p.add_argument(\"--include-deps\", action=\"store_true\")\n p.add_argument(\"--max-deps\", type=int, default=4)\n p.add_argument(\"--sub-rank\", type=int, default=8)\n p.add_argument(\"--files-only\", action=\"store_true\", help=\"Export a files-only sub-adapter using an explicit file allowlist\")\n p.add_argument(\n \"--files-allowlist\",\n action=\"append\",\n default=None,\n help=\"Files-only mode: relative file paths to include (repeatable)\",\n )\n # HF cache/model\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--probe-full\", action=\"store_true\")\n p.add_argument(\"--gen-backend\", choices=[\"numpy\", \"torch\"], default=\"numpy\")\n p.add_argument(\"--seed\", type=int, default=0)\n p.add_argument(\"--verbose\", action=\"store_true\")\n p.add_argument(\"--fallback-topology\", action=\"store_true\", help=\"If embedding is empty, fall back to topology-only component z_top\")\n # ProgramGraph/Embedder plugins (backend-agnostic)\n p.add_argument(\"--pg-backend\", default=None, help=\"Dotted path to ProgramGraph factory, e.g. 'examples.python_repo_grounded_qa.python_repo_graph:PythonRepoGraph'\")\n p.add_argument(\"--embedder-fn\", default=None, help=\"Dotted path to embedder function '(pg, sources_root, **opts)->{z,...}'. 
Defaults to PCA program embedder.\")\n # Capacity scheduling\n p.add_argument(\"--auto-rank\", action=\"store_true\", help=\"Auto-schedule rank based on program complexity\")\n p.add_argument(\"--rank-min\", type=int, default=None)\n p.add_argument(\"--rank-max\", type=int, default=None)\n p.add_argument(\"--mdl-budget-params\", type=int, default=0, help=\"Optional MDL-style global parameter budget across all layers/targets (approximate)\")\n # Program seeding\n p.add_argument(\"--init-program-state\", action=\"store_true\", help=\"Initialize or update baseline ProgramState (.program_state.json) after build\")\n p.add_argument(\"--program-state-path\", default=None, help=\"Optional explicit path to write ProgramState JSON; defaults to /.program_state.json\")\n # (self-tune is no longer part of build; use modules/tune.py externally if desired)\n args = p.parse_args()\n\n # Warn on deprecated/ignored flags for per-module export\n try:\n if bool(args.per_module) and bool(args.no_per_module):\n print(\"[pca.build] Warning: Both --per-module and --no-per-module were provided; proceeding with --no-per-module (disables per-module bank).\", file=sys.stderr)\n elif bool(args.per_module):\n print(\"[pca.build] Warning: --per-module is deprecated and has no effect; per-module bank is built by default. Use --no-per-module to disable.\", file=sys.stderr)\n except Exception:\n pass\n # Determinism: set seeds and hash seed (best-effort)\n try:\n os.environ[\"PYTHONHASHSEED\"] = str(int(args.seed))\n except Exception:\n pass\n try:\n random.seed(int(args.seed))\n except Exception:\n pass\n try:\n np.random.seed(int(args.seed))\n except Exception:\n pass\n torch_seeded = False\n try:\n import torch as _torch # type: ignore\n _torch.manual_seed(int(args.seed))\n torch_seeded = True\n except Exception:\n torch_seeded = False\n\n out_dir = os.path.abspath(os.path.expanduser(os.path.expandvars(args.adapters_dir)))\n os.makedirs(out_dir, exist_ok=True)\n\n # Normalize sources root (prefer --program, else --sources)\n src_arg = args.program if getattr(args, \"program\", None) else args.sources\n if not src_arg:\n raise SystemExit(\"--sources or --program is required\")\n sources_root = os.path.abspath(os.path.expanduser(os.path.expandvars(src_arg)))\n\n # Construct ProgramGraph via plugin (if provided)\n pg: Optional[ProgramGraph] = None\n try:\n pg_ctor = _load_symbol(args.pg_backend)\n if pg_ctor:\n pg = pg_ctor(sources_root, ignore=[s for s in (args.ignore or []) if s])\n except Exception:\n pg = None\n\n # Build embedding (program-agnostic) via plugin or default PCA program embedder\n embedder = _load_symbol(args.embedder_fn) if args.embedder_fn else None\n if embedder is not None:\n try:\n # Prefer new program-graph signature\n emb = embedder(\n pg,\n sources_root=sources_root,\n dim=int(args.embed_dim),\n seed=int(args.seed),\n include_text=bool(args.include_text),\n text_max_bytes=int(args.text_max_bytes),\n max_text_tokens=int(args.max_text_tokens),\n text_weight=float(args.text_weight),\n calls_weight=0.25,\n types_weight=0.20,\n tests_weight=0.15,\n graph_prop_hops=int(args.graph_prop_hops),\n graph_prop_damp=float(args.graph_prop_damp),\n ignore=[s for s in (args.ignore or []) if s],\n )\n except Exception:\n # Backward-compat: allow sources-root signature\n emb = embedder(\n sources_root,\n dim=int(args.embed_dim),\n seed=int(args.seed),\n include_text=bool(args.include_text),\n text_max_bytes=int(args.text_max_bytes),\n max_text_tokens=int(args.max_text_tokens),\n 
text_weight=float(args.text_weight),\n calls_weight=0.25,\n types_weight=0.20,\n tests_weight=0.15,\n graph_prop_hops=int(args.graph_prop_hops),\n graph_prop_damp=float(args.graph_prop_damp),\n ignore=[s for s in (args.ignore or []) if s],\n )\n else:\n emb = build_program_embedding(\n pg if pg is not None else None, # type: ignore[arg-type]\n sources_root=sources_root,\n dim=int(args.embed_dim),\n seed=int(args.seed),\n include_text=bool(args.include_text),\n text_max_bytes=int(args.text_max_bytes),\n max_text_tokens=int(args.max_text_tokens),\n text_weight=float(args.text_weight),\n calls_weight=0.25,\n types_weight=0.20,\n tests_weight=0.15,\n contracts_kv={\n \"require_citations\": (True if bool(args.contracts_require_citations) else False),\n \"retrieval_policy\": (str(args.contracts_retrieval_policy) if args.contracts_retrieval_policy else \"\"),\n \"retrieval_temp\": (str(args.contracts_retrieval_temp) if args.contracts_retrieval_temp is not None else \"\"),\n },\n contracts_weight=float(max(0.0, args.contracts_weight)),\n graph_prop_hops=int(args.graph_prop_hops),\n graph_prop_damp=float(args.graph_prop_damp),\n ignore=[s for s in (args.ignore or []) if s],\n )\n\n # Crash early on empty embeddings; optionally fall back to topology-only\n try:\n z = emb.get(\"z\")\n z_norm = float(np.linalg.norm(z)) if isinstance(z, np.ndarray) else 0.0\n fams = [\"z_sym\", \"z_doc\", \"z_mod\", \"z_top\", \"z_text\"]\n fam_norms = {k: (float(np.linalg.norm(emb[k])) if isinstance(emb.get(k), np.ndarray) else 0.0) for k in fams if k in emb}\n embed_empty = (z_norm == 0.0)\n fallback_used = False\n if embed_empty:\n if bool(args.fallback_topology) and (fam_norms.get(\"z_top\", 0.0) > 0.0):\n emb[\"z\"] = emb[\"z_top\"].astype(np.float32)\n fallback_used = True\n else:\n detail = {\n \"z_norm\": z_norm,\n \"family_norms\": fam_norms,\n \"hint\": \"Relax --ignore, enable --include-text, increase --text-max-bytes/--max-text-tokens, or pass --fallback-topology\",\n }\n raise RuntimeError(f\"Empty program embedding (||z||=0). 
Details: {json.dumps(detail)}\")\n except Exception as _e:\n if not isinstance(_e, RuntimeError):\n raise\n\n # Shapes & dims\n # Prefer explicit cache_dir; else env; else project root (/..../checkpoints)\n if args.cache_dir:\n cache_dir = args.cache_dir\n else:\n env_cache = os.environ.get(\"TRANSFORMER_CACHE_DIR\") or os.environ.get(\"HF_HOME\")\n if env_cache:\n cache_dir = env_cache\n else:\n mod_dir = os.path.dirname(__file__)\n proj_root = os.path.abspath(os.path.join(mod_dir, \"..\", \"..\", \"..\"))\n cache_dir = os.path.join(proj_root, \"checkpoints\")\n try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n # Infer shapes/dims from local snapshot config.json (no transformers)\n snap_dir = ensure_snapshot(args.model, cache_dir)\n cfg_path = os.path.join(snap_dir, \"config.json\")\n cfg_obj = json.load(open(cfg_path, \"r\", encoding=\"utf-8\"))\n d_model = int(cfg_obj.get(\"hidden_size\", 4096))\n num_layers = int(cfg_obj.get(\"num_hidden_layers\", 32))\n n_heads = int(cfg_obj.get(\"num_attention_heads\", 32))\n n_kv_heads = int(cfg_obj.get(\"num_key_value_heads\", n_heads))\n d_ff = int(cfg_obj.get(\"intermediate_size\", 11008))\n head_dim = int(cfg_obj.get(\"head_dim\", d_model // max(1, n_heads)))\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = {\n \"q_proj\": (n_heads * head_dim, d_model),\n \"k_proj\": (n_kv_heads * head_dim, d_model),\n \"v_proj\": (n_kv_heads * head_dim, d_model),\n \"o_proj\": (d_model, n_heads * head_dim),\n \"up_proj\": (d_ff, d_model),\n \"gate_proj\": (d_ff, d_model),\n \"down_proj\": (d_model, d_ff),\n }\n\n # Target weights: parse or preset\n def _parse_tw(spec: Optional[str]) -> Optional[Dict[str, float]]:\n if not spec:\n return None\n out: Dict[str, float] = {}\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p:\n continue\n n, v = p.split(\"=\", 1)\n out[n.strip()] = float(v)\n return out or None\n except Exception:\n return None\n tw = _parse_tw(args.target_weights)\n if tw is None:\n if bool(args.code_recall_preset):\n tw = {\"o_proj\": 1.15, \"v_proj\": 1.10, \"up_proj\": 1.10, \"down_proj\": 1.05, \"gate_proj\": 1.00, \"q_proj\": 0.95, \"k_proj\": 0.90}\n elif bool(args.knowledge_preset):\n tw = {\"q_proj\": 0.95, \"k_proj\": 0.95, \"v_proj\": 0.95, \"o_proj\": 1.10, \"up_proj\": 1.10, \"down_proj\": 1.05}\n\n # Optional KBANN priors (domain-derived boosts) - program-agnostic\n kbann_mode = None\n if bool(args.kbann_priors):\n try:\n # Estimate structural complexity and density from ProgramGraph (if available)\n ents_cnt = 0\n edges_cnt = 0\n try:\n if 'pg' in locals() and (pg is not None):\n try:\n ents_cnt = sum(1 for _ in pg.entities())\n except Exception:\n ents_cnt = 0\n try:\n edges_cnt = sum(1 for _ in pg.edges()) # type: ignore[attr-defined]\n except Exception:\n edges_cnt = 0\n except Exception:\n ents_cnt = 0\n edges_cnt = 0\n scomp = float(max(0.0, min(1.0, ents_cnt / 10000.0))) # normalize entity count\n density = float(edges_cnt) / float(max(1, ents_cnt))\n dens_term = 0.05 * float(min(1.0, density / 8.0))\n if bool(args.kbann_strong):\n kbann_mode = \"strong\"\n kb_tw = {\n \"o_proj\": 1.0 + 0.22 * scomp + 1.2 * dens_term,\n \"up_proj\": 1.0 + 0.22 * scomp + 1.2 * dens_term,\n \"down_proj\": 1.0 + 0.15 * scomp + dens_term,\n \"v_proj\": 0.85, # stronger inhibitory prior\n \"k_proj\": 0.95,\n \"q_proj\": 0.98,\n }\n else:\n kbann_mode = \"standard\"\n # Prior emphasis: o/up/down (composition/usage), mild downweight v, neutral k/q\n kb_tw = {\n 
\"o_proj\": 1.0 + 0.15 * scomp + dens_term,\n \"up_proj\": 1.0 + 0.15 * scomp + dens_term,\n \"down_proj\": 1.0 + 0.10 * scomp + (dens_term * 0.8),\n \"v_proj\": 0.95,\n \"k_proj\": 1.00,\n \"q_proj\": 1.00,\n }\n if tw is None:\n tw = kb_tw\n else:\n for k, v in kb_tw.items():\n tw[k] = float(tw.get(k, 1.0)) * float(v)\n except Exception:\n pass\n\n # Targets & shape safety\n NON_SQUARE_TARGETS = {\"k_proj\", \"v_proj\", \"up_proj\", \"down_proj\", \"gate_proj\"}\n if target_shapes:\n selected_targets: List[str] = list(target_shapes.keys())\n # shape sanity\n for t, shp in target_shapes.items():\n if not isinstance(shp, tuple) or len(shp) != 2:\n raise RuntimeError(f\"Invalid target shape for {t}: {shp}\")\n a, b = int(shp[0]), int(shp[1])\n if a <= 0 or b <= 0:\n raise RuntimeError(f\"Non-positive dims for {t}: {shp}\")\n else:\n # Without shapes, only allow square-safe targets\n selected_targets = [\"q_proj\", \"o_proj\"]\n # If user expects non-square behavior (e.g., via presets), hard-fail\n if any(t in NON_SQUARE_TARGETS for t in selected_targets):\n raise RuntimeError(\"Non-square targets requested without inferred shapes. Use --probe-full or restrict targets to q_proj,o_proj.\")\n\n # Per-layer/dims consistency\n if int(num_layers) <= 0 or int(d_model) <= 0:\n raise RuntimeError(\"Could not infer model dims (layers/d_model). Pass a valid --model or use --probe-full.\")\n\n # Final normalization/clipping of target weights (single place)\n if tw:\n try:\n vals = [float(v) for v in tw.values() if v is not None]\n if vals:\n mean_v = float(sum(vals) / float(len(vals)))\n if mean_v > 0:\n for k in list(tw.keys()):\n tw[k] = float(tw[k]) / mean_v\n # clip to [0.7, 1.3]\n for k in list(tw.keys()):\n v = float(tw[k])\n if v < 0.7:\n tw[k] = 0.7\n elif v > 1.3:\n tw[k] = 1.3\n except Exception:\n pass\n\n # Auto rank scheduling\n rank_min = int(args.rank_min) if args.rank_min is not None else int(args.base_rank)\n rank_max = int(args.rank_max) if args.rank_max is not None else int(max(rank_min, args.base_rank))\n rank_global = int(args.base_rank)\n scomp_ar = 0.5\n density_ar = 0.0\n if bool(args.auto_rank):\n try:\n ents_cnt_ar = 0\n edges_cnt_ar = 0\n if 'pg' in locals() and (pg is not None):\n try:\n ents_cnt_ar = sum(1 for _ in pg.entities())\n except Exception:\n ents_cnt_ar = 0\n try:\n edges_cnt_ar = sum(1 for _ in pg.edges()) # type: ignore[attr-defined]\n except Exception:\n edges_cnt_ar = 0\n density_ar = float(edges_cnt_ar) / float(max(1, ents_cnt_ar))\n scomp_ar = float(max(0.0, min(1.0, ents_cnt_ar / 10000.0)))\n comp = max(0.0, min(1.0, 0.5 * scomp_ar + 0.5 * min(1.0, density_ar / 8.0)))\n rank_global = int(max(1, round(rank_min + comp * float(max(0, rank_max - rank_min)))))\n except Exception:\n rank_global = int(args.base_rank)\n\n # Generate base adapters (backend selectable)\n backend_used = str(args.gen_backend)\n try:\n if backend_used == \"torch\":\n adapters = generate_lora_from_embedding_torch(\n# ... 
truncated ...","source_hash":"de2f270737807a4186a93dca8d809eb69862aadb302a59053827e8ee84cf8444","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:build._load_symbol","uri":"program://Program_Conditioned_Adapter/function/build._load_symbol#L31-L38","kind":"function","name":"_load_symbol","path":"build.py","language":"python","start_line":31,"end_line":38,"context_start_line":11,"context_end_line":58,"code":"import importlib\n\nimport numpy as np\n\nfrom modules.embedding import ( # type: ignore\n build_program_embedding,\n build_subgraph_embedding_from_program,\n)\nfrom modules.adapter import ( # type: ignore\n generate_lora_from_embedding,\n generate_lora_from_embedding_torch,\n save_npz,\n)\nfrom model.hf_snapshot import ensure_snapshot\nfrom model.inspect import detect_target_shapes_from_model\nfrom modules.capacity import entropy_score # type: ignore\n#\n# NOTE: Build is program-agnostic; no backend-specific imports here.\nfrom modules.program_graph import ProgramGraph # type: ignore\n\ndef _load_symbol(path: Optional[str]):\n if not path:\n return None\n mod, _, attr = path.partition(\":\")\n if not mod or not attr:\n raise ValueError(f\"Invalid symbol path '{path}', expected 'module:attr'\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--sources\", required=False, help=\"Program sources root (path or URI)\")\n p.add_argument(\"--program\", required=False, help=\"Alias for --sources (program root path or URI)\")\n p.add_argument(\"--model\", required=True)\n p.add_argument(\"--adapters-dir\", required=True)\n # Embedding\n p.add_argument(\"--embed-dim\", type=int, default=1536)\n p.add_argument(\"--include-text\", action=\"store_true\")\n p.add_argument(\"--text-max-bytes\", type=int, default=0)\n p.add_argument(\"--max-text-tokens\", type=int, default=0)\n p.add_argument(\"--text-weight\", type=float, default=0.25)\n p.add_argument(\"--graph-prop-hops\", type=int, default=0)\n p.add_argument(\"--graph-prop-damp\", type=float, default=0.85)\n p.add_argument(\"--ignore\", action=\"append\", default=None)\n # Base adapter\n p.add_argument(\"--base-rank\", type=int, default=8)\n p.add_argument(\"--target-weights\", default=None)\n p.add_argument(\"--knowledge-preset\", action=\"store_true\")","source_hash":"de2f270737807a4186a93dca8d809eb69862aadb302a59053827e8ee84cf8444","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:build.main","uri":"program://Program_Conditioned_Adapter/function/build.main#L40-L1121","kind":"function","name":"main","path":"build.py","language":"python","start_line":40,"end_line":1121,"context_start_line":20,"context_end_line":1127,"code":" generate_lora_from_embedding,\n generate_lora_from_embedding_torch,\n save_npz,\n)\nfrom model.hf_snapshot import ensure_snapshot\nfrom model.inspect import detect_target_shapes_from_model\nfrom modules.capacity import entropy_score # type: ignore\n#\n# NOTE: Build is program-agnostic; no backend-specific imports here.\nfrom modules.program_graph import ProgramGraph # type: ignore\n\ndef _load_symbol(path: Optional[str]):\n if not path:\n return None\n mod, _, attr = path.partition(\":\")\n if not mod or not attr:\n raise ValueError(f\"Invalid symbol path '{path}', expected 'module:attr'\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--sources\", required=False, help=\"Program sources root (path or 
URI)\")\n p.add_argument(\"--program\", required=False, help=\"Alias for --sources (program root path or URI)\")\n p.add_argument(\"--model\", required=True)\n p.add_argument(\"--adapters-dir\", required=True)\n # Embedding\n p.add_argument(\"--embed-dim\", type=int, default=1536)\n p.add_argument(\"--include-text\", action=\"store_true\")\n p.add_argument(\"--text-max-bytes\", type=int, default=0)\n p.add_argument(\"--max-text-tokens\", type=int, default=0)\n p.add_argument(\"--text-weight\", type=float, default=0.25)\n p.add_argument(\"--graph-prop-hops\", type=int, default=0)\n p.add_argument(\"--graph-prop-damp\", type=float, default=0.85)\n p.add_argument(\"--ignore\", action=\"append\", default=None)\n # Base adapter\n p.add_argument(\"--base-rank\", type=int, default=8)\n p.add_argument(\"--target-weights\", default=None)\n p.add_argument(\"--knowledge-preset\", action=\"store_true\")\n # Adapter mapping is always enhanced; no CLI toggle\n p.add_argument(\"--code-recall-preset\", action=\"store_true\", help=\"Opt-in preset tuned for code recall (o,v,up,down,gate emphasized; light q,k)\")\n # Priors & rounding\n p.add_argument(\"--kbann-priors\", action=\"store_true\")\n p.add_argument(\"--kbann-strong\", action=\"store_true\", help=\"Stronger inhibitory priors: boost o/up/down; damp v,k (slightly q)\")\n p.add_argument(\"--round-lora\", action=\"store_true\")\n p.add_argument(\"--round-threshold\", type=float, default=0.5)\n p.add_argument(\"--round-mode\", choices=[\"none\", \"hard\", \"soft\"], default=None, help=\"Rounding/sparsification mode for adapter matrices\")\n p.add_argument(\"--round-soft-kp\", type=float, default=10.0, help=\"Soft mode: percent of entries to keep (top-|.|) per axis\")\n p.add_argument(\"--round-axis\", choices=[\"row\", \"col\", \"global\"], default=\"row\", help=\"Soft mode axis for top-k sparsification\")\n p.add_argument(\"--zero-b\", action=\"store_true\", help=\"After generation, set all B matrices to zero (official LoRA init)\")\n p.add_argument(\"--learn-bias\", action=\"store_true\", help=\"Export zero bias vectors so downstream can fine-tune bias only\")\n # Contracts (optional snapshot into manifest and embedding contracts channel)\n p.add_argument(\"--contracts-require-citations\", action=\"store_true\")\n p.add_argument(\"--contracts-retrieval-policy\", default=None)\n p.add_argument(\"--contracts-retrieval-temp\", type=float, default=None)\n p.add_argument(\"--contracts-weight\", type=float, default=0.10)\n # Per-module export (built by default; use --no-per-module to skip)\n p.add_argument(\"--per-module\", action=\"store_true\", help=\"DEPRECATED: no effect; per-module bank is built by default. 
Use --no-per-module to disable.\")\n p.add_argument(\"--no-per-module\", action=\"store_true\", help=\"Disable building per-module sub_adapters bank (built by default)\")\n p.add_argument(\"--include-deps\", action=\"store_true\")\n p.add_argument(\"--max-deps\", type=int, default=4)\n p.add_argument(\"--sub-rank\", type=int, default=8)\n p.add_argument(\"--files-only\", action=\"store_true\", help=\"Export a files-only sub-adapter using an explicit file allowlist\")\n p.add_argument(\n \"--files-allowlist\",\n action=\"append\",\n default=None,\n help=\"Files-only mode: relative file paths to include (repeatable)\",\n )\n # HF cache/model\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--probe-full\", action=\"store_true\")\n p.add_argument(\"--gen-backend\", choices=[\"numpy\", \"torch\"], default=\"numpy\")\n p.add_argument(\"--seed\", type=int, default=0)\n p.add_argument(\"--verbose\", action=\"store_true\")\n p.add_argument(\"--fallback-topology\", action=\"store_true\", help=\"If embedding is empty, fall back to topology-only component z_top\")\n # ProgramGraph/Embedder plugins (backend-agnostic)\n p.add_argument(\"--pg-backend\", default=None, help=\"Dotted path to ProgramGraph factory, e.g. 'examples.python_repo_grounded_qa.python_repo_graph:PythonRepoGraph'\")\n p.add_argument(\"--embedder-fn\", default=None, help=\"Dotted path to embedder function '(pg, sources_root, **opts)->{z,...}'. Defaults to PCA program embedder.\")\n # Capacity scheduling\n p.add_argument(\"--auto-rank\", action=\"store_true\", help=\"Auto-schedule rank based on program complexity\")\n p.add_argument(\"--rank-min\", type=int, default=None)\n p.add_argument(\"--rank-max\", type=int, default=None)\n p.add_argument(\"--mdl-budget-params\", type=int, default=0, help=\"Optional MDL-style global parameter budget across all layers/targets (approximate)\")\n # Program seeding\n p.add_argument(\"--init-program-state\", action=\"store_true\", help=\"Initialize or update baseline ProgramState (.program_state.json) after build\")\n p.add_argument(\"--program-state-path\", default=None, help=\"Optional explicit path to write ProgramState JSON; defaults to /.program_state.json\")\n # (self-tune is no longer part of build; use modules/tune.py externally if desired)\n args = p.parse_args()\n\n # Warn on deprecated/ignored flags for per-module export\n try:\n if bool(args.per_module) and bool(args.no_per_module):\n print(\"[pca.build] Warning: Both --per-module and --no-per-module were provided; proceeding with --no-per-module (disables per-module bank).\", file=sys.stderr)\n elif bool(args.per_module):\n print(\"[pca.build] Warning: --per-module is deprecated and has no effect; per-module bank is built by default. 
Use --no-per-module to disable.\", file=sys.stderr)\n except Exception:\n pass\n # Determinism: set seeds and hash seed (best-effort)\n try:\n os.environ[\"PYTHONHASHSEED\"] = str(int(args.seed))\n except Exception:\n pass\n try:\n random.seed(int(args.seed))\n except Exception:\n pass\n try:\n np.random.seed(int(args.seed))\n except Exception:\n pass\n torch_seeded = False\n try:\n import torch as _torch # type: ignore\n _torch.manual_seed(int(args.seed))\n torch_seeded = True\n except Exception:\n torch_seeded = False\n\n out_dir = os.path.abspath(os.path.expanduser(os.path.expandvars(args.adapters_dir)))\n os.makedirs(out_dir, exist_ok=True)\n\n # Normalize sources root (prefer --program, else --sources)\n src_arg = args.program if getattr(args, \"program\", None) else args.sources\n if not src_arg:\n raise SystemExit(\"--sources or --program is required\")\n sources_root = os.path.abspath(os.path.expanduser(os.path.expandvars(src_arg)))\n\n # Construct ProgramGraph via plugin (if provided)\n pg: Optional[ProgramGraph] = None\n try:\n pg_ctor = _load_symbol(args.pg_backend)\n if pg_ctor:\n pg = pg_ctor(sources_root, ignore=[s for s in (args.ignore or []) if s])\n except Exception:\n pg = None\n\n # Build embedding (program-agnostic) via plugin or default PCA program embedder\n embedder = _load_symbol(args.embedder_fn) if args.embedder_fn else None\n if embedder is not None:\n try:\n # Prefer new program-graph signature\n emb = embedder(\n pg,\n sources_root=sources_root,\n dim=int(args.embed_dim),\n seed=int(args.seed),\n include_text=bool(args.include_text),\n text_max_bytes=int(args.text_max_bytes),\n max_text_tokens=int(args.max_text_tokens),\n text_weight=float(args.text_weight),\n calls_weight=0.25,\n types_weight=0.20,\n tests_weight=0.15,\n graph_prop_hops=int(args.graph_prop_hops),\n graph_prop_damp=float(args.graph_prop_damp),\n ignore=[s for s in (args.ignore or []) if s],\n )\n except Exception:\n # Backward-compat: allow sources-root signature\n emb = embedder(\n sources_root,\n dim=int(args.embed_dim),\n seed=int(args.seed),\n include_text=bool(args.include_text),\n text_max_bytes=int(args.text_max_bytes),\n max_text_tokens=int(args.max_text_tokens),\n text_weight=float(args.text_weight),\n calls_weight=0.25,\n types_weight=0.20,\n tests_weight=0.15,\n graph_prop_hops=int(args.graph_prop_hops),\n graph_prop_damp=float(args.graph_prop_damp),\n ignore=[s for s in (args.ignore or []) if s],\n )\n else:\n emb = build_program_embedding(\n pg if pg is not None else None, # type: ignore[arg-type]\n sources_root=sources_root,\n dim=int(args.embed_dim),\n seed=int(args.seed),\n include_text=bool(args.include_text),\n text_max_bytes=int(args.text_max_bytes),\n max_text_tokens=int(args.max_text_tokens),\n text_weight=float(args.text_weight),\n calls_weight=0.25,\n types_weight=0.20,\n tests_weight=0.15,\n contracts_kv={\n \"require_citations\": (True if bool(args.contracts_require_citations) else False),\n \"retrieval_policy\": (str(args.contracts_retrieval_policy) if args.contracts_retrieval_policy else \"\"),\n \"retrieval_temp\": (str(args.contracts_retrieval_temp) if args.contracts_retrieval_temp is not None else \"\"),\n },\n contracts_weight=float(max(0.0, args.contracts_weight)),\n graph_prop_hops=int(args.graph_prop_hops),\n graph_prop_damp=float(args.graph_prop_damp),\n ignore=[s for s in (args.ignore or []) if s],\n )\n\n # Crash early on empty embeddings; optionally fall back to topology-only\n try:\n z = emb.get(\"z\")\n z_norm = float(np.linalg.norm(z)) if isinstance(z, 
np.ndarray) else 0.0\n fams = [\"z_sym\", \"z_doc\", \"z_mod\", \"z_top\", \"z_text\"]\n fam_norms = {k: (float(np.linalg.norm(emb[k])) if isinstance(emb.get(k), np.ndarray) else 0.0) for k in fams if k in emb}\n embed_empty = (z_norm == 0.0)\n fallback_used = False\n if embed_empty:\n if bool(args.fallback_topology) and (fam_norms.get(\"z_top\", 0.0) > 0.0):\n emb[\"z\"] = emb[\"z_top\"].astype(np.float32)\n fallback_used = True\n else:\n detail = {\n \"z_norm\": z_norm,\n \"family_norms\": fam_norms,\n \"hint\": \"Relax --ignore, enable --include-text, increase --text-max-bytes/--max-text-tokens, or pass --fallback-topology\",\n }\n raise RuntimeError(f\"Empty program embedding (||z||=0). Details: {json.dumps(detail)}\")\n except Exception as _e:\n if not isinstance(_e, RuntimeError):\n raise\n\n # Shapes & dims\n # Prefer explicit cache_dir; else env; else project root (/..../checkpoints)\n if args.cache_dir:\n cache_dir = args.cache_dir\n else:\n env_cache = os.environ.get(\"TRANSFORMER_CACHE_DIR\") or os.environ.get(\"HF_HOME\")\n if env_cache:\n cache_dir = env_cache\n else:\n mod_dir = os.path.dirname(__file__)\n proj_root = os.path.abspath(os.path.join(mod_dir, \"..\", \"..\", \"..\"))\n cache_dir = os.path.join(proj_root, \"checkpoints\")\n try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n # Infer shapes/dims from local snapshot config.json (no transformers)\n snap_dir = ensure_snapshot(args.model, cache_dir)\n cfg_path = os.path.join(snap_dir, \"config.json\")\n cfg_obj = json.load(open(cfg_path, \"r\", encoding=\"utf-8\"))\n d_model = int(cfg_obj.get(\"hidden_size\", 4096))\n num_layers = int(cfg_obj.get(\"num_hidden_layers\", 32))\n n_heads = int(cfg_obj.get(\"num_attention_heads\", 32))\n n_kv_heads = int(cfg_obj.get(\"num_key_value_heads\", n_heads))\n d_ff = int(cfg_obj.get(\"intermediate_size\", 11008))\n head_dim = int(cfg_obj.get(\"head_dim\", d_model // max(1, n_heads)))\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = {\n \"q_proj\": (n_heads * head_dim, d_model),\n \"k_proj\": (n_kv_heads * head_dim, d_model),\n \"v_proj\": (n_kv_heads * head_dim, d_model),\n \"o_proj\": (d_model, n_heads * head_dim),\n \"up_proj\": (d_ff, d_model),\n \"gate_proj\": (d_ff, d_model),\n \"down_proj\": (d_model, d_ff),\n }\n\n # Target weights: parse or preset\n def _parse_tw(spec: Optional[str]) -> Optional[Dict[str, float]]:\n if not spec:\n return None\n out: Dict[str, float] = {}\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p:\n continue\n n, v = p.split(\"=\", 1)\n out[n.strip()] = float(v)\n return out or None\n except Exception:\n return None\n tw = _parse_tw(args.target_weights)\n if tw is None:\n if bool(args.code_recall_preset):\n tw = {\"o_proj\": 1.15, \"v_proj\": 1.10, \"up_proj\": 1.10, \"down_proj\": 1.05, \"gate_proj\": 1.00, \"q_proj\": 0.95, \"k_proj\": 0.90}\n elif bool(args.knowledge_preset):\n tw = {\"q_proj\": 0.95, \"k_proj\": 0.95, \"v_proj\": 0.95, \"o_proj\": 1.10, \"up_proj\": 1.10, \"down_proj\": 1.05}\n\n # Optional KBANN priors (domain-derived boosts) - program-agnostic\n kbann_mode = None\n if bool(args.kbann_priors):\n try:\n # Estimate structural complexity and density from ProgramGraph (if available)\n ents_cnt = 0\n edges_cnt = 0\n try:\n if 'pg' in locals() and (pg is not None):\n try:\n ents_cnt = sum(1 for _ in pg.entities())\n except Exception:\n ents_cnt = 0\n try:\n edges_cnt = sum(1 for _ in pg.edges()) # type: ignore[attr-defined]\n except Exception:\n 
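To make the snapshot-config inference concrete, a sketch of deriving LoRA target shapes from a config.json, as in the block above. The file path and fallback numbers are Llama-8B-style defaults for illustration; build.main resolves the real path under ensure_snapshot:

# Assumed local path; actual code reads /config.json.
import json
cfg = json.load(open("config.json", encoding="utf-8"))
d_model = int(cfg.get("hidden_size", 4096))
n_heads = int(cfg.get("num_attention_heads", 32))
n_kv = int(cfg.get("num_key_value_heads", n_heads))
head_dim = int(cfg.get("head_dim", d_model // max(1, n_heads)))
d_ff = int(cfg.get("intermediate_size", 11008))
shapes = {
    "q_proj": (n_heads * head_dim, d_model),
    "k_proj": (n_kv * head_dim, d_model),   # narrower under grouped-query attention
    "o_proj": (d_model, n_heads * head_dim),
    "down_proj": (d_model, d_ff),
}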
edges_cnt = 0\n except Exception:\n ents_cnt = 0\n edges_cnt = 0\n scomp = float(max(0.0, min(1.0, ents_cnt / 10000.0))) # normalize entity count\n density = float(edges_cnt) / float(max(1, ents_cnt))\n dens_term = 0.05 * float(min(1.0, density / 8.0))\n if bool(args.kbann_strong):\n kbann_mode = \"strong\"\n kb_tw = {\n \"o_proj\": 1.0 + 0.22 * scomp + 1.2 * dens_term,\n \"up_proj\": 1.0 + 0.22 * scomp + 1.2 * dens_term,\n \"down_proj\": 1.0 + 0.15 * scomp + dens_term,\n \"v_proj\": 0.85, # stronger inhibitory prior\n \"k_proj\": 0.95,\n \"q_proj\": 0.98,\n }\n else:\n kbann_mode = \"standard\"\n # Prior emphasis: o/up/down (composition/usage), mild downweight v, neutral k/q\n kb_tw = {\n \"o_proj\": 1.0 + 0.15 * scomp + dens_term,\n \"up_proj\": 1.0 + 0.15 * scomp + dens_term,\n \"down_proj\": 1.0 + 0.10 * scomp + (dens_term * 0.8),\n \"v_proj\": 0.95,\n \"k_proj\": 1.00,\n \"q_proj\": 1.00,\n }\n if tw is None:\n tw = kb_tw\n else:\n for k, v in kb_tw.items():\n tw[k] = float(tw.get(k, 1.0)) * float(v)\n except Exception:\n pass\n\n # Targets & shape safety\n NON_SQUARE_TARGETS = {\"k_proj\", \"v_proj\", \"up_proj\", \"down_proj\", \"gate_proj\"}\n if target_shapes:\n selected_targets: List[str] = list(target_shapes.keys())\n # shape sanity\n for t, shp in target_shapes.items():\n if not isinstance(shp, tuple) or len(shp) != 2:\n raise RuntimeError(f\"Invalid target shape for {t}: {shp}\")\n a, b = int(shp[0]), int(shp[1])\n if a <= 0 or b <= 0:\n raise RuntimeError(f\"Non-positive dims for {t}: {shp}\")\n else:\n # Without shapes, only allow square-safe targets\n selected_targets = [\"q_proj\", \"o_proj\"]\n # If user expects non-square behavior (e.g., via presets), hard-fail\n if any(t in NON_SQUARE_TARGETS for t in selected_targets):\n raise RuntimeError(\"Non-square targets requested without inferred shapes. Use --probe-full or restrict targets to q_proj,o_proj.\")\n\n # Per-layer/dims consistency\n if int(num_layers) <= 0 or int(d_model) <= 0:\n raise RuntimeError(\"Could not infer model dims (layers/d_model). 
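A worked example of the standard-mode KBANN prior above, with assumed graph stats (ents_cnt=4000, edges_cnt=16000) purely for illustration:

# Assumed counts; the real values come from the ProgramGraph.
ents_cnt, edges_cnt = 4000, 16000
scomp = max(0.0, min(1.0, ents_cnt / 10000.0))      # 0.4
density = edges_cnt / max(1, ents_cnt)               # 4.0
dens_term = 0.05 * min(1.0, density / 8.0)           # 0.025
o_proj = 1.0 + 0.15 * scomp + dens_term              # 1.085
down_proj = 1.0 + 0.10 * scomp + dens_term * 0.8     # 1.06
# These boosts are then multiplied into any existing tw and later
# mean-normalized and clipped to [0.7, 1.3].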
Pass a valid --model or use --probe-full.\")\n\n # Final normalization/clipping of target weights (single place)\n if tw:\n try:\n vals = [float(v) for v in tw.values() if v is not None]\n if vals:\n mean_v = float(sum(vals) / float(len(vals)))\n if mean_v > 0:\n for k in list(tw.keys()):\n tw[k] = float(tw[k]) / mean_v\n # clip to [0.7, 1.3]\n for k in list(tw.keys()):\n v = float(tw[k])\n if v < 0.7:\n tw[k] = 0.7\n elif v > 1.3:\n tw[k] = 1.3\n except Exception:\n pass\n\n # Auto rank scheduling\n rank_min = int(args.rank_min) if args.rank_min is not None else int(args.base_rank)\n rank_max = int(args.rank_max) if args.rank_max is not None else int(max(rank_min, args.base_rank))\n rank_global = int(args.base_rank)\n scomp_ar = 0.5\n density_ar = 0.0\n if bool(args.auto_rank):\n try:\n ents_cnt_ar = 0\n edges_cnt_ar = 0\n if 'pg' in locals() and (pg is not None):\n try:\n ents_cnt_ar = sum(1 for _ in pg.entities())\n except Exception:\n ents_cnt_ar = 0\n try:\n edges_cnt_ar = sum(1 for _ in pg.edges()) # type: ignore[attr-defined]\n except Exception:\n edges_cnt_ar = 0\n density_ar = float(edges_cnt_ar) / float(max(1, ents_cnt_ar))\n scomp_ar = float(max(0.0, min(1.0, ents_cnt_ar / 10000.0)))\n comp = max(0.0, min(1.0, 0.5 * scomp_ar + 0.5 * min(1.0, density_ar / 8.0)))\n rank_global = int(max(1, round(rank_min + comp * float(max(0, rank_max - rank_min)))))\n except Exception:\n rank_global = int(args.base_rank)\n\n # Generate base adapters (backend selectable)\n backend_used = str(args.gen_backend)\n try:\n if backend_used == \"torch\":\n adapters = generate_lora_from_embedding_torch(\n emb[\"z\"],\n d_model=int(d_model),\n num_layers=int(num_layers),\n rank=int(rank_global),\n seed=int(args.seed),\n targets=selected_targets,\n target_shapes=target_shapes,\n target_weights=tw,\n )\n if bool(args.learn_bias):\n # Add zero bias vectors for downstream fine-t\n# ... 
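A worked example of the auto-rank schedule above, with the same assumed stats as before (scomp_ar=0.4, density_ar=4.0):

# rank interpolates between rank_min and rank_max by a complexity score.
rank_min, rank_max = 8, 32
scomp_ar, density_ar = 0.4, 4.0
comp = max(0.0, min(1.0, 0.5 * scomp_ar + 0.5 * min(1.0, density_ar / 8.0)))  # 0.45
rank_global = int(max(1, round(rank_min + comp * (rank_max - rank_min))))      # 19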
truncated ...","source_hash":"de2f270737807a4186a93dca8d809eb69862aadb302a59053827e8ee84cf8444","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:build._parse_tw","uri":"program://Program_Conditioned_Adapter/function/build._parse_tw#L279-L292","kind":"function","name":"_parse_tw","path":"build.py","language":"python","start_line":279,"end_line":292,"context_start_line":259,"context_end_line":312,"code":" snap_dir = ensure_snapshot(args.model, cache_dir)\n cfg_path = os.path.join(snap_dir, \"config.json\")\n cfg_obj = json.load(open(cfg_path, \"r\", encoding=\"utf-8\"))\n d_model = int(cfg_obj.get(\"hidden_size\", 4096))\n num_layers = int(cfg_obj.get(\"num_hidden_layers\", 32))\n n_heads = int(cfg_obj.get(\"num_attention_heads\", 32))\n n_kv_heads = int(cfg_obj.get(\"num_key_value_heads\", n_heads))\n d_ff = int(cfg_obj.get(\"intermediate_size\", 11008))\n head_dim = int(cfg_obj.get(\"head_dim\", d_model // max(1, n_heads)))\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = {\n \"q_proj\": (n_heads * head_dim, d_model),\n \"k_proj\": (n_kv_heads * head_dim, d_model),\n \"v_proj\": (n_kv_heads * head_dim, d_model),\n \"o_proj\": (d_model, n_heads * head_dim),\n \"up_proj\": (d_ff, d_model),\n \"gate_proj\": (d_ff, d_model),\n \"down_proj\": (d_model, d_ff),\n }\n\n # Target weights: parse or preset\n def _parse_tw(spec: Optional[str]) -> Optional[Dict[str, float]]:\n if not spec:\n return None\n out: Dict[str, float] = {}\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p:\n continue\n n, v = p.split(\"=\", 1)\n out[n.strip()] = float(v)\n return out or None\n except Exception:\n return None\n tw = _parse_tw(args.target_weights)\n if tw is None:\n if bool(args.code_recall_preset):\n tw = {\"o_proj\": 1.15, \"v_proj\": 1.10, \"up_proj\": 1.10, \"down_proj\": 1.05, \"gate_proj\": 1.00, \"q_proj\": 0.95, \"k_proj\": 0.90}\n elif bool(args.knowledge_preset):\n tw = {\"q_proj\": 0.95, \"k_proj\": 0.95, \"v_proj\": 0.95, \"o_proj\": 1.10, \"up_proj\": 1.10, \"down_proj\": 1.05}\n\n # Optional KBANN priors (domain-derived boosts) - program-agnostic\n kbann_mode = None\n if bool(args.kbann_priors):\n try:\n # Estimate structural complexity and density from ProgramGraph (if available)\n ents_cnt = 0\n edges_cnt = 0\n try:\n if 'pg' in locals() and (pg is not None):\n try:\n ents_cnt = sum(1 for _ in pg.entities())\n except Exception:\n ents_cnt = 0","source_hash":"de2f270737807a4186a93dca8d809eb69862aadb302a59053827e8ee84cf8444","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:build._shape_for","uri":"program://Program_Conditioned_Adapter/function/build._shape_for#L489-L494","kind":"function","name":"_shape_for","path":"build.py","language":"python","start_line":489,"end_line":494,"context_start_line":469,"context_end_line":514,"code":"\n # Optional capacity schedule per target (effective rank via zeroing)\n per_target_keep: Dict[str, int] = {}\n if bool(args.auto_rank) and rank_global > 0:\n base_frac: Dict[str, float] = {\n \"o_proj\": 1.00, \"up_proj\": 1.00, \"down_proj\": 0.90, \"gate_proj\": 0.80,\n \"q_proj\": 0.70, \"k_proj\": 0.65, \"v_proj\": 0.60,\n }\n comp_adj = max(0.85, min(1.15, 1.0 + 0.15 * (scomp_ar - 0.5)))\n for t in selected_targets:\n frac = base_frac.get(t, 0.8) * comp_adj\n keep = int(max(1, min(rank_global, round(rank_global * frac))))\n per_target_keep[t] = keep\n # MDL-style global budget: cap total params across layers/targets\n try:\n budget = int(max(0, 
int(args.mdl_budget_params)))\n except Exception:\n budget = 0\n if budget > 0:\n # estimate params per target per layer: keep * (d_out + d_in)\n def _shape_for(t: str) -> Tuple[int, int]:\n if target_shapes and t in target_shapes:\n a, b = target_shapes[t]\n return int(a), int(b)\n # fallback square\n return int(d_model), int(d_model)\n total = 0\n per_t_cost: Dict[str, int] = {}\n for t, k in per_target_keep.items():\n a, b = _shape_for(t)\n cost = int(max(1, k)) * int(max(1, a + b))\n per_t_cost[t] = cost\n total += cost\n total *= int(num_layers)\n if total > budget:\n scale = float(budget) / float(total)\n # rescale keeps proportionally, ensure at least 1\n for t in list(per_target_keep.keys()):\n k = int(per_target_keep[t])\n k2 = int(max(1, round(float(k) * scale)))\n per_target_keep[t] = min(int(rank_global), k2)\n try:\n for i in range(len(adapters.get(\"layers\", []))):\n for name, tensors in adapters[\"layers\"][i].items():\n keep = int(per_target_keep.get(name, rank_global))\n A = tensors.get(\"A\"); B = tensors.get(\"B\")","source_hash":"de2f270737807a4186a93dca8d809eb69862aadb302a59053827e8ee84cf8444","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:build._sparsify_topk","uri":"program://Program_Conditioned_Adapter/function/build._sparsify_topk#L556-L588","kind":"function","name":"_sparsify_topk","path":"build.py","language":"python","start_line":556,"end_line":588,"context_start_line":536,"context_end_line":608,"code":" # Optional rounding/sparsification\n try:\n mode = (args.round_mode or (\"hard\" if bool(args.round_lora) else \"none\"))\n if mode == \"hard\":\n thr = float(max(0.0, args.round_threshold))\n for i in range(len(adapters.get(\"layers\", []))):\n for name, tensors in adapters[\"layers\"][i].items():\n for key in (\"A\", \"B\"):\n arr = tensors.get(key)\n if not isinstance(arr, np.ndarray):\n continue\n q = float(np.median(np.abs(arr))) if arr.size > 0 else 0.0\n if q <= 0:\n continue\n out = np.where(np.abs(arr) < (thr * q), 0.0, np.sign(arr) * q).astype(np.float32)\n tensors[key] = out\n elif mode == \"soft\":\n kp = float(max(0.0, args.round_soft_kp))\n axis = str(args.round_axis)\n keep_frac = max(0.0, min(100.0, kp)) / 100.0\n def _sparsify_topk(a: np.ndarray) -> np.ndarray:\n if a.size == 0 or keep_frac <= 0.0:\n return np.zeros_like(a)\n if keep_frac >= 1.0:\n return a\n if axis == \"global\":\n k = int(np.ceil(keep_frac * a.size))\n if k <= 0:\n return np.zeros_like(a)\n flat_idx = np.argpartition(np.abs(a).ravel(), -k)[-k:]\n mask = np.zeros(a.size, dtype=bool)\n mask[flat_idx] = True\n return (a.ravel() * mask).reshape(a.shape)\n elif axis == \"row\":\n rows, cols = a.shape\n k = int(np.ceil(keep_frac * cols))\n if k <= 0:\n return np.zeros_like(a)\n out = np.zeros_like(a)\n for r in range(rows):\n idx = np.argpartition(np.abs(a[r]), -k)[-k:]\n out[r, idx] = a[r, idx]\n return out\n else: # col\n rows, cols = a.shape\n k = int(np.ceil(keep_frac * rows))\n if k <= 0:\n return np.zeros_like(a)\n out = np.zeros_like(a)\n for c in range(cols):\n idx = np.argpartition(np.abs(a[:, c]), -k)[-k:]\n out[idx, c] = a[idx, c]\n return out\n for i in range(len(adapters.get(\"layers\", []))):\n for name, tensors in adapters[\"layers\"][i].items():\n for key in (\"A\", \"B\"):\n arr = tensors.get(key)\n if not isinstance(arr, np.ndarray) or arr.ndim != 2:\n continue\n tensors[key] = _sparsify_topk(arr).astype(np.float32)\n # else: none -> no rounding\n except Exception:\n pass\n\n # Git metadata (best-effort)\n commit_sha: Optional[str] = 
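Worked numbers for the per-target capacity schedule and the MDL-style budget cap above. The rank (19), complexity (0.4), shapes, and budget are assumed values for illustration; the cost model is the one in the code, keep * (d_out + d_in) per target per layer:

base_frac = {"o_proj": 1.00, "q_proj": 0.70, "v_proj": 0.60}
comp_adj = max(0.85, min(1.15, 1.0 + 0.15 * (0.4 - 0.5)))             # 0.985
keep = {t: max(1, min(19, round(19 * f * comp_adj))) for t, f in base_frac.items()}
# keep == {"o_proj": 19, "q_proj": 13, "v_proj": 11}
shapes = {"o_proj": (4096, 4096), "q_proj": (4096, 4096), "v_proj": (1024, 4096)}
num_layers, budget = 32, 8_000_000
total = num_layers * sum(k * sum(shapes[t]) for t, k in keep.items())  # ~10.19M
if total > budget:
    scale = budget / total                                             # ~0.785
    keep = {t: max(1, round(k * scale)) for t, k in keep.items()}      # 15 / 10 / 9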
None\n tree_sha: Optional[str] = None\n try:\n commit_sha = subprocess.check_output([\"git\", \"-C\", sources_root, \"rev-parse\", \"HEAD\"], text=True).strip()\n except Exception:\n commit_sha = None\n try:\n tree_sha = subprocess.check_output([\"git\", \"-C\", sources_root, \"rev-parse\", \"HEAD^{tree}\"], text=True).strip()","source_hash":"de2f270737807a4186a93dca8d809eb69862aadb302a59053827e8ee84cf8444","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:smoke_repo.attention_demo","uri":"program://Program_Conditioned_Adapter/module/smoke_repo.attention_demo#L1-L57","kind":"module","name":"smoke_repo.attention_demo","path":"smoke_repo/attention_demo.py","language":"python","start_line":1,"end_line":57,"context_start_line":1,"context_end_line":57,"code":"\"\"\"\nA tiny, self-contained attention demo used for repo-grounding smoke tests.\n\nThe functions intentionally include clear docstrings and simple math so\nretrieval by the runner can surface these lines in context windows.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Tuple\nimport math\n\n\ndef attention_score(query: float, key: float) -> float:\n \"\"\"\n Compute an unnormalized attention score between a scalar query and key.\n\n This mirrors the dot-product component in scaled dot-product attention for\n the 1-D case. In higher dimensions, the score is the dot product of the\n query and key vectors.\n \"\"\"\n return float(query * key)\n\n\ndef scaled_dot_product_attention(\n query: Tuple[float, ...], key: Tuple[float, ...], value: Tuple[float, ...]\n) -> float:\n \"\"\"\n Minimal scaled dot-product attention for 1-token, 1-head toy inputs.\n\n Steps:\n 1) score = sum_i query[i] * key[i]\n 2) scale by sqrt(d) where d is the dimensionality of query/key\n 3) softmax over a single score is 1.0 (degenerate 1-token case)\n 4) output = weight * sum_i value[i] (here weight==1.0)\n \"\"\"\n if len(query) != len(key):\n raise ValueError(\"query and key must have same length\")\n d = max(1, len(query))\n score = sum(float(q) * float(k) for q, k in zip(query, key))\n scaled = score / math.sqrt(float(d))\n # With a single score, softmax(scaled) == 1.0; keep explicit for clarity\n weight = 1.0 if math.isfinite(scaled) else 0.0\n return float(weight * sum(float(v) for v in value))\n\n\ndef explain_attention_brief() -> str:\n \"\"\"\n Return a brief, human-readable explanation of attention suitable for tests.\n \"\"\"\n return (\n \"Attention computes similarity between a query and keys, normalizes the \"\n \"scores (softmax), and uses them to weight values. 
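Numpy sketches of the two rounding modes from the sparsification pass shown earlier (hard median-ternarization and soft per-row top-|.|); the matrix and thresholds are toy values:

import numpy as np
a = np.array([[0.1, -0.9, 0.3, 0.05], [0.4, 0.2, -0.7, 0.6]], dtype=np.float32)
# "hard": zero entries below thr * median(|a|), snap the rest to +/- median(|a|)
q = float(np.median(np.abs(a)))                     # 0.35
hard = np.where(np.abs(a) < 0.5 * q, 0.0, np.sign(a) * q).astype(np.float32)
# "soft": keep the top-|.| 50% of entries per row, zero the rest
k = int(np.ceil(0.5 * a.shape[1]))                  # keep 2 of 4 per row
soft = np.zeros_like(a)
for r in range(a.shape[0]):
    idx = np.argpartition(np.abs(a[r]), -k)[-k:]    # indices of largest-magnitude entries
    soft[r, idx] = a[r, idx]
# row 0 keeps -0.9 and 0.3; row 1 keeps -0.7 and 0.6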
The scaled \"\n \"dot-product form divides by sqrt(d) to keep gradients stable.\"\n )\n\n","source_hash":"879bae3bd5b3cef532e3c334000865c03b308eae03d4133f0446b5b6014ffdb4","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:smoke_repo.attention_demo.attention_score","uri":"program://Program_Conditioned_Adapter/function/smoke_repo.attention_demo.attention_score#L14-L22","kind":"function","name":"attention_score","path":"smoke_repo/attention_demo.py","language":"python","start_line":14,"end_line":22,"context_start_line":1,"context_end_line":42,"code":"\"\"\"\nA tiny, self-contained attention demo used for repo-grounding smoke tests.\n\nThe functions intentionally include clear docstrings and simple math so\nretrieval by the runner can surface these lines in context windows.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Tuple\nimport math\n\n\ndef attention_score(query: float, key: float) -> float:\n \"\"\"\n Compute an unnormalized attention score between a scalar query and key.\n\n This mirrors the dot-product component in scaled dot-product attention for\n the 1-D case. In higher dimensions, the score is the dot product of the\n query and key vectors.\n \"\"\"\n return float(query * key)\n\n\ndef scaled_dot_product_attention(\n query: Tuple[float, ...], key: Tuple[float, ...], value: Tuple[float, ...]\n) -> float:\n \"\"\"\n Minimal scaled dot-product attention for 1-token, 1-head toy inputs.\n\n Steps:\n 1) score = sum_i query[i] * key[i]\n 2) scale by sqrt(d) where d is the dimensionality of query/key\n 3) softmax over a single score is 1.0 (degenerate 1-token case)\n 4) output = weight * sum_i value[i] (here weight==1.0)\n \"\"\"\n if len(query) != len(key):\n raise ValueError(\"query and key must have same length\")\n d = max(1, len(query))\n score = sum(float(q) * float(k) for q, k in zip(query, key))\n scaled = score / math.sqrt(float(d))\n # With a single score, softmax(scaled) == 1.0; keep explicit for clarity","source_hash":"879bae3bd5b3cef532e3c334000865c03b308eae03d4133f0446b5b6014ffdb4","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:smoke_repo.attention_demo.scaled_dot_product_attention","uri":"program://Program_Conditioned_Adapter/function/smoke_repo.attention_demo.scaled_dot_product_attention#L25-L44","kind":"function","name":"scaled_dot_product_attention","path":"smoke_repo/attention_demo.py","language":"python","start_line":25,"end_line":44,"context_start_line":5,"context_end_line":57,"code":"retrieval by the runner can surface these lines in context windows.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Tuple\nimport math\n\n\ndef attention_score(query: float, key: float) -> float:\n \"\"\"\n Compute an unnormalized attention score between a scalar query and key.\n\n This mirrors the dot-product component in scaled dot-product attention for\n the 1-D case. 
In higher dimensions, the score is the dot product of the\n query and key vectors.\n \"\"\"\n return float(query * key)\n\n\ndef scaled_dot_product_attention(\n query: Tuple[float, ...], key: Tuple[float, ...], value: Tuple[float, ...]\n) -> float:\n \"\"\"\n Minimal scaled dot-product attention for 1-token, 1-head toy inputs.\n\n Steps:\n 1) score = sum_i query[i] * key[i]\n 2) scale by sqrt(d) where d is the dimensionality of query/key\n 3) softmax over a single score is 1.0 (degenerate 1-token case)\n 4) output = weight * sum_i value[i] (here weight==1.0)\n \"\"\"\n if len(query) != len(key):\n raise ValueError(\"query and key must have same length\")\n d = max(1, len(query))\n score = sum(float(q) * float(k) for q, k in zip(query, key))\n scaled = score / math.sqrt(float(d))\n # With a single score, softmax(scaled) == 1.0; keep explicit for clarity\n weight = 1.0 if math.isfinite(scaled) else 0.0\n return float(weight * sum(float(v) for v in value))\n\n\ndef explain_attention_brief() -> str:\n \"\"\"\n Return a brief, human-readable explanation of attention suitable for tests.\n \"\"\"\n return (\n \"Attention computes similarity between a query and keys, normalizes the \"\n \"scores (softmax), and uses them to weight values. The scaled \"\n \"dot-product form divides by sqrt(d) to keep gradients stable.\"\n )\n\n","source_hash":"879bae3bd5b3cef532e3c334000865c03b308eae03d4133f0446b5b6014ffdb4","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:smoke_repo.attention_demo.explain_attention_brief","uri":"program://Program_Conditioned_Adapter/function/smoke_repo.attention_demo.explain_attention_brief#L47-L55","kind":"function","name":"explain_attention_brief","path":"smoke_repo/attention_demo.py","language":"python","start_line":47,"end_line":55,"context_start_line":27,"context_end_line":57,"code":") -> float:\n \"\"\"\n Minimal scaled dot-product attention for 1-token, 1-head toy inputs.\n\n Steps:\n 1) score = sum_i query[i] * key[i]\n 2) scale by sqrt(d) where d is the dimensionality of query/key\n 3) softmax over a single score is 1.0 (degenerate 1-token case)\n 4) output = weight * sum_i value[i] (here weight==1.0)\n \"\"\"\n if len(query) != len(key):\n raise ValueError(\"query and key must have same length\")\n d = max(1, len(query))\n score = sum(float(q) * float(k) for q, k in zip(query, key))\n scaled = score / math.sqrt(float(d))\n # With a single score, softmax(scaled) == 1.0; keep explicit for clarity\n weight = 1.0 if math.isfinite(scaled) else 0.0\n return float(weight * sum(float(v) for v in value))\n\n\ndef explain_attention_brief() -> str:\n \"\"\"\n Return a brief, human-readable explanation of attention suitable for tests.\n \"\"\"\n return (\n \"Attention computes similarity between a query and keys, normalizes the \"\n \"scores (softmax), and uses them to weight values. 
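Evaluating the toy attention above by hand, with inputs chosen only for illustration:

import math
query, key, value = (1.0, 0.0), (1.0, 0.0), (2.0, 3.0)
d = max(1, len(query))
score = sum(q * k for q, k in zip(query, key))   # 1.0
scaled = score / math.sqrt(d)                    # ~0.707
weight = 1.0                                     # softmax over a single score is 1.0
print(weight * sum(value))                       # 5.0, matching scaled_dot_product_attention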
The scaled \"\n \"dot-product form divides by sqrt(d) to keep gradients stable.\"\n )\n\n","source_hash":"879bae3bd5b3cef532e3c334000865c03b308eae03d4133f0446b5b6014ffdb4","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding","uri":"program://Program_Conditioned_Adapter/module/modules.embedding#L1-L495","kind":"module","name":"modules.embedding","path":"modules/embedding.py","language":"python","start_line":1,"end_line":495,"context_start_line":1,"context_end_line":495,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport os\nimport re\nimport numpy as np\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\ntry:\n import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\nfrom modules.program_graph import ProgramGraph, Artifact # type: ignore\n\nEMBED_DIM_DEFAULT = 128\nHASH_SEEDS = [1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344]\n\ndef auto_model_dims(model_id: str, cache_dir: Optional[str]) -> Tuple[int, int]:\n try:\n from transformers import AutoConfig # type: ignore\n\n cfg = AutoConfig.from_pretrained(model_id, cache_dir=cache_dir)\n n_layers = int(getattr(cfg, \"num_hidden_layers\", 0) or 0)\n d_model = int(getattr(cfg, \"hidden_size\", 0) or 0)\n return n_layers, d_model\n except Exception:\n return 0, 0\n\n\ndef _stable_hash(text: str, seed: int = 0) -> int:\n \"\"\"64-bit FNV-1a; returns positive 63-bit for stable modulo ops.\"\"\"\n mask64 = (1 << 64) - 1\n h = (1469598103934665603 ^ (seed & mask64)) & mask64\n fnv_prime = 1099511628211\n for ch in text.encode(\"utf-8\", errors=\"ignore\"):\n h ^= ch\n h = (h * fnv_prime) & mask64\n return h & ((1 << 63) - 1)\n\n\ndef _feature_hash(values: List[Tuple[str, float]], dim: int, seed: int) -> np.ndarray:\n \"\"\"Vectorized feature hashing with stable sign seed (≈10× faster).\"\"\"\n if not values:\n return np.zeros((dim,), dtype=np.float32)\n keys = [k for (k, _w) in values]\n weights = np.array([float(w) for (_k, w) in values], dtype=np.float32)\n idx = np.fromiter(((\n _stable_hash(k, seed=seed) % dim\n ) for k in keys), dtype=np.int64)\n # use seed+1 for sign; lowest bit decides sign\n signs = np.fromiter(((\n 1.0 if ((_stable_hash(k + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n ) for k in keys), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n np.add.at(vec, idx, signs * weights)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _accumulate_sparse(acc: Dict[int, float], key: str, weight: float, dim: int, seed: int) -> None:\n idx = int(_stable_hash(key, seed=seed) % dim)\n sign = 1.0 if ((_stable_hash(key + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n acc[idx] = float(acc.get(idx, 0.0)) + float(sign * weight)\n\n\ndef _dense_from_sparse(acc: Dict[int, float], dim: int) -> np.ndarray:\n if not acc:\n return np.zeros((dim,), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n for i, v in acc.items():\n if 0 <= int(i) < dim:\n vec[int(i)] = vec[int(i)] + float(v)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _artifact_rel_path(art: Artifact) -> Optional[str]:\n try:\n # Expect URIs like program:///artifact/\n u = art.uri\n if \"/artifact/\" in u:\n return u.split(\"/artifact/\", 1)[-1]\n return None\n except Exception:\n return None\n\n\ndef build_program_embedding(\n pg: ProgramGraph,\n *,\n sources_root: Optional[str] = None,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_text: bool = False,\n text_max_bytes: 
int = 0,\n max_text_tokens: int = 0,\n text_weight: float = 0.25,\n calls_weight: float = 0.25,\n types_weight: float = 0.20, # kept for signature parity; may be neutral here\n tests_weight: float = 0.15, # kept for parity; may be neutral here\n contracts_kv: Optional[Dict[str, str]] = None,\n contracts_weight: float = 0.10,\n graph_prop_hops: int = 0,\n graph_prop_damp: float = 0.85,\n ignore: Optional[List[str]] = None,\n) -> Dict[str, np.ndarray]:\n \"\"\"Program-agnostic embedding from a ProgramGraph.\"\"\"\n # Symbolic/channel features from entities\n sym_feats: List[Tuple[str, float]] = []\n mod_feats: List[Tuple[str, float]] = []\n owners: List[str] = []\n try:\n for e in pg.entities():\n sym_feats.append((f\"{e.kind}:{e.name}\", 1.0))\n if e.owner:\n owners.append(e.owner)\n mod_feats.append((f\"owner:{e.owner}\", 0.2))\n except Exception:\n pass\n\n # Topology from edges aggregated by owner and entity degrees\n indeg: Dict[str, int] = {}\n outdeg: Dict[str, int] = {}\n ent_indeg: Dict[str, int] = {}\n ent_outdeg: Dict[str, int] = {}\n try:\n for ed in pg.edges():\n ent_outdeg[ed.src] = ent_outdeg.get(ed.src, 0) + 1\n ent_indeg[ed.dst] = ent_indeg.get(ed.dst, 0) + 1\n # Map entity degrees to owners if available\n ent_owner: Dict[str, Optional[str]] = {}\n try:\n for e in pg.entities():\n ent_owner[e.id] = e.owner\n except Exception:\n ent_owner = {}\n for ent, d in ent_indeg.items():\n ow = ent_owner.get(ent)\n if ow:\n indeg[ow] = indeg.get(ow, 0) + int(d)\n for ent, d in ent_outdeg.items():\n ow = ent_owner.get(ent)\n if ow:\n outdeg[ow] = outdeg.get(ow, 0) + int(d)\n except Exception:\n pass\n\n # Add simple file/dir topology derived from artifacts to ensure z_top density\n dir_counts: Dict[str, int] = {}\n try:\n for art in pg.artifacts(\"source\"):\n rel = _artifact_rel_path(art)\n if not rel:\n continue\n dname = os.path.dirname(rel).replace(\"\\\\\", \"/\")\n dir_counts[dname] = dir_counts.get(dname, 0) + 1\n except Exception:\n # Fallback: approximate by walking sources_root if available\n if sources_root:\n try:\n for dirpath, _dirnames, filenames in os.walk(sources_root):\n rel_dir = os.path.relpath(dirpath, sources_root).replace(\"\\\\\", \"/\")\n if rel_dir == \".\":\n rel_dir = \"\"\n if filenames:\n dir_counts[rel_dir] = dir_counts.get(rel_dir, 0) + len(filenames)\n except Exception:\n pass\n topo_feats: List[Tuple[str, float]] = []\n owners_unique = sorted(set(owners))\n for ow in owners_unique:\n topo_feats.append((f\"indeg:{ow}\", float(indeg.get(ow, 0))))\n topo_feats.append((f\"outdeg:{ow}\", float(outdeg.get(ow, 0))))\n for d, c in dir_counts.items():\n topo_feats.append((f\"dir:{d}\", float(c)))\n\n # No dedicated doc/types/calls views at generic layer; set to zeros\n z_sym = _feature_hash(sym_feats, dim, seed + HASH_SEEDS[0])\n z_doc = np.zeros((dim,), dtype=np.float32)\n z_mod = _feature_hash(mod_feats, dim, seed + HASH_SEEDS[2])\n z_top = _feature_hash(topo_feats, dim, seed + HASH_SEEDS[3])\n z_types = np.zeros((dim,), dtype=np.float32)\n z_calls = np.zeros((dim,), dtype=np.float32)\n # Optional contracts channel: hash key/value hints from ProgramContracts\n z_contracts = np.zeros((dim,), dtype=np.float32)\n try:\n if contracts_kv:\n feats_c: List[Tuple[str, float]] = []\n for k, v in list(contracts_kv.items()):\n if v is None:\n continue\n feats_c.append((f\"contracts:{str(k)}:{str(v)}\", 1.0))\n if feats_c:\n z_contracts = _feature_hash(feats_c, dim, seed + HASH_SEEDS[6])\n except Exception:\n z_contracts = np.zeros((dim,), dtype=np.float32)\n\n # Optional 
textual channel from artifacts(kind=\"source\")\n z_text = np.zeros((dim,), dtype=np.float32)\n if include_text and text_max_bytes and text_max_bytes > 0 and sources_root:\n ignore_list = [os.path.normpath(p) for p in (ignore or [])]\n def _ignored(rel: str) -> bool:\n r = os.path.normpath(rel)\n for pat in ignore_list:\n if r == pat or r.startswith(pat + os.sep):\n return True\n return False\n text_acc: Dict[int, float] = {}\n bytes_budget: Optional[int] = int(text_max_bytes) if int(text_max_bytes) > 0 else None\n token_cap = int(max(0, int(max_text_tokens))) if max_text_tokens is not None else 0\n tokens_emitted = 0\n files: List[str] = []\n try:\n for art in pg.artifacts(\"source\"):\n rel = _artifact_rel_path(art)\n if not rel or _ignored(rel):\n continue\n ap = os.path.abspath(os.path.join(sources_root, rel))\n if os.path.isfile(ap):\n files.append(ap)\n except Exception:\n files = []\n # Fallback if ProgramGraph provided no artifacts\n if not files:\n try:\n exts_all = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n for root, dirs, fnames in os.walk(sources_root):\n rel_root = os.path.relpath(root, sources_root)\n if _ignored(rel_root):\n dirs[:] = []\n continue\n dirs[:] = [d for d in dirs if not _ignored(os.path.join(rel_root, d))]\n for fn in fnames:\n fp = os.path.join(root, fn)\n rel_fp = os.path.relpath(fp, sources_root)\n if _ignored(rel_fp):\n continue\n if os.path.splitext(fp)[1].lower() in exts_all:\n files.append(fp)\n except Exception:\n files = []\n def _process_file(fp: str) -> Tuple[int, int, Dict[int, float]]:\n try:\n per_cap = int(text_max_bytes)\n with open(fp, \"rb\") as fh:\n raw = fh.read(per_cap)\n if b\"\\x00\" in raw:\n return (0, 0, {})\n n_bytes = len(raw)\n text = raw.decode(\"utf-8\", errors=\"ignore\").lower()\n toks = re.findall(r\"[a-zA-Z0-9_]+\", text)\n if not toks:\n return (n_bytes, 0, {})\n n = 3\n stride = 2\n acc_local: Dict[int, float] = {}\n ng_count = 0\n for i in range(0, max(0, len(toks) - n + 1), stride):\n key = f\"text:{' '.join(toks[i:i+n])}\"\n _accumulate_sparse(acc_local, key, float(text_weight), dim, seed + HASH_SEEDS[4])\n ng_count += 1\n return (n_bytes, ng_count, acc_local)\n except Exception:\n return (0, 0, {})\n with ThreadPoolExecutor(max_workers=min(8, max(2, os.cpu_count() or 8))) as ex:\n futures = [ex.submit(_process_file, fp) for fp in files]\n for fut in as_completed(futures):\n if bytes_budget is not None and bytes_budget <= 0:\n break\n n_bytes, ng_count, acc_local = fut.result()\n if bytes_budget is not None:\n bytes_budget -= n_bytes\n if bytes_budget is not None and bytes_budget <= 0:\n pass\n if not acc_local:\n continue\n tokens_emitted += int(ng_count)\n for k, v in acc_local.items():\n text_acc[k] = float(text_acc.get(k, 0.0)) + float(v)\n if token_cap and tokens_emitted >= token_cap:\n break\n if text_acc:\n z_text = _dense_from_sparse(text_acc, dim)\n\n def _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n z = (\n _unit(z_sym)\n + _unit(z_mod)\n + _unit(z_top)\n + float(text_weight) * _unit(z_text)\n + float(types_weight) * _unit(z_types)\n + float(calls_weight) * _unit(z_calls)\n + float(max(0.0, contracts_weight)) * _unit(z_contracts)\n + float(tests_weight) * _unit(np.zeros_like(z_sym))\n )\n nrm = float(np.linalg.norm(z))\n if nrm > 0:\n z = z / nrm\n result = {\n \"z\": z.astype(np.float32),\n \"z_sym\": z_sym.astype(np.float32),\n \"z_doc\": z_doc.astype(np.float32),\n \"z_mod\": z_mod.astype(np.float32),\n 
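To make the text channel concrete, here is the trigram/stride-2 key construction from _process_file applied to an assumed sentence:

import re
text = "attention computes similarity between queries and keys"
toks = re.findall(r"[a-zA-Z0-9_]+", text.lower())
n, stride = 3, 2
keys = [f"text:{' '.join(toks[i:i+n])}"
        for i in range(0, max(0, len(toks) - n + 1), stride)]
# ['text:attention computes similarity',
#  'text:similarity between queries',
#  'text:queries and keys']
# Each key is then feature-hashed into the shared dim-sized vector with text_weight.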
\"z_top\": z_top.astype(np.float32),\n \"z_types\": z_types.astype(np.float32),\n \"z_calls\": z_calls.astype(np.float32),\n \"z_contracts\": z_contracts.astype(np.float32),\n \"z_tests\": np.zeros((dim,), dtype=np.float32),\n \"sparsity\": {\n \"z_sym\": float((np.count_nonzero(z_sym) / float(max(1, z_sym.size)))),\n \"z_doc\": float((np.count_nonzero(z_doc) / float(max(1, z_doc.size)))),\n \"z_mod\": float((np.count_nonzero(z_mod) / float(max(1, z_mod.size)))),\n \"z_top\": float((np.count_nonzero(z_top) / float(max(1, z_top.size)))),\n \"z_text\": float((np.count_nonzero(z_text) / float(max(1, z_text.size)))),\n \"z_types\": float((np.count_nonzero(z_types) / float(max(1, z_types.size)))),\n \"z_calls\": float((np.count_nonzero(z_calls) / float(max(1, z_calls.size)))),\n \"z_contracts\": float((np.count_nonzero(z_contracts) / float(max(1, z_contracts.size)))),\n \"z_tests\": 0.0,\n },\n }\n if include_text:\n result[\"z_text\"] = z_text.astype(np.float32)\n return result\n\n\ndef build_subgraph_embedding_from_program(\n pg: ProgramGraph,\n *,\n sources_root: Optional[str] = None,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_owners: Optional[List[str]] = None,\n include_artifact_paths: Optional[List[str]] = None,\n include_text: bool = False,\n text_max_bytes: int = 0,\n max_text_tokens: int = 0,\n text_weight: float = 0.25,\n calls_weight: float = 0.25,\n types_weight: float = 0.20,\n tests_weight: float = 0.15,\n graph_prop_hops: int = 0,\n graph_prop_damp: float = 0.85,\n) -> Dict[str, np.ndarray]:\n owners_set = set(include_owners or [])\n files_set_norm: Optional[Set[str]] = None\n if include_artifact_paths:\n files_set_norm = set([p.replace(\"\\\\\", \"/\") for p in include_artifact_paths])\n # Entities filtered by owners\n sym_feats: List[Tuple[str, float]] = []\n mod_feats: List[Tuple[str, float]] = []\n try:\n for e in pg.entities():\n if owners_set and (e.owner not in owners_set):\n continue\n sym_feats.append((f\"{e.kind}:{e.name}\", 1.0))\n if e.owner:\n mod_feats.append((f\"owner:{e.owner}\", 0.2))\n except Exception:\n pass\n # Topology filtered by owners\n indeg: Dict[str, int] = {}\n outdeg: Dict[str, int] = {}\n try:\n ent_owner: Dict[str, Optional[str]] = {}\n for e in pg.entities():\n ent_owner[e.id] = e.owner\n for ed in pg.edges():\n ow_src = ent_owner.get(ed.src)\n ow_dst = ent_owner.get(ed.dst)\n if owners_set and (ow_src not in owners_set and ow_dst not in owners_set):\n continue\n if ow_src:\n outdeg[ow_src] = outdeg.get(ow_src, 0) + 1\n if ow_dst:\n indeg[ow_dst] = indeg.get(ow_dst, 0) + 1\n except Exception:\n pass\n topo_feats: List[Tuple[str, float]] = []\n for ow in sorted(set([o for o in (include_owners or []) if o])):\n topo_feats.append((f\"indeg:{ow}\", float(indeg.get(ow, 0))))\n topo_feats.append((f\"outdeg:{ow}\", float(outdeg.get(ow, 0))))\n # Text channel: only from included artifact paths\n z_text = np.zeros((dim,), dtype=np.float32)\n if include_text and text_max_bytes and text_max_bytes > 0 and sources_root and files_set_norm:\n text_acc: Dict[int, float] = {}\n bytes_budget: Optional[int] = int(text_max_bytes) if int(text_max_bytes) > 0 else None\n token_cap = int(max(0, int(max_text_tokens))) if max_text_tokens is not None else 0\n tokens_emitted = 0\n files: List[str] = []\n try:\n for art in pg.artifacts(\"source\"):\n rel = _artifact_rel_path(art)\n if not rel:\n continue\n # allow both repo-absolute and rel forms in allowlist\n rel_norm = rel.replace(\"\\\\\", \"/\")\n abs_norm = os.path.abspath(os.path.join(sources_root, 
rel_norm))\n if (rel_norm in files_set_norm) or (abs_norm in files_set_norm):\n files.append(abs_norm)\n except Exception:\n files = []\n def _process_file(fp: str) -> Tuple[int, int, Dict[int, float]]:\n try:\n per_cap = int(text_max_bytes)\n with open(fp, \"rb\") as fh:\n raw = fh.read(per_cap)\n if b\"\\x00\" in raw:\n return (0, 0, {})\n n_bytes = len(raw)\n text = raw.decode(\"utf-8\", errors=\"ignore\").lower()\n toks = re.findall(r\"[a-zA-Z0-9_]+\", text)\n if not toks:\n return (n_bytes, 0, {})\n n = 3\n stride = 2\n acc_local: Dict[int, float] = {}\n ng_count = 0\n for i in range(0, max(0, len(toks) - n + 1), stride):\n key = f\"text:{' '.join(toks[i:i+n])}\"\n _accumulate_sparse(acc_local, key, float(text_weight), dim, seed + HASH_SEEDS[4])\n ng_count += 1\n return (n_bytes, ng_count, acc_local)\n except Exception:\n return (0, 0, {})\n with ThreadPoolExecutor(max_workers=min(8, max(2, os.cpu_count() or 8))) as ex:\n futures = [ex.submit(_process_file, fp) for fp in files]\n for fut in as_completed(futures):\n if bytes_budget is not None and bytes_budget <= 0:\n break\n n_bytes, ng_count, acc_local = fut.result()\n if bytes_budget is not None:\n bytes_budget -= n_bytes\n if not acc_local:\n continue\n tokens_emitted += int(ng_count)\n for k, v in acc_local.items():\n text_acc[k] = float(text_acc.get(k, 0.0)) + float(v)\n if token_cap and tokens_emitted >= token_cap:\n break\n if text_acc:\n z_text = _dense_from_sparse(text_acc, dim)\n z_sym = _feature_hash(sym_feats, dim, seed + HASH_SEEDS[0])\n z_mod = _feature_hash(mod_feats, dim, seed + HASH_SEEDS[2])\n z_top = _feature_hash(topo_feats, dim, seed + HASH_SEEDS[3])\n z_types = np.zeros((dim,), dtype=np.float32)\n z_calls = np.zeros((dim,), dtype=np.float32)\n def _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n z = (\n _unit(z_sym)\n + _unit(z_mod)\n + _unit(z_top)\n + float(text_weight) * _unit(z_text)\n + float(types_weight) * _unit(z_types)\n + float(calls_weight) * _unit(z_calls)\n + float(tests_weight) * _unit(np.zeros_like(z_sym))\n )\n nrm = float(np.linalg.norm(z))\n if nrm > 0:\n z = z / nrm\n result = {\n \"z\": z.astype(np.float32),\n \"z_sym\": z_sym.astype(np.float32),\n \"z_doc\": np.zeros((dim,), dtype=np.float32),\n \"z_mod\": z_mod.astype(np.float32),\n \"z_top\": z_top.astype(np.float32),\n \"z_types\": z_types.astype(np.float32),\n \"z_calls\": z_calls.astype(np.float32),\n \"z_tests\": np.zeros((dim,), dtype=np.float32),\n }\n if include_text:\n result[\"z_text\"] = z_text.astype(np.float32)\n return result\n\ndef join_embeddings(z_old: Optional[np.ndarray], z_new: Optional[np.ndarray], *, w_old: float = 1.0, w_new: float = 1.0) -> Optional[np.ndarray]:\n \"\"\"Monotone anytime join of two embedding vectors.\n\n Returns a unit-normalized convex-like combination w_old*unit(z_old) + w_new*unit(z_new).\n If both are None, returns None.\n \"\"\"\n try:\n def _unit(a: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(a))\n return (a / n) if n > 0 else a\n# ... 
truncated ...","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding.auto_model_dims","uri":"program://Program_Conditioned_Adapter/function/modules.embedding.auto_model_dims#L15-L24","kind":"function","name":"auto_model_dims","path":"modules/embedding.py","language":"python","start_line":15,"end_line":24,"context_start_line":1,"context_end_line":44,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport os\nimport re\nimport numpy as np\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\ntry:\n import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\nfrom modules.program_graph import ProgramGraph, Artifact # type: ignore\n\nEMBED_DIM_DEFAULT = 128\nHASH_SEEDS = [1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344]\n\ndef auto_model_dims(model_id: str, cache_dir: Optional[str]) -> Tuple[int, int]:\n try:\n from transformers import AutoConfig # type: ignore\n\n cfg = AutoConfig.from_pretrained(model_id, cache_dir=cache_dir)\n n_layers = int(getattr(cfg, \"num_hidden_layers\", 0) or 0)\n d_model = int(getattr(cfg, \"hidden_size\", 0) or 0)\n return n_layers, d_model\n except Exception:\n return 0, 0\n\n\ndef _stable_hash(text: str, seed: int = 0) -> int:\n \"\"\"64-bit FNV-1a; returns positive 63-bit for stable modulo ops.\"\"\"\n mask64 = (1 << 64) - 1\n h = (1469598103934665603 ^ (seed & mask64)) & mask64\n fnv_prime = 1099511628211\n for ch in text.encode(\"utf-8\", errors=\"ignore\"):\n h ^= ch\n h = (h * fnv_prime) & mask64\n return h & ((1 << 63) - 1)\n\n\ndef _feature_hash(values: List[Tuple[str, float]], dim: int, seed: int) -> np.ndarray:\n \"\"\"Vectorized feature hashing with stable sign seed (≈10× faster).\"\"\"\n if not values:\n return np.zeros((dim,), dtype=np.float32)\n keys = [k for (k, _w) in values]\n weights = np.array([float(w) for (_k, w) in values], dtype=np.float32)\n idx = np.fromiter(((","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding._stable_hash","uri":"program://Program_Conditioned_Adapter/function/modules.embedding._stable_hash#L27-L35","kind":"function","name":"_stable_hash","path":"modules/embedding.py","language":"python","start_line":27,"end_line":35,"context_start_line":7,"context_end_line":55,"code":" import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\nfrom modules.program_graph import ProgramGraph, Artifact # type: ignore\n\nEMBED_DIM_DEFAULT = 128\nHASH_SEEDS = [1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344]\n\ndef auto_model_dims(model_id: str, cache_dir: Optional[str]) -> Tuple[int, int]:\n try:\n from transformers import AutoConfig # type: ignore\n\n cfg = AutoConfig.from_pretrained(model_id, cache_dir=cache_dir)\n n_layers = int(getattr(cfg, \"num_hidden_layers\", 0) or 0)\n d_model = int(getattr(cfg, \"hidden_size\", 0) or 0)\n return n_layers, d_model\n except Exception:\n return 0, 0\n\n\ndef _stable_hash(text: str, seed: int = 0) -> int:\n \"\"\"64-bit FNV-1a; returns positive 63-bit for stable modulo ops.\"\"\"\n mask64 = (1 << 64) - 1\n h = (1469598103934665603 ^ (seed & mask64)) & mask64\n fnv_prime = 1099511628211\n for ch in text.encode(\"utf-8\", errors=\"ignore\"):\n h ^= ch\n h = (h * fnv_prime) & mask64\n return h & ((1 << 63) - 1)\n\n\ndef _feature_hash(values: List[Tuple[str, 
float]], dim: int, seed: int) -> np.ndarray:\n \"\"\"Vectorized feature hashing with stable sign seed (≈10× faster).\"\"\"\n if not values:\n return np.zeros((dim,), dtype=np.float32)\n keys = [k for (k, _w) in values]\n weights = np.array([float(w) for (_k, w) in values], dtype=np.float32)\n idx = np.fromiter(((\n _stable_hash(k, seed=seed) % dim\n ) for k in keys), dtype=np.int64)\n # use seed+1 for sign; lowest bit decides sign\n signs = np.fromiter(((\n 1.0 if ((_stable_hash(k + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n ) for k in keys), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n np.add.at(vec, idx, signs * weights)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding._feature_hash","uri":"program://Program_Conditioned_Adapter/function/modules.embedding._feature_hash#L38-L54","kind":"function","name":"_feature_hash","path":"modules/embedding.py","language":"python","start_line":38,"end_line":54,"context_start_line":18,"context_end_line":74,"code":"\n cfg = AutoConfig.from_pretrained(model_id, cache_dir=cache_dir)\n n_layers = int(getattr(cfg, \"num_hidden_layers\", 0) or 0)\n d_model = int(getattr(cfg, \"hidden_size\", 0) or 0)\n return n_layers, d_model\n except Exception:\n return 0, 0\n\n\ndef _stable_hash(text: str, seed: int = 0) -> int:\n \"\"\"64-bit FNV-1a; returns positive 63-bit for stable modulo ops.\"\"\"\n mask64 = (1 << 64) - 1\n h = (1469598103934665603 ^ (seed & mask64)) & mask64\n fnv_prime = 1099511628211\n for ch in text.encode(\"utf-8\", errors=\"ignore\"):\n h ^= ch\n h = (h * fnv_prime) & mask64\n return h & ((1 << 63) - 1)\n\n\ndef _feature_hash(values: List[Tuple[str, float]], dim: int, seed: int) -> np.ndarray:\n \"\"\"Vectorized feature hashing with stable sign seed (≈10× faster).\"\"\"\n if not values:\n return np.zeros((dim,), dtype=np.float32)\n keys = [k for (k, _w) in values]\n weights = np.array([float(w) for (_k, w) in values], dtype=np.float32)\n idx = np.fromiter(((\n _stable_hash(k, seed=seed) % dim\n ) for k in keys), dtype=np.int64)\n # use seed+1 for sign; lowest bit decides sign\n signs = np.fromiter(((\n 1.0 if ((_stable_hash(k + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n ) for k in keys), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n np.add.at(vec, idx, signs * weights)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _accumulate_sparse(acc: Dict[int, float], key: str, weight: float, dim: int, seed: int) -> None:\n idx = int(_stable_hash(key, seed=seed) % dim)\n sign = 1.0 if ((_stable_hash(key + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n acc[idx] = float(acc.get(idx, 0.0)) + float(sign * weight)\n\n\ndef _dense_from_sparse(acc: Dict[int, float], dim: int) -> np.ndarray:\n if not acc:\n return np.zeros((dim,), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n for i, v in acc.items():\n if 0 <= int(i) < dim:\n vec[int(i)] = vec[int(i)] + float(v)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _artifact_rel_path(art: Artifact) -> Optional[str]:","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} 
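A self-contained sketch of the hashing trick behind _feature_hash and _stable_hash above: the seeded FNV-style hash picks a bucket, and a second hash (seed+1, key suffixed with "#") supplies the sign via its lowest bit. The constants are copied from the module; dim=16 and the feature keys are illustrative:

import numpy as np

def fnv1a63(text: str, seed: int = 0) -> int:
    # 64-bit FNV-1a-style mix masked to a positive 63-bit value (as in _stable_hash).
    mask64 = (1 << 64) - 1
    h = (1469598103934665603 ^ (seed & mask64)) & mask64
    for ch in text.encode("utf-8", errors="ignore"):
        h = ((h ^ ch) * 1099511628211) & mask64
    return h & ((1 << 63) - 1)

dim, seed = 16, 0
vec = np.zeros(dim, dtype=np.float32)
for key, w in [("func:main", 1.0), ("owner:build", 0.2)]:   # hypothetical features
    idx = fnv1a63(key, seed) % dim
    sign = 1.0 if (fnv1a63(key + "#", seed + 1) & 1) == 0 else -1.0
    vec[idx] += sign * w
vec /= max(float(np.linalg.norm(vec)), 1e-12)   # unit-normalize like _feature_hash
# Signed hashing keeps colliding features from always reinforcing each other,
# so collisions cancel in expectation.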
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding._accumulate_sparse","uri":"program://Program_Conditioned_Adapter/function/modules.embedding._accumulate_sparse#L57-L60","kind":"function","name":"_accumulate_sparse","path":"modules/embedding.py","language":"python","start_line":57,"end_line":60,"context_start_line":37,"context_end_line":80,"code":"\ndef _feature_hash(values: List[Tuple[str, float]], dim: int, seed: int) -> np.ndarray:\n \"\"\"Vectorized feature hashing with stable sign seed (≈10× faster).\"\"\"\n if not values:\n return np.zeros((dim,), dtype=np.float32)\n keys = [k for (k, _w) in values]\n weights = np.array([float(w) for (_k, w) in values], dtype=np.float32)\n idx = np.fromiter(((\n _stable_hash(k, seed=seed) % dim\n ) for k in keys), dtype=np.int64)\n # use seed+1 for sign; lowest bit decides sign\n signs = np.fromiter(((\n 1.0 if ((_stable_hash(k + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n ) for k in keys), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n np.add.at(vec, idx, signs * weights)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _accumulate_sparse(acc: Dict[int, float], key: str, weight: float, dim: int, seed: int) -> None:\n idx = int(_stable_hash(key, seed=seed) % dim)\n sign = 1.0 if ((_stable_hash(key + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n acc[idx] = float(acc.get(idx, 0.0)) + float(sign * weight)\n\n\ndef _dense_from_sparse(acc: Dict[int, float], dim: int) -> np.ndarray:\n if not acc:\n return np.zeros((dim,), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n for i, v in acc.items():\n if 0 <= int(i) < dim:\n vec[int(i)] = vec[int(i)] + float(v)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _artifact_rel_path(art: Artifact) -> Optional[str]:\n try:\n # Expect URIs like program:///artifact/\n u = art.uri\n if \"/artifact/\" in u:\n return u.split(\"/artifact/\", 1)[-1]\n return None","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding._dense_from_sparse","uri":"program://Program_Conditioned_Adapter/function/modules.embedding._dense_from_sparse#L63-L71","kind":"function","name":"_dense_from_sparse","path":"modules/embedding.py","language":"python","start_line":63,"end_line":71,"context_start_line":43,"context_end_line":91,"code":" weights = np.array([float(w) for (_k, w) in values], dtype=np.float32)\n idx = np.fromiter(((\n _stable_hash(k, seed=seed) % dim\n ) for k in keys), dtype=np.int64)\n # use seed+1 for sign; lowest bit decides sign\n signs = np.fromiter(((\n 1.0 if ((_stable_hash(k + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n ) for k in keys), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n np.add.at(vec, idx, signs * weights)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _accumulate_sparse(acc: Dict[int, float], key: str, weight: float, dim: int, seed: int) -> None:\n idx = int(_stable_hash(key, seed=seed) % dim)\n sign = 1.0 if ((_stable_hash(key + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n acc[idx] = float(acc.get(idx, 0.0)) + float(sign * weight)\n\n\ndef _dense_from_sparse(acc: Dict[int, float], dim: int) -> np.ndarray:\n if not acc:\n return np.zeros((dim,), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n for i, v in acc.items():\n if 0 <= int(i) < dim:\n vec[int(i)] = vec[int(i)] + float(v)\n nrm = 
float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _artifact_rel_path(art: Artifact) -> Optional[str]:\n try:\n # Expect URIs like program:///artifact/\n u = art.uri\n if \"/artifact/\" in u:\n return u.split(\"/artifact/\", 1)[-1]\n return None\n except Exception:\n return None\n\n\ndef build_program_embedding(\n pg: ProgramGraph,\n *,\n sources_root: Optional[str] = None,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_text: bool = False,","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding._artifact_rel_path","uri":"program://Program_Conditioned_Adapter/function/modules.embedding._artifact_rel_path#L74-L82","kind":"function","name":"_artifact_rel_path","path":"modules/embedding.py","language":"python","start_line":74,"end_line":82,"context_start_line":54,"context_end_line":102,"code":" return (vec / nrm) if nrm > 0 else vec\n\n\ndef _accumulate_sparse(acc: Dict[int, float], key: str, weight: float, dim: int, seed: int) -> None:\n idx = int(_stable_hash(key, seed=seed) % dim)\n sign = 1.0 if ((_stable_hash(key + \"#\", seed=seed + 1) & 1) == 0) else -1.0\n acc[idx] = float(acc.get(idx, 0.0)) + float(sign * weight)\n\n\ndef _dense_from_sparse(acc: Dict[int, float], dim: int) -> np.ndarray:\n if not acc:\n return np.zeros((dim,), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n for i, v in acc.items():\n if 0 <= int(i) < dim:\n vec[int(i)] = vec[int(i)] + float(v)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _artifact_rel_path(art: Artifact) -> Optional[str]:\n try:\n # Expect URIs like program:///artifact/\n u = art.uri\n if \"/artifact/\" in u:\n return u.split(\"/artifact/\", 1)[-1]\n return None\n except Exception:\n return None\n\n\ndef build_program_embedding(\n pg: ProgramGraph,\n *,\n sources_root: Optional[str] = None,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_text: bool = False,\n text_max_bytes: int = 0,\n max_text_tokens: int = 0,\n text_weight: float = 0.25,\n calls_weight: float = 0.25,\n types_weight: float = 0.20, # kept for signature parity; may be neutral here\n tests_weight: float = 0.15, # kept for parity; may be neutral here\n contracts_kv: Optional[Dict[str, str]] = None,\n contracts_weight: float = 0.10,\n graph_prop_hops: int = 0,\n graph_prop_damp: float = 0.85,\n ignore: Optional[List[str]] = None,","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding.build_program_embedding","uri":"program://Program_Conditioned_Adapter/function/modules.embedding.build_program_embedding#L85-L322","kind":"function","name":"build_program_embedding","path":"modules/embedding.py","language":"python","start_line":85,"end_line":322,"context_start_line":65,"context_end_line":342,"code":" return np.zeros((dim,), dtype=np.float32)\n vec = np.zeros((dim,), dtype=np.float32)\n for i, v in acc.items():\n if 0 <= int(i) < dim:\n vec[int(i)] = vec[int(i)] + float(v)\n nrm = float(np.linalg.norm(vec))\n return (vec / nrm) if nrm > 0 else vec\n\n\ndef _artifact_rel_path(art: Artifact) -> Optional[str]:\n try:\n # Expect URIs like program:///artifact/\n u = art.uri\n if \"/artifact/\" in u:\n return u.split(\"/artifact/\", 1)[-1]\n return None\n except Exception:\n return None\n\n\ndef build_program_embedding(\n pg: ProgramGraph,\n *,\n sources_root: 
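`_artifact_rel_path` just splits on the literal `"/artifact/"` marker. The URIs below are hypothetical, following the `program://<repo>/artifact/<relpath>` shape the comment appears to intend (its placeholders look like they were stripped in extraction):

```python
def artifact_rel_path(uri: str):
    # Everything after the first "/artifact/" is the repo-relative path.
    return uri.split("/artifact/", 1)[-1] if "/artifact/" in uri else None

print(artifact_rel_path("program://Program_Conditioned_Adapter/artifact/modules/embedding.py"))
# -> modules/embedding.py
print(artifact_rel_path("program://Program_Conditioned_Adapter/entity/foo"))
# -> None (no artifact marker)
```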
Optional[str] = None,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_text: bool = False,\n text_max_bytes: int = 0,\n max_text_tokens: int = 0,\n text_weight: float = 0.25,\n calls_weight: float = 0.25,\n types_weight: float = 0.20, # kept for signature parity; may be neutral here\n tests_weight: float = 0.15, # kept for parity; may be neutral here\n contracts_kv: Optional[Dict[str, str]] = None,\n contracts_weight: float = 0.10,\n graph_prop_hops: int = 0,\n graph_prop_damp: float = 0.85,\n ignore: Optional[List[str]] = None,\n) -> Dict[str, np.ndarray]:\n \"\"\"Program-agnostic embedding from a ProgramGraph.\"\"\"\n # Symbolic/channel features from entities\n sym_feats: List[Tuple[str, float]] = []\n mod_feats: List[Tuple[str, float]] = []\n owners: List[str] = []\n try:\n for e in pg.entities():\n sym_feats.append((f\"{e.kind}:{e.name}\", 1.0))\n if e.owner:\n owners.append(e.owner)\n mod_feats.append((f\"owner:{e.owner}\", 0.2))\n except Exception:\n pass\n\n # Topology from edges aggregated by owner and entity degrees\n indeg: Dict[str, int] = {}\n outdeg: Dict[str, int] = {}\n ent_indeg: Dict[str, int] = {}\n ent_outdeg: Dict[str, int] = {}\n try:\n for ed in pg.edges():\n ent_outdeg[ed.src] = ent_outdeg.get(ed.src, 0) + 1\n ent_indeg[ed.dst] = ent_indeg.get(ed.dst, 0) + 1\n # Map entity degrees to owners if available\n ent_owner: Dict[str, Optional[str]] = {}\n try:\n for e in pg.entities():\n ent_owner[e.id] = e.owner\n except Exception:\n ent_owner = {}\n for ent, d in ent_indeg.items():\n ow = ent_owner.get(ent)\n if ow:\n indeg[ow] = indeg.get(ow, 0) + int(d)\n for ent, d in ent_outdeg.items():\n ow = ent_owner.get(ent)\n if ow:\n outdeg[ow] = outdeg.get(ow, 0) + int(d)\n except Exception:\n pass\n\n # Add simple file/dir topology derived from artifacts to ensure z_top density\n dir_counts: Dict[str, int] = {}\n try:\n for art in pg.artifacts(\"source\"):\n rel = _artifact_rel_path(art)\n if not rel:\n continue\n dname = os.path.dirname(rel).replace(\"\\\\\", \"/\")\n dir_counts[dname] = dir_counts.get(dname, 0) + 1\n except Exception:\n # Fallback: approximate by walking sources_root if available\n if sources_root:\n try:\n for dirpath, _dirnames, filenames in os.walk(sources_root):\n rel_dir = os.path.relpath(dirpath, sources_root).replace(\"\\\\\", \"/\")\n if rel_dir == \".\":\n rel_dir = \"\"\n if filenames:\n dir_counts[rel_dir] = dir_counts.get(rel_dir, 0) + len(filenames)\n except Exception:\n pass\n topo_feats: List[Tuple[str, float]] = []\n owners_unique = sorted(set(owners))\n for ow in owners_unique:\n topo_feats.append((f\"indeg:{ow}\", float(indeg.get(ow, 0))))\n topo_feats.append((f\"outdeg:{ow}\", float(outdeg.get(ow, 0))))\n for d, c in dir_counts.items():\n topo_feats.append((f\"dir:{d}\", float(c)))\n\n # No dedicated doc/types/calls views at generic layer; set to zeros\n z_sym = _feature_hash(sym_feats, dim, seed + HASH_SEEDS[0])\n z_doc = np.zeros((dim,), dtype=np.float32)\n z_mod = _feature_hash(mod_feats, dim, seed + HASH_SEEDS[2])\n z_top = _feature_hash(topo_feats, dim, seed + HASH_SEEDS[3])\n z_types = np.zeros((dim,), dtype=np.float32)\n z_calls = np.zeros((dim,), dtype=np.float32)\n # Optional contracts channel: hash key/value hints from ProgramContracts\n z_contracts = np.zeros((dim,), dtype=np.float32)\n try:\n if contracts_kv:\n feats_c: List[Tuple[str, float]] = []\n for k, v in list(contracts_kv.items()):\n if v is None:\n continue\n feats_c.append((f\"contracts:{str(k)}:{str(v)}\", 1.0))\n if feats_c:\n z_contracts = 
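For a concrete end-to-end call, here is a minimal sketch, assuming `modules.embedding` and `modules.program_graph` are importable from the repository root; `TinyGraph` and its two entities are invented purely for illustration. Only `entities()`, `edges()`, and `artifacts()` are exercised by `build_program_embedding`; the remaining methods are stubs so the class still satisfies the `ProgramGraph` protocol:

```python
from modules.embedding import build_program_embedding
from modules.program_graph import Entity, Edge

class TinyGraph:
    def entities(self):
        yield Entity(uri="program://demo/entity/f", id="f", kind="function", name="f", owner="pkg.mod")
        yield Entity(uri="program://demo/entity/g", id="g", kind="function", name="g", owner="pkg.mod")
    def edges(self):
        yield Edge(src="f", dst="g", type="calls")
    def artifacts(self, kind):
        return []  # no source artifacts; the dir-count channel stays empty
    def search_refs(self, token): return []
    def subgraph(self, seeds, radius): return self
    def resolve(self, uri): raise KeyError(uri)

emb = build_program_embedding(TinyGraph(), dim=128, seed=0)
print(sorted(emb))                          # z, z_sym, z_mod, z_top, ... plus "sparsity"
print(float((emb["z"] ** 2).sum()) ** 0.5)  # ~1.0 (the fused vector is unit-normalized)
```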
_feature_hash(feats_c, dim, seed + HASH_SEEDS[6])\n except Exception:\n z_contracts = np.zeros((dim,), dtype=np.float32)\n\n # Optional textual channel from artifacts(kind=\"source\")\n z_text = np.zeros((dim,), dtype=np.float32)\n if include_text and text_max_bytes and text_max_bytes > 0 and sources_root:\n ignore_list = [os.path.normpath(p) for p in (ignore or [])]\n def _ignored(rel: str) -> bool:\n r = os.path.normpath(rel)\n for pat in ignore_list:\n if r == pat or r.startswith(pat + os.sep):\n return True\n return False\n text_acc: Dict[int, float] = {}\n bytes_budget: Optional[int] = int(text_max_bytes) if int(text_max_bytes) > 0 else None\n token_cap = int(max(0, int(max_text_tokens))) if max_text_tokens is not None else 0\n tokens_emitted = 0\n files: List[str] = []\n try:\n for art in pg.artifacts(\"source\"):\n rel = _artifact_rel_path(art)\n if not rel or _ignored(rel):\n continue\n ap = os.path.abspath(os.path.join(sources_root, rel))\n if os.path.isfile(ap):\n files.append(ap)\n except Exception:\n files = []\n # Fallback if ProgramGraph provided no artifacts\n if not files:\n try:\n exts_all = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n for root, dirs, fnames in os.walk(sources_root):\n rel_root = os.path.relpath(root, sources_root)\n if _ignored(rel_root):\n dirs[:] = []\n continue\n dirs[:] = [d for d in dirs if not _ignored(os.path.join(rel_root, d))]\n for fn in fnames:\n fp = os.path.join(root, fn)\n rel_fp = os.path.relpath(fp, sources_root)\n if _ignored(rel_fp):\n continue\n if os.path.splitext(fp)[1].lower() in exts_all:\n files.append(fp)\n except Exception:\n files = []\n def _process_file(fp: str) -> Tuple[int, int, Dict[int, float]]:\n try:\n per_cap = int(text_max_bytes)\n with open(fp, \"rb\") as fh:\n raw = fh.read(per_cap)\n if b\"\\x00\" in raw:\n return (0, 0, {})\n n_bytes = len(raw)\n text = raw.decode(\"utf-8\", errors=\"ignore\").lower()\n toks = re.findall(r\"[a-zA-Z0-9_]+\", text)\n if not toks:\n return (n_bytes, 0, {})\n n = 3\n stride = 2\n acc_local: Dict[int, float] = {}\n ng_count = 0\n for i in range(0, max(0, len(toks) - n + 1), stride):\n key = f\"text:{' '.join(toks[i:i+n])}\"\n _accumulate_sparse(acc_local, key, float(text_weight), dim, seed + HASH_SEEDS[4])\n ng_count += 1\n return (n_bytes, ng_count, acc_local)\n except Exception:\n return (0, 0, {})\n with ThreadPoolExecutor(max_workers=min(8, max(2, os.cpu_count() or 8))) as ex:\n futures = [ex.submit(_process_file, fp) for fp in files]\n for fut in as_completed(futures):\n if bytes_budget is not None and bytes_budget <= 0:\n break\n n_bytes, ng_count, acc_local = fut.result()\n if bytes_budget is not None:\n bytes_budget -= n_bytes\n if bytes_budget is not None and bytes_budget <= 0:\n pass\n if not acc_local:\n continue\n tokens_emitted += int(ng_count)\n for k, v in acc_local.items():\n text_acc[k] = float(text_acc.get(k, 0.0)) + float(v)\n if token_cap and tokens_emitted >= token_cap:\n break\n if text_acc:\n z_text = _dense_from_sparse(text_acc, dim)\n\n def _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n z = (\n _unit(z_sym)\n + _unit(z_mod)\n + _unit(z_top)\n + float(text_weight) * _unit(z_text)\n + float(types_weight) * _unit(z_types)\n + float(calls_weight) * _unit(z_calls)\n + float(max(0.0, contracts_weight)) * _unit(z_contracts)\n + float(tests_weight) * _unit(np.zeros_like(z_sym))\n )\n nrm = float(np.linalg.norm(z))\n if nrm > 0:\n z = z / nrm\n result = {\n \"z\": 
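Two details worth noting in the text channel above. First, each worker reads up to `text_max_bytes` from its own file and the shared budget is only decremented when a future completes, so the byte cap is approximate under concurrency (the `pass` branch after the decrement is a no-op; the actual break happens at the top of the next loop iteration). Second, the hashed keys are token trigrams with stride 2; a minimal reproduction of the key construction:

```python
import re

# Reproduces the n-gram windows _process_file hashes: lowercase, split on
# [a-zA-Z0-9_]+ runs, then emit "text:<t_i t_i+1 t_i+2>" keys with stride 2.
text = "def build_program_embedding(pg, dim=128):".lower()
toks = re.findall(r"[a-zA-Z0-9_]+", text)
n, stride = 3, 2
keys = [f"text:{' '.join(toks[i:i+n])}" for i in range(0, max(0, len(toks) - n + 1), stride)]
print(toks)  # ['def', 'build_program_embedding', 'pg', 'dim', '128']
print(keys)  # ['text:def build_program_embedding pg', 'text:pg dim 128']
```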
z.astype(np.float32),\n \"z_sym\": z_sym.astype(np.float32),\n \"z_doc\": z_doc.astype(np.float32),\n \"z_mod\": z_mod.astype(np.float32),\n \"z_top\": z_top.astype(np.float32),\n \"z_types\": z_types.astype(np.float32),\n \"z_calls\": z_calls.astype(np.float32),\n \"z_contracts\": z_contracts.astype(np.float32),\n \"z_tests\": np.zeros((dim,), dtype=np.float32),\n \"sparsity\": {\n \"z_sym\": float((np.count_nonzero(z_sym) / float(max(1, z_sym.size)))),\n \"z_doc\": float((np.count_nonzero(z_doc) / float(max(1, z_doc.size)))),\n \"z_mod\": float((np.count_nonzero(z_mod) / float(max(1, z_mod.size)))),\n \"z_top\": float((np.count_nonzero(z_top) / float(max(1, z_top.size)))),\n \"z_text\": float((np.count_nonzero(z_text) / float(max(1, z_text.size)))),\n \"z_types\": float((np.count_nonzero(z_types) / float(max(1, z_types.size)))),\n \"z_calls\": float((np.count_nonzero(z_calls) / float(max(1, z_calls.size)))),\n \"z_contracts\": float((np.count_nonzero(z_contracts) / float(max(1, z_contracts.size)))),\n \"z_tests\": 0.0,\n },\n }\n if include_text:\n result[\"z_text\"] = z_text.astype(np.float32)\n return result\n\n\ndef build_subgraph_embedding_from_program(\n pg: ProgramGraph,\n *,\n sources_root: Optional[str] = None,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_owners: Optional[List[str]] = None,\n include_artifact_paths: Optional[List[str]] = None,\n include_text: bool = False,\n text_max_bytes: int = 0,\n max_text_tokens: int = 0,\n text_weight: float = 0.25,\n calls_weight: float = 0.25,\n types_weight: float = 0.20,\n tests_weight: float = 0.15,\n graph_prop_hops: int = 0,\n graph_prop_damp: float = 0.85,\n) -> Dict[str, np.ndarray]:","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding.build_subgraph_embedding_from_program","uri":"program://Program_Conditioned_Adapter/function/modules.embedding.build_subgraph_embedding_from_program#L325-L473","kind":"function","name":"build_subgraph_embedding_from_program","path":"modules/embedding.py","language":"python","start_line":325,"end_line":473,"context_start_line":305,"context_end_line":493,"code":" \"z_calls\": z_calls.astype(np.float32),\n \"z_contracts\": z_contracts.astype(np.float32),\n \"z_tests\": np.zeros((dim,), dtype=np.float32),\n \"sparsity\": {\n \"z_sym\": float((np.count_nonzero(z_sym) / float(max(1, z_sym.size)))),\n \"z_doc\": float((np.count_nonzero(z_doc) / float(max(1, z_doc.size)))),\n \"z_mod\": float((np.count_nonzero(z_mod) / float(max(1, z_mod.size)))),\n \"z_top\": float((np.count_nonzero(z_top) / float(max(1, z_top.size)))),\n \"z_text\": float((np.count_nonzero(z_text) / float(max(1, z_text.size)))),\n \"z_types\": float((np.count_nonzero(z_types) / float(max(1, z_types.size)))),\n \"z_calls\": float((np.count_nonzero(z_calls) / float(max(1, z_calls.size)))),\n \"z_contracts\": float((np.count_nonzero(z_contracts) / float(max(1, z_contracts.size)))),\n \"z_tests\": 0.0,\n },\n }\n if include_text:\n result[\"z_text\"] = z_text.astype(np.float32)\n return result\n\n\ndef build_subgraph_embedding_from_program(\n pg: ProgramGraph,\n *,\n sources_root: Optional[str] = None,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_owners: Optional[List[str]] = None,\n include_artifact_paths: Optional[List[str]] = None,\n include_text: bool = False,\n text_max_bytes: int = 0,\n max_text_tokens: int = 0,\n text_weight: float = 0.25,\n calls_weight: float = 0.25,\n types_weight: 
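The `"sparsity"` block in the result above is simply per-channel occupancy, the fraction of non-zero buckets, with `max(1, size)` guarding a zero-length vector:

```python
import numpy as np

def occupancy(v: np.ndarray) -> float:
    # count_nonzero / size, as in the sparsity dict above.
    return float(np.count_nonzero(v) / max(1, v.size))

z = np.zeros(128, dtype=np.float32)
z[[3, 17, 40]] = 1.0
print(occupancy(z))  # 0.0234375 (3 of 128 buckets occupied)
```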
float = 0.20,\n tests_weight: float = 0.15,\n graph_prop_hops: int = 0,\n graph_prop_damp: float = 0.85,\n) -> Dict[str, np.ndarray]:\n owners_set = set(include_owners or [])\n files_set_norm: Optional[Set[str]] = None\n if include_artifact_paths:\n files_set_norm = set([p.replace(\"\\\\\", \"/\") for p in include_artifact_paths])\n # Entities filtered by owners\n sym_feats: List[Tuple[str, float]] = []\n mod_feats: List[Tuple[str, float]] = []\n try:\n for e in pg.entities():\n if owners_set and (e.owner not in owners_set):\n continue\n sym_feats.append((f\"{e.kind}:{e.name}\", 1.0))\n if e.owner:\n mod_feats.append((f\"owner:{e.owner}\", 0.2))\n except Exception:\n pass\n # Topology filtered by owners\n indeg: Dict[str, int] = {}\n outdeg: Dict[str, int] = {}\n try:\n ent_owner: Dict[str, Optional[str]] = {}\n for e in pg.entities():\n ent_owner[e.id] = e.owner\n for ed in pg.edges():\n ow_src = ent_owner.get(ed.src)\n ow_dst = ent_owner.get(ed.dst)\n if owners_set and (ow_src not in owners_set and ow_dst not in owners_set):\n continue\n if ow_src:\n outdeg[ow_src] = outdeg.get(ow_src, 0) + 1\n if ow_dst:\n indeg[ow_dst] = indeg.get(ow_dst, 0) + 1\n except Exception:\n pass\n topo_feats: List[Tuple[str, float]] = []\n for ow in sorted(set([o for o in (include_owners or []) if o])):\n topo_feats.append((f\"indeg:{ow}\", float(indeg.get(ow, 0))))\n topo_feats.append((f\"outdeg:{ow}\", float(outdeg.get(ow, 0))))\n # Text channel: only from included artifact paths\n z_text = np.zeros((dim,), dtype=np.float32)\n if include_text and text_max_bytes and text_max_bytes > 0 and sources_root and files_set_norm:\n text_acc: Dict[int, float] = {}\n bytes_budget: Optional[int] = int(text_max_bytes) if int(text_max_bytes) > 0 else None\n token_cap = int(max(0, int(max_text_tokens))) if max_text_tokens is not None else 0\n tokens_emitted = 0\n files: List[str] = []\n try:\n for art in pg.artifacts(\"source\"):\n rel = _artifact_rel_path(art)\n if not rel:\n continue\n # allow both repo-absolute and rel forms in allowlist\n rel_norm = rel.replace(\"\\\\\", \"/\")\n abs_norm = os.path.abspath(os.path.join(sources_root, rel_norm))\n if (rel_norm in files_set_norm) or (abs_norm in files_set_norm):\n files.append(abs_norm)\n except Exception:\n files = []\n def _process_file(fp: str) -> Tuple[int, int, Dict[int, float]]:\n try:\n per_cap = int(text_max_bytes)\n with open(fp, \"rb\") as fh:\n raw = fh.read(per_cap)\n if b\"\\x00\" in raw:\n return (0, 0, {})\n n_bytes = len(raw)\n text = raw.decode(\"utf-8\", errors=\"ignore\").lower()\n toks = re.findall(r\"[a-zA-Z0-9_]+\", text)\n if not toks:\n return (n_bytes, 0, {})\n n = 3\n stride = 2\n acc_local: Dict[int, float] = {}\n ng_count = 0\n for i in range(0, max(0, len(toks) - n + 1), stride):\n key = f\"text:{' '.join(toks[i:i+n])}\"\n _accumulate_sparse(acc_local, key, float(text_weight), dim, seed + HASH_SEEDS[4])\n ng_count += 1\n return (n_bytes, ng_count, acc_local)\n except Exception:\n return (0, 0, {})\n with ThreadPoolExecutor(max_workers=min(8, max(2, os.cpu_count() or 8))) as ex:\n futures = [ex.submit(_process_file, fp) for fp in files]\n for fut in as_completed(futures):\n if bytes_budget is not None and bytes_budget <= 0:\n break\n n_bytes, ng_count, acc_local = fut.result()\n if bytes_budget is not None:\n bytes_budget -= n_bytes\n if not acc_local:\n continue\n tokens_emitted += int(ng_count)\n for k, v in acc_local.items():\n text_acc[k] = float(text_acc.get(k, 0.0)) + float(v)\n if token_cap and tokens_emitted >= token_cap:\n break\n if 
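In the subgraph variant, the text channel reads only allowlisted artifacts, and the allowlist accepts either the repo-relative or the absolute form of a path after normalizing backslashes. A minimal re-enactment with hypothetical paths:

```python
import os

include_artifact_paths = ["modules/embedding.py"]
files_set_norm = {p.replace("\\", "/") for p in include_artifact_paths}

sources_root = "/tmp/repo"
rel = "modules\\embedding.py"          # e.g. a Windows-style artifact path
rel_norm = rel.replace("\\", "/")      # normalized to forward slashes
abs_norm = os.path.abspath(os.path.join(sources_root, rel_norm))
# Accepted if either form appears in the allowlist:
print((rel_norm in files_set_norm) or (abs_norm in files_set_norm))  # True
```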
text_acc:\n z_text = _dense_from_sparse(text_acc, dim)\n z_sym = _feature_hash(sym_feats, dim, seed + HASH_SEEDS[0])\n z_mod = _feature_hash(mod_feats, dim, seed + HASH_SEEDS[2])\n z_top = _feature_hash(topo_feats, dim, seed + HASH_SEEDS[3])\n z_types = np.zeros((dim,), dtype=np.float32)\n z_calls = np.zeros((dim,), dtype=np.float32)\n def _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n z = (\n _unit(z_sym)\n + _unit(z_mod)\n + _unit(z_top)\n + float(text_weight) * _unit(z_text)\n + float(types_weight) * _unit(z_types)\n + float(calls_weight) * _unit(z_calls)\n + float(tests_weight) * _unit(np.zeros_like(z_sym))\n )\n nrm = float(np.linalg.norm(z))\n if nrm > 0:\n z = z / nrm\n result = {\n \"z\": z.astype(np.float32),\n \"z_sym\": z_sym.astype(np.float32),\n \"z_doc\": np.zeros((dim,), dtype=np.float32),\n \"z_mod\": z_mod.astype(np.float32),\n \"z_top\": z_top.astype(np.float32),\n \"z_types\": z_types.astype(np.float32),\n \"z_calls\": z_calls.astype(np.float32),\n \"z_tests\": np.zeros((dim,), dtype=np.float32),\n }\n if include_text:\n result[\"z_text\"] = z_text.astype(np.float32)\n return result\n\ndef join_embeddings(z_old: Optional[np.ndarray], z_new: Optional[np.ndarray], *, w_old: float = 1.0, w_new: float = 1.0) -> Optional[np.ndarray]:\n \"\"\"Monotone anytime join of two embedding vectors.\n\n Returns a unit-normalized convex-like combination w_old*unit(z_old) + w_new*unit(z_new).\n If both are None, returns None.\n \"\"\"\n try:\n def _unit(a: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(a))\n return (a / n) if n > 0 else a\n if z_old is None and z_new is None:\n return None\n if z_old is None:\n return _unit((max(0.0, float(w_new)) * _unit(z_new.astype(np.float32))))\n if z_new is None:\n return _unit((max(0.0, float(w_old)) * _unit(z_old.astype(np.float32))))\n a = max(0.0, float(w_old)) * _unit(z_old.astype(np.float32))\n b = max(0.0, float(w_new)) * _unit(z_new.astype(np.float32))\n return _unit(a + b)","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding.join_embeddings","uri":"program://Program_Conditioned_Adapter/function/modules.embedding.join_embeddings#L475-L495","kind":"function","name":"join_embeddings","path":"modules/embedding.py","language":"python","start_line":475,"end_line":495,"context_start_line":455,"context_end_line":495,"code":" + float(calls_weight) * _unit(z_calls)\n + float(tests_weight) * _unit(np.zeros_like(z_sym))\n )\n nrm = float(np.linalg.norm(z))\n if nrm > 0:\n z = z / nrm\n result = {\n \"z\": z.astype(np.float32),\n \"z_sym\": z_sym.astype(np.float32),\n \"z_doc\": np.zeros((dim,), dtype=np.float32),\n \"z_mod\": z_mod.astype(np.float32),\n \"z_top\": z_top.astype(np.float32),\n \"z_types\": z_types.astype(np.float32),\n \"z_calls\": z_calls.astype(np.float32),\n \"z_tests\": np.zeros((dim,), dtype=np.float32),\n }\n if include_text:\n result[\"z_text\"] = z_text.astype(np.float32)\n return result\n\ndef join_embeddings(z_old: Optional[np.ndarray], z_new: Optional[np.ndarray], *, w_old: float = 1.0, w_new: float = 1.0) -> Optional[np.ndarray]:\n \"\"\"Monotone anytime join of two embedding vectors.\n\n Returns a unit-normalized convex-like combination w_old*unit(z_old) + w_new*unit(z_new).\n If both are None, returns None.\n \"\"\"\n try:\n def _unit(a: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(a))\n return (a / n) if n > 0 else a\n if z_old is 
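`join_embeddings` normalizes each side before and after mixing, so the result stays on the unit sphere regardless of the weights' absolute scale, and a missing side degrades to the other. A tiny demonstration of the same arithmetic:

```python
import numpy as np

def unit(a: np.ndarray) -> np.ndarray:
    # Unit-normalize; zero vectors pass through unchanged.
    n = float(np.linalg.norm(a))
    return a / n if n > 0 else a

z_old = unit(np.array([1.0, 0.0, 0.0], dtype=np.float32))
z_new = unit(np.array([0.0, 1.0, 0.0], dtype=np.float32))
z = unit(1.0 * z_old + 1.0 * z_new)   # w_old = w_new = 1.0
print(z)                              # ~[0.7071, 0.7071, 0.0]
print(float(np.linalg.norm(z)))       # 1.0, independent of the weights' scale
```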
None and z_new is None:\n return None\n if z_old is None:\n return _unit((max(0.0, float(w_new)) * _unit(z_new.astype(np.float32))))\n if z_new is None:\n return _unit((max(0.0, float(w_old)) * _unit(z_old.astype(np.float32))))\n a = max(0.0, float(w_old)) * _unit(z_old.astype(np.float32))\n b = max(0.0, float(w_new)) * _unit(z_new.astype(np.float32))\n return _unit(a + b)\n except Exception:\n return z_new if z_new is not None else z_old","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding._unit","uri":"program://Program_Conditioned_Adapter/function/modules.embedding._unit#L482-L484","kind":"function","name":"_unit","path":"modules/embedding.py","language":"python","start_line":482,"end_line":484,"context_start_line":462,"context_end_line":495,"code":" \"z\": z.astype(np.float32),\n \"z_sym\": z_sym.astype(np.float32),\n \"z_doc\": np.zeros((dim,), dtype=np.float32),\n \"z_mod\": z_mod.astype(np.float32),\n \"z_top\": z_top.astype(np.float32),\n \"z_types\": z_types.astype(np.float32),\n \"z_calls\": z_calls.astype(np.float32),\n \"z_tests\": np.zeros((dim,), dtype=np.float32),\n }\n if include_text:\n result[\"z_text\"] = z_text.astype(np.float32)\n return result\n\ndef join_embeddings(z_old: Optional[np.ndarray], z_new: Optional[np.ndarray], *, w_old: float = 1.0, w_new: float = 1.0) -> Optional[np.ndarray]:\n \"\"\"Monotone anytime join of two embedding vectors.\n\n Returns a unit-normalized convex-like combination w_old*unit(z_old) + w_new*unit(z_new).\n If both are None, returns None.\n \"\"\"\n try:\n def _unit(a: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(a))\n return (a / n) if n > 0 else a\n if z_old is None and z_new is None:\n return None\n if z_old is None:\n return _unit((max(0.0, float(w_new)) * _unit(z_new.astype(np.float32))))\n if z_new is None:\n return _unit((max(0.0, float(w_old)) * _unit(z_old.astype(np.float32))))\n a = max(0.0, float(w_old)) * _unit(z_old.astype(np.float32))\n b = max(0.0, float(w_new)) * _unit(z_new.astype(np.float32))\n return _unit(a + b)\n except Exception:\n return z_new if z_new is not None else z_old","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding._ignored","uri":"program://Program_Conditioned_Adapter/function/modules.embedding._ignored#L199-L204","kind":"function","name":"_ignored","path":"modules/embedding.py","language":"python","start_line":199,"end_line":204,"context_start_line":179,"context_end_line":224,"code":" z_types = np.zeros((dim,), dtype=np.float32)\n z_calls = np.zeros((dim,), dtype=np.float32)\n # Optional contracts channel: hash key/value hints from ProgramContracts\n z_contracts = np.zeros((dim,), dtype=np.float32)\n try:\n if contracts_kv:\n feats_c: List[Tuple[str, float]] = []\n for k, v in list(contracts_kv.items()):\n if v is None:\n continue\n feats_c.append((f\"contracts:{str(k)}:{str(v)}\", 1.0))\n if feats_c:\n z_contracts = _feature_hash(feats_c, dim, seed + HASH_SEEDS[6])\n except Exception:\n z_contracts = np.zeros((dim,), dtype=np.float32)\n\n # Optional textual channel from artifacts(kind=\"source\")\n z_text = np.zeros((dim,), dtype=np.float32)\n if include_text and text_max_bytes and text_max_bytes > 0 and sources_root:\n ignore_list = [os.path.normpath(p) for p in (ignore or [])]\n def _ignored(rel: str) -> bool:\n r = os.path.normpath(rel)\n for pat 
in ignore_list:\n if r == pat or r.startswith(pat + os.sep):\n return True\n return False\n text_acc: Dict[int, float] = {}\n bytes_budget: Optional[int] = int(text_max_bytes) if int(text_max_bytes) > 0 else None\n token_cap = int(max(0, int(max_text_tokens))) if max_text_tokens is not None else 0\n tokens_emitted = 0\n files: List[str] = []\n try:\n for art in pg.artifacts(\"source\"):\n rel = _artifact_rel_path(art)\n if not rel or _ignored(rel):\n continue\n ap = os.path.abspath(os.path.join(sources_root, rel))\n if os.path.isfile(ap):\n files.append(ap)\n except Exception:\n files = []\n # Fallback if ProgramGraph provided no artifacts\n if not files:\n try:\n exts_all = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n for root, dirs, fnames in os.walk(sources_root):","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.embedding._process_file","uri":"program://Program_Conditioned_Adapter/function/modules.embedding._process_file#L401-L423","kind":"function","name":"_process_file","path":"modules/embedding.py","language":"python","start_line":401,"end_line":423,"context_start_line":381,"context_end_line":443,"code":" # Text channel: only from included artifact paths\n z_text = np.zeros((dim,), dtype=np.float32)\n if include_text and text_max_bytes and text_max_bytes > 0 and sources_root and files_set_norm:\n text_acc: Dict[int, float] = {}\n bytes_budget: Optional[int] = int(text_max_bytes) if int(text_max_bytes) > 0 else None\n token_cap = int(max(0, int(max_text_tokens))) if max_text_tokens is not None else 0\n tokens_emitted = 0\n files: List[str] = []\n try:\n for art in pg.artifacts(\"source\"):\n rel = _artifact_rel_path(art)\n if not rel:\n continue\n # allow both repo-absolute and rel forms in allowlist\n rel_norm = rel.replace(\"\\\\\", \"/\")\n abs_norm = os.path.abspath(os.path.join(sources_root, rel_norm))\n if (rel_norm in files_set_norm) or (abs_norm in files_set_norm):\n files.append(abs_norm)\n except Exception:\n files = []\n def _process_file(fp: str) -> Tuple[int, int, Dict[int, float]]:\n try:\n per_cap = int(text_max_bytes)\n with open(fp, \"rb\") as fh:\n raw = fh.read(per_cap)\n if b\"\\x00\" in raw:\n return (0, 0, {})\n n_bytes = len(raw)\n text = raw.decode(\"utf-8\", errors=\"ignore\").lower()\n toks = re.findall(r\"[a-zA-Z0-9_]+\", text)\n if not toks:\n return (n_bytes, 0, {})\n n = 3\n stride = 2\n acc_local: Dict[int, float] = {}\n ng_count = 0\n for i in range(0, max(0, len(toks) - n + 1), stride):\n key = f\"text:{' '.join(toks[i:i+n])}\"\n _accumulate_sparse(acc_local, key, float(text_weight), dim, seed + HASH_SEEDS[4])\n ng_count += 1\n return (n_bytes, ng_count, acc_local)\n except Exception:\n return (0, 0, {})\n with ThreadPoolExecutor(max_workers=min(8, max(2, os.cpu_count() or 8))) as ex:\n futures = [ex.submit(_process_file, fp) for fp in files]\n for fut in as_completed(futures):\n if bytes_budget is not None and bytes_budget <= 0:\n break\n n_bytes, ng_count, acc_local = fut.result()\n if bytes_budget is not None:\n bytes_budget -= n_bytes\n if not acc_local:\n continue\n tokens_emitted += int(ng_count)\n for k, v in acc_local.items():\n text_acc[k] = float(text_acc.get(k, 0.0)) + float(v)\n if token_cap and tokens_emitted >= token_cap:\n break\n if text_acc:\n z_text = _dense_from_sparse(text_acc, dim)\n z_sym = _feature_hash(sym_feats, dim, seed + HASH_SEEDS[0])\n z_mod = 
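The `_ignored` predicate matches an entry exactly or as a directory prefix (`pattern + os.sep`), which is why a sibling like `artifacts_extra` is not swallowed by an `artifacts` rule:

```python
import os

ignore_list = [os.path.normpath(p) for p in ["artifacts", "examples/tmp"]]

def ignored(rel: str) -> bool:
    # Exact match, or strictly beneath an ignored directory.
    r = os.path.normpath(rel)
    return any(r == pat or r.startswith(pat + os.sep) for pat in ignore_list)

print(ignored("artifacts/base_adapters/manifest.json"))  # True (under "artifacts")
print(ignored("artifacts_extra/readme.md"))              # False (no os.sep boundary)
```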
_feature_hash(mod_feats, dim, seed + HASH_SEEDS[2])\n z_top = _feature_hash(topo_feats, dim, seed + HASH_SEEDS[3])","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runtime","uri":"program://Program_Conditioned_Adapter/module/modules.runtime#L1-L115","kind":"module","name":"modules.runtime","path":"modules/runtime.py","language":"python","start_line":1,"end_line":115,"context_start_line":1,"context_end_line":115,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple\nimport os\nimport subprocess\n\nimport numpy as np\nimport torch\n\n# Reuse existing module utilities\n# No example-specific imports in core runtime\nfrom model.inspect import detect_target_names_from_model_full\n\n@dataclass\nclass OTFFlags:\n of_sources: str = \"question\" # \"zoom\" | \"question\"\n zoom_symbol: Optional[str] = None\n zoom_radius: int = 1\n include_text: bool = True\n text_max_bytes: int = 250_000\n max_text_tokens: int = 200_000\n text_weight: float = 0.25\n\n\ndef get_device() -> torch.device:\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n if getattr(torch.backends, \"mps\", None) and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return torch.device(\"cpu\")\n\n\ndef infer_target_names(model_id: str) -> Dict[str, str]:\n names = detect_target_names_from_model_full(model_id, target_regex=None) or {}\n # names maps short -> path within the first layer subtree\n # Example: {\"q_proj\": \"self_attn.q_proj\", \"o_proj\": \"self_attn.o_proj\", ...}\n return names\n\n\n\n\ndef build_per_layer_deltas(\n adapters: Dict[str, List[Dict[str, Dict[str, np.ndarray]]]],\n target_names: List[str],\n *,\n g_sub: float = 1.0,\n base_adapters: Optional[Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]] = None,\n) -> List[Dict[str, torch.Tensor]]:\n \"\"\"Return per-layer dict of short target name -> delta weight tensor (out,in).\"\"\"\n layers_out: List[Dict[str, torch.Tensor]] = []\n num_layers = len(adapters.get(\"layers\", []))\n for i in range(num_layers):\n dest: Dict[str, torch.Tensor] = {}\n cur = adapters[\"layers\"][i]\n base = (\n base_adapters[\"layers\"][i]\n if base_adapters is not None and i < len(base_adapters.get(\"layers\", []))\n else None\n )\n for name in target_names:\n acc: Optional[torch.Tensor] = None\n # Base component\n if base is not None and name in base:\n A = torch.from_numpy(base[name][\"A\"]).to(torch.float32)\n B = torch.from_numpy(base[name][\"B\"]).to(torch.float32)\n acc = (A @ B)\n # Subgraph component\n if name in cur:\n A = torch.from_numpy(cur[name][\"A\"]).to(torch.float32)\n B = torch.from_numpy(cur[name][\"B\"]).to(torch.float32)\n sub = (A @ B)\n acc = (\n sub\n if acc is None\n else ((1.0 - float(g_sub)) * acc + float(g_sub) * sub)\n )\n if acc is not None:\n dest[name] = acc.contiguous()\n layers_out.append(dest)\n return layers_out\n\n\ndef run_program_adapter(\n model: str,\n adapters_npz: str,\n program: str,\n prompt: str,\n *,\n cache_dir: Optional[str] = None,\n device: str = \"cpu\",\n gpu_ids: Optional[str] = None,\n context_tokens: int = 5000,\n ignore: Optional[List[str]] = None,\n timeout_sec: Optional[int] = None,\n) -> Tuple[int, str, str]:\n \"\"\"Produce an answer with citations via in-process orchestrator; return (exit_code, stdout, stderr).\"\"\"\n try:\n from modules.runner import generate_answer\n text = generate_answer(\n 
model_id=model,\n adapters_npz=adapters_npz,\n prompt=prompt,\n program_root=program,\n cache_dir=cache_dir,\n device=device,\n gpu_ids=gpu_ids,\n pack_context=True,\n context_tokens=int(context_tokens),\n require_citations=True,\n )\n return 0, text, \"\"\n except Exception as e:\n return 1, \"\", str(e)\n","source_hash":"7cf1fff47ef64c55d62db5bbc76e1e5724bfa452ac4dd0aab0a8b78b84144154","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runtime.OTFFlags","uri":"program://Program_Conditioned_Adapter/class/modules.runtime.OTFFlags#L16-L23","kind":"class","name":"OTFFlags","path":"modules/runtime.py","language":"python","start_line":16,"end_line":23,"context_start_line":1,"context_end_line":43,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple\nimport os\nimport subprocess\n\nimport numpy as np\nimport torch\n\n# Reuse existing module utilities\n# No example-specific imports in core runtime\nfrom model.inspect import detect_target_names_from_model_full\n\n@dataclass\nclass OTFFlags:\n of_sources: str = \"question\" # \"zoom\" | \"question\"\n zoom_symbol: Optional[str] = None\n zoom_radius: int = 1\n include_text: bool = True\n text_max_bytes: int = 250_000\n max_text_tokens: int = 200_000\n text_weight: float = 0.25\n\n\ndef get_device() -> torch.device:\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n if getattr(torch.backends, \"mps\", None) and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return torch.device(\"cpu\")\n\n\ndef infer_target_names(model_id: str) -> Dict[str, str]:\n names = detect_target_names_from_model_full(model_id, target_regex=None) or {}\n # names maps short -> path within the first layer subtree\n # Example: {\"q_proj\": \"self_attn.q_proj\", \"o_proj\": \"self_attn.o_proj\", ...}\n return names\n\n\n\n\ndef build_per_layer_deltas(","source_hash":"7cf1fff47ef64c55d62db5bbc76e1e5724bfa452ac4dd0aab0a8b78b84144154","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runtime.get_device","uri":"program://Program_Conditioned_Adapter/function/modules.runtime.get_device#L26-L31","kind":"function","name":"get_device","path":"modules/runtime.py","language":"python","start_line":26,"end_line":31,"context_start_line":6,"context_end_line":51,"code":"import subprocess\n\nimport numpy as np\nimport torch\n\n# Reuse existing module utilities\n# No example-specific imports in core runtime\nfrom model.inspect import detect_target_names_from_model_full\n\n@dataclass\nclass OTFFlags:\n of_sources: str = \"question\" # \"zoom\" | \"question\"\n zoom_symbol: Optional[str] = None\n zoom_radius: int = 1\n include_text: bool = True\n text_max_bytes: int = 250_000\n max_text_tokens: int = 200_000\n text_weight: float = 0.25\n\n\ndef get_device() -> torch.device:\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n if getattr(torch.backends, \"mps\", None) and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return torch.device(\"cpu\")\n\n\ndef infer_target_names(model_id: str) -> Dict[str, str]:\n names = detect_target_names_from_model_full(model_id, target_regex=None) or {}\n # names maps short -> path within the first layer subtree\n # Example: {\"q_proj\": \"self_attn.q_proj\", \"o_proj\": \"self_attn.o_proj\", ...}\n return names\n\n\n\n\ndef build_per_layer_deltas(\n adapters: Dict[str, List[Dict[str, Dict[str, np.ndarray]]]],\n target_names: List[str],\n *,\n g_sub: float = 
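`run_program_adapter` deliberately never raises: success is `(0, text, "")` and any failure is folded into `(1, "", str(e))`, mirroring a subprocess-style exit code. A stubbed illustration of the same contract (`run_like` and its stub logic are placeholders, not the real orchestrator):

```python
from typing import Tuple

def run_like(prompt: str) -> Tuple[int, str, str]:
    # (exit_code, stdout, stderr): errors are returned, never raised.
    try:
        if not prompt:
            raise ValueError("empty prompt")
        return 0, f"answer for: {prompt}", ""
    except Exception as e:
        return 1, "", str(e)

print(run_like("Explain generation"))  # (0, 'answer for: Explain generation', '')
print(run_like(""))                    # (1, '', 'empty prompt')
```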
1.0,\n base_adapters: Optional[Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]] = None,\n) -> List[Dict[str, torch.Tensor]]:\n \"\"\"Return per-layer dict of short target name -> delta weight tensor (out,in).\"\"\"\n layers_out: List[Dict[str, torch.Tensor]] = []","source_hash":"7cf1fff47ef64c55d62db5bbc76e1e5724bfa452ac4dd0aab0a8b78b84144154","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runtime.infer_target_names","uri":"program://Program_Conditioned_Adapter/function/modules.runtime.infer_target_names#L34-L38","kind":"function","name":"infer_target_names","path":"modules/runtime.py","language":"python","start_line":34,"end_line":38,"context_start_line":14,"context_end_line":58,"code":"\n@dataclass\nclass OTFFlags:\n of_sources: str = \"question\" # \"zoom\" | \"question\"\n zoom_symbol: Optional[str] = None\n zoom_radius: int = 1\n include_text: bool = True\n text_max_bytes: int = 250_000\n max_text_tokens: int = 200_000\n text_weight: float = 0.25\n\n\ndef get_device() -> torch.device:\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n if getattr(torch.backends, \"mps\", None) and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return torch.device(\"cpu\")\n\n\ndef infer_target_names(model_id: str) -> Dict[str, str]:\n names = detect_target_names_from_model_full(model_id, target_regex=None) or {}\n # names maps short -> path within the first layer subtree\n # Example: {\"q_proj\": \"self_attn.q_proj\", \"o_proj\": \"self_attn.o_proj\", ...}\n return names\n\n\n\n\ndef build_per_layer_deltas(\n adapters: Dict[str, List[Dict[str, Dict[str, np.ndarray]]]],\n target_names: List[str],\n *,\n g_sub: float = 1.0,\n base_adapters: Optional[Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]] = None,\n) -> List[Dict[str, torch.Tensor]]:\n \"\"\"Return per-layer dict of short target name -> delta weight tensor (out,in).\"\"\"\n layers_out: List[Dict[str, torch.Tensor]] = []\n num_layers = len(adapters.get(\"layers\", []))\n for i in range(num_layers):\n dest: Dict[str, torch.Tensor] = {}\n cur = adapters[\"layers\"][i]\n base = (\n base_adapters[\"layers\"][i]\n if base_adapters is not None and i < len(base_adapters.get(\"layers\", []))","source_hash":"7cf1fff47ef64c55d62db5bbc76e1e5724bfa452ac4dd0aab0a8b78b84144154","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runtime.build_per_layer_deltas","uri":"program://Program_Conditioned_Adapter/function/modules.runtime.build_per_layer_deltas#L43-L81","kind":"function","name":"build_per_layer_deltas","path":"modules/runtime.py","language":"python","start_line":43,"end_line":81,"context_start_line":23,"context_end_line":101,"code":" text_weight: float = 0.25\n\n\ndef get_device() -> torch.device:\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n if getattr(torch.backends, \"mps\", None) and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n return torch.device(\"cpu\")\n\n\ndef infer_target_names(model_id: str) -> Dict[str, str]:\n names = detect_target_names_from_model_full(model_id, target_regex=None) or {}\n # names maps short -> path within the first layer subtree\n # Example: {\"q_proj\": \"self_attn.q_proj\", \"o_proj\": \"self_attn.o_proj\", ...}\n return names\n\n\n\n\ndef build_per_layer_deltas(\n adapters: Dict[str, List[Dict[str, Dict[str, np.ndarray]]]],\n target_names: List[str],\n *,\n g_sub: float = 1.0,\n base_adapters: Optional[Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]] = None,\n) -> 
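`get_device` prefers CUDA, then Apple's MPS backend, then CPU; the `getattr` guard keeps it safe on torch builds without an `mps` attribute. A standalone transcription:

```python
import torch

def pick_device() -> torch.device:
    # Preference order: cuda > mps > cpu, as in get_device above.
    if torch.cuda.is_available():
        return torch.device("cuda")
    mps = getattr(torch.backends, "mps", None)
    if mps is not None and torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")

print(pick_device())  # e.g. device(type='cpu') on a machine with no accelerator
```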
List[Dict[str, torch.Tensor]]:\n \"\"\"Return per-layer dict of short target name -> delta weight tensor (out,in).\"\"\"\n layers_out: List[Dict[str, torch.Tensor]] = []\n num_layers = len(adapters.get(\"layers\", []))\n for i in range(num_layers):\n dest: Dict[str, torch.Tensor] = {}\n cur = adapters[\"layers\"][i]\n base = (\n base_adapters[\"layers\"][i]\n if base_adapters is not None and i < len(base_adapters.get(\"layers\", []))\n else None\n )\n for name in target_names:\n acc: Optional[torch.Tensor] = None\n # Base component\n if base is not None and name in base:\n A = torch.from_numpy(base[name][\"A\"]).to(torch.float32)\n B = torch.from_numpy(base[name][\"B\"]).to(torch.float32)\n acc = (A @ B)\n # Subgraph component\n if name in cur:\n A = torch.from_numpy(cur[name][\"A\"]).to(torch.float32)\n B = torch.from_numpy(cur[name][\"B\"]).to(torch.float32)\n sub = (A @ B)\n acc = (\n sub\n if acc is None\n else ((1.0 - float(g_sub)) * acc + float(g_sub) * sub)\n )\n if acc is not None:\n dest[name] = acc.contiguous()\n layers_out.append(dest)\n return layers_out\n\n\ndef run_program_adapter(\n model: str,\n adapters_npz: str,\n program: str,\n prompt: str,\n *,\n cache_dir: Optional[str] = None,\n device: str = \"cpu\",\n gpu_ids: Optional[str] = None,\n context_tokens: int = 5000,\n ignore: Optional[List[str]] = None,\n timeout_sec: Optional[int] = None,\n) -> Tuple[int, str, str]:\n \"\"\"Produce an answer with citations via in-process orchestrator; return (exit_code, stdout, stderr).\"\"\"\n try:\n from modules.runner import generate_answer\n text = generate_answer(\n model_id=model,","source_hash":"7cf1fff47ef64c55d62db5bbc76e1e5724bfa452ac4dd0aab0a8b78b84144154","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runtime.run_program_adapter","uri":"program://Program_Conditioned_Adapter/function/modules.runtime.run_program_adapter#L84-L114","kind":"function","name":"run_program_adapter","path":"modules/runtime.py","language":"python","start_line":84,"end_line":114,"context_start_line":64,"context_end_line":115,"code":" if base is not None and name in base:\n A = torch.from_numpy(base[name][\"A\"]).to(torch.float32)\n B = torch.from_numpy(base[name][\"B\"]).to(torch.float32)\n acc = (A @ B)\n # Subgraph component\n if name in cur:\n A = torch.from_numpy(cur[name][\"A\"]).to(torch.float32)\n B = torch.from_numpy(cur[name][\"B\"]).to(torch.float32)\n sub = (A @ B)\n acc = (\n sub\n if acc is None\n else ((1.0 - float(g_sub)) * acc + float(g_sub) * sub)\n )\n if acc is not None:\n dest[name] = acc.contiguous()\n layers_out.append(dest)\n return layers_out\n\n\ndef run_program_adapter(\n model: str,\n adapters_npz: str,\n program: str,\n prompt: str,\n *,\n cache_dir: Optional[str] = None,\n device: str = \"cpu\",\n gpu_ids: Optional[str] = None,\n context_tokens: int = 5000,\n ignore: Optional[List[str]] = None,\n timeout_sec: Optional[int] = None,\n) -> Tuple[int, str, str]:\n \"\"\"Produce an answer with citations via in-process orchestrator; return (exit_code, stdout, stderr).\"\"\"\n try:\n from modules.runner import generate_answer\n text = generate_answer(\n model_id=model,\n adapters_npz=adapters_npz,\n prompt=prompt,\n program_root=program,\n cache_dir=cache_dir,\n device=device,\n gpu_ids=gpu_ids,\n pack_context=True,\n context_tokens=int(context_tokens),\n require_citations=True,\n )\n return 0, text, \"\"\n except Exception as e:\n return 1, \"\", 
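The mixing rule in `build_per_layer_deltas` reduces to a convex blend controlled by `g_sub` when both components exist; otherwise whichever component is present is used as-is. Assuming `A` is `(out, r)` and `B` is `(r, in)`, consistent with the `(out, in)` delta the docstring promises, the arithmetic in numpy form:

```python
import numpy as np

# delta = (1 - g_sub) * (A_base @ B_base) + g_sub * (A_sub @ B_sub)
rng = np.random.default_rng(0)
out_dim, in_dim, r = 6, 4, 2
A_base, B_base = rng.normal(size=(out_dim, r)), rng.normal(size=(r, in_dim))
A_sub, B_sub = rng.normal(size=(out_dim, r)), rng.normal(size=(r, in_dim))

g_sub = 0.75  # the runner's default --gsub
delta = (1.0 - g_sub) * (A_base @ B_base) + g_sub * (A_sub @ B_sub)
print(delta.shape)  # (6, 4): an (out, in) weight delta for one target
```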
str(e)\n","source_hash":"7cf1fff47ef64c55d62db5bbc76e1e5724bfa452ac4dd0aab0a8b78b84144154","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.caches","uri":"program://Program_Conditioned_Adapter/module/modules.caches#L1-L87","kind":"module","name":"modules.caches","path":"modules/caches.py","language":"python","start_line":1,"end_line":87,"context_start_line":1,"context_end_line":87,"code":"from __future__ import annotations\n\nimport os\nimport json\nfrom typing import Dict, Iterable, Iterator, List, Optional, Tuple\n\ndef _ensure_path(p: Optional[str]) -> Optional[str]:\n if not p:\n return None\n pth = os.path.abspath(os.path.expanduser(os.path.expandvars(p)))\n return pth if os.path.exists(pth) else None\n\ndef load_manifest(adapters_dir: str) -> Dict:\n mf = os.path.join(adapters_dir, \"manifest.json\")\n try:\n return json.loads(open(mf, \"r\", encoding=\"utf-8\").read())\n except Exception:\n return {}\n\ndef resolve_cache_path(manifest: Dict, key: str, default_path: str) -> str:\n try:\n p = manifest.get(\"caches\", {}).get(key, {}).get(\"path\")\n p = _ensure_path(p)\n if p:\n return p\n except Exception:\n pass\n return default_path\n\ndef iter_jsonl(path: str) -> Iterator[Dict]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n ln = ln.strip()\n if not ln:\n continue\n try:\n yield json.loads(ln)\n except Exception:\n continue\n except Exception:\n return iter(())\n\ndef load_symbol_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n sym_path = resolve_cache_path(mf, \"symbol_index\", os.path.join(adapters_dir, \"symbol_index.jsonl\"))\n return list(iter_jsonl(sym_path))\n\ndef load_windows_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n win_path = resolve_cache_path(mf, \"windows_index\", os.path.join(adapters_dir, \"windows_index.jsonl\"))\n return list(iter_jsonl(win_path))\n\ndef load_facts(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n facts_path = resolve_cache_path(mf, \"facts\", os.path.join(adapters_dir, \"facts.jsonl\"))\n return list(iter_jsonl(facts_path))\n\ndef pick_files_from_windows(program_root: str, windows: List[Dict], prompt: str, k: int = 8) -> List[str]:\n # Lightweight scoring: overlap of prompt tokens with filename and uri; prefer unique files\n import re\n toks = set([t for t in re.findall(r\"[A-Za-z0-9_]+\", (prompt or \"\").lower()) if len(t) >= 3])\n scored: List[Tuple[float, str]] = []\n seen = set()\n for w in windows:\n rel = str(w.get(\"path\") or \"\").replace(\"\\\\\", \"/\")\n if not rel:\n continue\n if rel in seen:\n continue\n base = os.path.basename(rel).lower()\n uri = str(w.get(\"uri\") or \"\").lower()\n score = 0.0\n for t in toks:\n if t in base:\n score += 1.0\n if t in uri:\n score += 0.5\n if score > 0.0:\n scored.append((score, rel))\n seen.add(rel)\n scored.sort(key=lambda x: x[0], reverse=True)\n files = [rel for (_s, rel) in scored[: max(1, k)]]\n return files\n\n\n","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.caches._ensure_path","uri":"program://Program_Conditioned_Adapter/function/modules.caches._ensure_path#L7-L11","kind":"function","name":"_ensure_path","path":"modules/caches.py","language":"python","start_line":7,"end_line":11,"context_start_line":1,"context_end_line":31,"code":"from __future__ import annotations\n\nimport os\nimport json\nfrom typing import Dict, 
Iterable, Iterator, List, Optional, Tuple\n\ndef _ensure_path(p: Optional[str]) -> Optional[str]:\n if not p:\n return None\n pth = os.path.abspath(os.path.expanduser(os.path.expandvars(p)))\n return pth if os.path.exists(pth) else None\n\ndef load_manifest(adapters_dir: str) -> Dict:\n mf = os.path.join(adapters_dir, \"manifest.json\")\n try:\n return json.loads(open(mf, \"r\", encoding=\"utf-8\").read())\n except Exception:\n return {}\n\ndef resolve_cache_path(manifest: Dict, key: str, default_path: str) -> str:\n try:\n p = manifest.get(\"caches\", {}).get(key, {}).get(\"path\")\n p = _ensure_path(p)\n if p:\n return p\n except Exception:\n pass\n return default_path\n\ndef iter_jsonl(path: str) -> Iterator[Dict]:\n try:","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.caches.load_manifest","uri":"program://Program_Conditioned_Adapter/function/modules.caches.load_manifest#L13-L18","kind":"function","name":"load_manifest","path":"modules/caches.py","language":"python","start_line":13,"end_line":18,"context_start_line":1,"context_end_line":38,"code":"from __future__ import annotations\n\nimport os\nimport json\nfrom typing import Dict, Iterable, Iterator, List, Optional, Tuple\n\ndef _ensure_path(p: Optional[str]) -> Optional[str]:\n if not p:\n return None\n pth = os.path.abspath(os.path.expanduser(os.path.expandvars(p)))\n return pth if os.path.exists(pth) else None\n\ndef load_manifest(adapters_dir: str) -> Dict:\n mf = os.path.join(adapters_dir, \"manifest.json\")\n try:\n return json.loads(open(mf, \"r\", encoding=\"utf-8\").read())\n except Exception:\n return {}\n\ndef resolve_cache_path(manifest: Dict, key: str, default_path: str) -> str:\n try:\n p = manifest.get(\"caches\", {}).get(key, {}).get(\"path\")\n p = _ensure_path(p)\n if p:\n return p\n except Exception:\n pass\n return default_path\n\ndef iter_jsonl(path: str) -> Iterator[Dict]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n ln = ln.strip()\n if not ln:\n continue\n try:\n yield json.loads(ln)","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.caches.resolve_cache_path","uri":"program://Program_Conditioned_Adapter/function/modules.caches.resolve_cache_path#L20-L28","kind":"function","name":"resolve_cache_path","path":"modules/caches.py","language":"python","start_line":20,"end_line":28,"context_start_line":1,"context_end_line":48,"code":"from __future__ import annotations\n\nimport os\nimport json\nfrom typing import Dict, Iterable, Iterator, List, Optional, Tuple\n\ndef _ensure_path(p: Optional[str]) -> Optional[str]:\n if not p:\n return None\n pth = os.path.abspath(os.path.expanduser(os.path.expandvars(p)))\n return pth if os.path.exists(pth) else None\n\ndef load_manifest(adapters_dir: str) -> Dict:\n mf = os.path.join(adapters_dir, \"manifest.json\")\n try:\n return json.loads(open(mf, \"r\", encoding=\"utf-8\").read())\n except Exception:\n return {}\n\ndef resolve_cache_path(manifest: Dict, key: str, default_path: str) -> str:\n try:\n p = manifest.get(\"caches\", {}).get(key, {}).get(\"path\")\n p = _ensure_path(p)\n if p:\n return p\n except Exception:\n pass\n return default_path\n\ndef iter_jsonl(path: str) -> Iterator[Dict]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n ln = ln.strip()\n if not ln:\n continue\n try:\n 
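Resolution order: a manifest-declared path wins only if it actually exists on disk (after user/env expansion); otherwise the loader falls back to `<adapters_dir>/<name>.jsonl`. A small re-enactment of `resolve_cache_path` plus the `_ensure_path` check:

```python
import os

def resolve(manifest: dict, key: str, default_path: str) -> str:
    # Manifest path first, but only if it exists; else the default.
    p = manifest.get("caches", {}).get(key, {}).get("path")
    if p:
        p = os.path.abspath(os.path.expanduser(os.path.expandvars(p)))
        if os.path.exists(p):
            return p
    return default_path

manifest = {"caches": {"symbol_index": {"path": "/nonexistent/symbols.jsonl"}}}
print(resolve(manifest, "symbol_index", "artifacts/base_adapters/symbol_index.jsonl"))
# -> artifacts/base_adapters/symbol_index.jsonl (manifest path missing on disk)
```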
yield json.loads(ln)\n except Exception:\n continue\n except Exception:\n return iter(())\n\ndef load_symbol_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n sym_path = resolve_cache_path(mf, \"symbol_index\", os.path.join(adapters_dir, \"symbol_index.jsonl\"))\n return list(iter_jsonl(sym_path))\n","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.caches.iter_jsonl","uri":"program://Program_Conditioned_Adapter/function/modules.caches.iter_jsonl#L30-L42","kind":"function","name":"iter_jsonl","path":"modules/caches.py","language":"python","start_line":30,"end_line":42,"context_start_line":10,"context_end_line":62,"code":" pth = os.path.abspath(os.path.expanduser(os.path.expandvars(p)))\n return pth if os.path.exists(pth) else None\n\ndef load_manifest(adapters_dir: str) -> Dict:\n mf = os.path.join(adapters_dir, \"manifest.json\")\n try:\n return json.loads(open(mf, \"r\", encoding=\"utf-8\").read())\n except Exception:\n return {}\n\ndef resolve_cache_path(manifest: Dict, key: str, default_path: str) -> str:\n try:\n p = manifest.get(\"caches\", {}).get(key, {}).get(\"path\")\n p = _ensure_path(p)\n if p:\n return p\n except Exception:\n pass\n return default_path\n\ndef iter_jsonl(path: str) -> Iterator[Dict]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n ln = ln.strip()\n if not ln:\n continue\n try:\n yield json.loads(ln)\n except Exception:\n continue\n except Exception:\n return iter(())\n\ndef load_symbol_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n sym_path = resolve_cache_path(mf, \"symbol_index\", os.path.join(adapters_dir, \"symbol_index.jsonl\"))\n return list(iter_jsonl(sym_path))\n\ndef load_windows_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n win_path = resolve_cache_path(mf, \"windows_index\", os.path.join(adapters_dir, \"windows_index.jsonl\"))\n return list(iter_jsonl(win_path))\n\ndef load_facts(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n facts_path = resolve_cache_path(mf, \"facts\", os.path.join(adapters_dir, \"facts.jsonl\"))\n return list(iter_jsonl(facts_path))\n\ndef pick_files_from_windows(program_root: str, windows: List[Dict], prompt: str, k: int = 8) -> List[str]:\n # Lightweight scoring: overlap of prompt tokens with filename and uri; prefer unique files\n import re\n toks = set([t for t in re.findall(r\"[A-Za-z0-9_]+\", (prompt or \"\").lower()) if len(t) >= 3])","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.caches.load_symbol_index","uri":"program://Program_Conditioned_Adapter/function/modules.caches.load_symbol_index#L44-L47","kind":"function","name":"load_symbol_index","path":"modules/caches.py","language":"python","start_line":44,"end_line":47,"context_start_line":24,"context_end_line":67,"code":" if p:\n return p\n except Exception:\n pass\n return default_path\n\ndef iter_jsonl(path: str) -> Iterator[Dict]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n ln = ln.strip()\n if not ln:\n continue\n try:\n yield json.loads(ln)\n except Exception:\n continue\n except Exception:\n return iter(())\n\ndef load_symbol_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n sym_path = resolve_cache_path(mf, \"symbol_index\", 
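`iter_jsonl` skips blank and malformed lines instead of raising, so a partially corrupted cache degrades to its readable rows. A quick check of the same loop with a throwaway file:

```python
import json, os, tempfile

with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as fh:
    fh.write('{"a": 1}\n\nnot json\n{"b": 2}\n')
    path = fh.name

rows = []
with open(path, "r", encoding="utf-8") as fh:
    for ln in fh:
        ln = ln.strip()
        if not ln:
            continue          # blank line: skip
        try:
            rows.append(json.loads(ln))
        except Exception:
            continue          # malformed line: skip, keep going
print(rows)  # [{'a': 1}, {'b': 2}]
os.unlink(path)
```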
os.path.join(adapters_dir, \"symbol_index.jsonl\"))\n return list(iter_jsonl(sym_path))\n\ndef load_windows_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n win_path = resolve_cache_path(mf, \"windows_index\", os.path.join(adapters_dir, \"windows_index.jsonl\"))\n return list(iter_jsonl(win_path))\n\ndef load_facts(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n facts_path = resolve_cache_path(mf, \"facts\", os.path.join(adapters_dir, \"facts.jsonl\"))\n return list(iter_jsonl(facts_path))\n\ndef pick_files_from_windows(program_root: str, windows: List[Dict], prompt: str, k: int = 8) -> List[str]:\n # Lightweight scoring: overlap of prompt tokens with filename and uri; prefer unique files\n import re\n toks = set([t for t in re.findall(r\"[A-Za-z0-9_]+\", (prompt or \"\").lower()) if len(t) >= 3])\n scored: List[Tuple[float, str]] = []\n seen = set()\n for w in windows:\n rel = str(w.get(\"path\") or \"\").replace(\"\\\\\", \"/\")\n if not rel:","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.caches.load_windows_index","uri":"program://Program_Conditioned_Adapter/function/modules.caches.load_windows_index#L49-L52","kind":"function","name":"load_windows_index","path":"modules/caches.py","language":"python","start_line":49,"end_line":52,"context_start_line":29,"context_end_line":72,"code":"\ndef iter_jsonl(path: str) -> Iterator[Dict]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n ln = ln.strip()\n if not ln:\n continue\n try:\n yield json.loads(ln)\n except Exception:\n continue\n except Exception:\n return iter(())\n\ndef load_symbol_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n sym_path = resolve_cache_path(mf, \"symbol_index\", os.path.join(adapters_dir, \"symbol_index.jsonl\"))\n return list(iter_jsonl(sym_path))\n\ndef load_windows_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n win_path = resolve_cache_path(mf, \"windows_index\", os.path.join(adapters_dir, \"windows_index.jsonl\"))\n return list(iter_jsonl(win_path))\n\ndef load_facts(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n facts_path = resolve_cache_path(mf, \"facts\", os.path.join(adapters_dir, \"facts.jsonl\"))\n return list(iter_jsonl(facts_path))\n\ndef pick_files_from_windows(program_root: str, windows: List[Dict], prompt: str, k: int = 8) -> List[str]:\n # Lightweight scoring: overlap of prompt tokens with filename and uri; prefer unique files\n import re\n toks = set([t for t in re.findall(r\"[A-Za-z0-9_]+\", (prompt or \"\").lower()) if len(t) >= 3])\n scored: List[Tuple[float, str]] = []\n seen = set()\n for w in windows:\n rel = str(w.get(\"path\") or \"\").replace(\"\\\\\", \"/\")\n if not rel:\n continue\n if rel in seen:\n continue\n base = os.path.basename(rel).lower()\n uri = str(w.get(\"uri\") or \"\").lower()","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.caches.load_facts","uri":"program://Program_Conditioned_Adapter/function/modules.caches.load_facts#L54-L57","kind":"function","name":"load_facts","path":"modules/caches.py","language":"python","start_line":54,"end_line":57,"context_start_line":34,"context_end_line":77,"code":" ln = ln.strip()\n if not ln:\n continue\n try:\n yield json.loads(ln)\n except 
Exception:\n continue\n except Exception:\n return iter(())\n\ndef load_symbol_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n sym_path = resolve_cache_path(mf, \"symbol_index\", os.path.join(adapters_dir, \"symbol_index.jsonl\"))\n return list(iter_jsonl(sym_path))\n\ndef load_windows_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n win_path = resolve_cache_path(mf, \"windows_index\", os.path.join(adapters_dir, \"windows_index.jsonl\"))\n return list(iter_jsonl(win_path))\n\ndef load_facts(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n facts_path = resolve_cache_path(mf, \"facts\", os.path.join(adapters_dir, \"facts.jsonl\"))\n return list(iter_jsonl(facts_path))\n\ndef pick_files_from_windows(program_root: str, windows: List[Dict], prompt: str, k: int = 8) -> List[str]:\n # Lightweight scoring: overlap of prompt tokens with filename and uri; prefer unique files\n import re\n toks = set([t for t in re.findall(r\"[A-Za-z0-9_]+\", (prompt or \"\").lower()) if len(t) >= 3])\n scored: List[Tuple[float, str]] = []\n seen = set()\n for w in windows:\n rel = str(w.get(\"path\") or \"\").replace(\"\\\\\", \"/\")\n if not rel:\n continue\n if rel in seen:\n continue\n base = os.path.basename(rel).lower()\n uri = str(w.get(\"uri\") or \"\").lower()\n score = 0.0\n for t in toks:\n if t in base:\n score += 1.0\n if t in uri:","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.caches.pick_files_from_windows","uri":"program://Program_Conditioned_Adapter/function/modules.caches.pick_files_from_windows#L59-L84","kind":"function","name":"pick_files_from_windows","path":"modules/caches.py","language":"python","start_line":59,"end_line":84,"context_start_line":39,"context_end_line":87,"code":" except Exception:\n continue\n except Exception:\n return iter(())\n\ndef load_symbol_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n sym_path = resolve_cache_path(mf, \"symbol_index\", os.path.join(adapters_dir, \"symbol_index.jsonl\"))\n return list(iter_jsonl(sym_path))\n\ndef load_windows_index(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n win_path = resolve_cache_path(mf, \"windows_index\", os.path.join(adapters_dir, \"windows_index.jsonl\"))\n return list(iter_jsonl(win_path))\n\ndef load_facts(adapters_dir: str) -> List[Dict]:\n mf = load_manifest(adapters_dir)\n facts_path = resolve_cache_path(mf, \"facts\", os.path.join(adapters_dir, \"facts.jsonl\"))\n return list(iter_jsonl(facts_path))\n\ndef pick_files_from_windows(program_root: str, windows: List[Dict], prompt: str, k: int = 8) -> List[str]:\n # Lightweight scoring: overlap of prompt tokens with filename and uri; prefer unique files\n import re\n toks = set([t for t in re.findall(r\"[A-Za-z0-9_]+\", (prompt or \"\").lower()) if len(t) >= 3])\n scored: List[Tuple[float, str]] = []\n seen = set()\n for w in windows:\n rel = str(w.get(\"path\") or \"\").replace(\"\\\\\", \"/\")\n if not rel:\n continue\n if rel in seen:\n continue\n base = os.path.basename(rel).lower()\n uri = str(w.get(\"uri\") or \"\").lower()\n score = 0.0\n for t in toks:\n if t in base:\n score += 1.0\n if t in uri:\n score += 0.5\n if score > 0.0:\n scored.append((score, rel))\n seen.add(rel)\n scored.sort(key=lambda x: x[0], reverse=True)\n files = [rel for (_s, rel) in scored[: max(1, k)]]\n return 
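The scoring in pick_files_from_windows is deliberately crude: +1.0 per prompt token (length >= 3, lowercased) found in the file's basename, +0.5 per token found in its URI, first occurrence of each relative path wins, top-k by score. A toy run with fabricated window records:

from modules.caches import pick_files_from_windows

windows = [
    {"path": "modules/context.py", "uri": "program://demo/module/modules.context"},
    {"path": "modules/caches.py", "uri": "program://demo/module/modules.caches"},
    {"path": "README.md", "uri": "program://demo/doc/readme"},
]
files = pick_files_from_windows("/repo", windows, "How does context packing work?", k=2)
# "context" scores 1.0 on the basename context.py plus 0.5 on its uri;
# the other two files match no token and are dropped entirely.
print(files)  # ['modules/context.py']

Note that zero-score files are never added to seen, so a later window for the same file is re-scored; that is harmless here because the score depends only on the path and URI.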
files\n\n\n","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph","uri":"program://Program_Conditioned_Adapter/module/modules.program_graph#L1-L58","kind":"module","name":"modules.program_graph","path":"modules/program_graph.py","language":"python","start_line":1,"end_line":58,"context_start_line":1,"context_end_line":58,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Iterable, Protocol, Optional, Tuple, Dict, List\n\n\n# Core types\nEntityId = str\n\n\n@dataclass(frozen=True)\nclass Span:\n start_line: int\n end_line: int # inclusive, 1-based\n\n\n@dataclass(frozen=True)\nclass Entity:\n uri: str\n id: EntityId\n kind: str\n name: str\n owner: Optional[str] = None\n labels: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Edge:\n src: EntityId\n dst: EntityId\n type: str\n meta: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": ...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.Span","uri":"program://Program_Conditioned_Adapter/class/modules.program_graph.Span#L12-L14","kind":"class","name":"Span","path":"modules/program_graph.py","language":"python","start_line":12,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Iterable, Protocol, Optional, Tuple, Dict, List\n\n\n# Core types\nEntityId = str\n\n\n@dataclass(frozen=True)\nclass Span:\n start_line: int\n end_line: int # inclusive, 1-based\n\n\n@dataclass(frozen=True)\nclass Entity:\n uri: str\n id: EntityId\n kind: str\n name: str\n owner: Optional[str] = None\n labels: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Edge:\n src: EntityId\n dst: EntityId\n type: str\n meta: Optional[Dict[str, str]] = None\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.Entity","uri":"program://Program_Conditioned_Adapter/class/modules.program_graph.Entity#L18-L24","kind":"class","name":"Entity","path":"modules/program_graph.py","language":"python","start_line":18,"end_line":24,"context_start_line":1,"context_end_line":44,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Iterable, Protocol, Optional, Tuple, Dict, List\n\n\n# Core types\nEntityId = str\n\n\n@dataclass(frozen=True)\nclass Span:\n start_line: int\n end_line: int # inclusive, 1-based\n\n\n@dataclass(frozen=True)\nclass Entity:\n uri: str\n id: EntityId\n kind: str\n name: str\n owner: Optional[str] = None\n labels: Optional[Dict[str, str]] = 
None\n\n\n@dataclass(frozen=True)\nclass Edge:\n src: EntityId\n dst: EntityId\n type: str\n meta: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.Edge","uri":"program://Program_Conditioned_Adapter/class/modules.program_graph.Edge#L28-L32","kind":"class","name":"Edge","path":"modules/program_graph.py","language":"python","start_line":28,"end_line":32,"context_start_line":8,"context_end_line":52,"code":"EntityId = str\n\n\n@dataclass(frozen=True)\nclass Span:\n start_line: int\n end_line: int # inclusive, 1-based\n\n\n@dataclass(frozen=True)\nclass Entity:\n uri: str\n id: EntityId\n kind: str\n name: str\n owner: Optional[str] = None\n labels: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Edge:\n src: EntityId\n dst: EntityId\n type: str\n meta: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.Artifact","uri":"program://Program_Conditioned_Adapter/class/modules.program_graph.Artifact#L36-L40","kind":"class","name":"Artifact","path":"modules/program_graph.py","language":"python","start_line":36,"end_line":40,"context_start_line":16,"context_end_line":58,"code":"\n@dataclass(frozen=True)\nclass Entity:\n uri: str\n id: EntityId\n kind: str\n name: str\n owner: Optional[str] = None\n labels: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Edge:\n src: EntityId\n dst: EntityId\n type: str\n meta: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": ...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.ResolvedAnchor","uri":"program://Program_Conditioned_Adapter/class/modules.program_graph.ResolvedAnchor#L44-L47","kind":"class","name":"ResolvedAnchor","path":"modules/program_graph.py","language":"python","start_line":44,"end_line":47,"context_start_line":24,"context_end_line":58,"code":" labels: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Edge:\n src: EntityId\n dst: EntityId\n type: str\n meta: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Artifact:\n 
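Since Span is 1-based and end-inclusive (unlike Python slices), off-by-one care is needed when converting to list indices; a two-line sanity check:

from modules.program_graph import Span

s = Span(start_line=10, end_line=12)   # covers lines 10, 11, 12
print(s.end_line - s.start_line + 1)   # 3, not 2
# Slicing a 0-based list of source lines for this span:
# src_lines[s.start_line - 1 : s.end_line]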
uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": ...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.ProgramGraph","uri":"program://Program_Conditioned_Adapter/class/modules.program_graph.ProgramGraph#L50-L56","kind":"class","name":"ProgramGraph","path":"modules/program_graph.py","language":"python","start_line":50,"end_line":56,"context_start_line":30,"context_end_line":58,"code":" dst: EntityId\n type: str\n meta: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": ...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.entities","uri":"program://Program_Conditioned_Adapter/function/modules.program_graph.entities#L51-L51","kind":"function","name":"entities","path":"modules/program_graph.py","language":"python","start_line":51,"end_line":51,"context_start_line":31,"context_end_line":58,"code":" type: str\n meta: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": ...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.edges","uri":"program://Program_Conditioned_Adapter/function/modules.program_graph.edges#L52-L52","kind":"function","name":"edges","path":"modules/program_graph.py","language":"python","start_line":52,"end_line":52,"context_start_line":32,"context_end_line":58,"code":" meta: Optional[Dict[str, str]] = None\n\n\n@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: 
str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": ...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.search_refs","uri":"program://Program_Conditioned_Adapter/function/modules.program_graph.search_refs#L53-L53","kind":"function","name":"search_refs","path":"modules/program_graph.py","language":"python","start_line":53,"end_line":53,"context_start_line":33,"context_end_line":58,"code":"\n\n@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": ...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.subgraph","uri":"program://Program_Conditioned_Adapter/function/modules.program_graph.subgraph#L54-L54","kind":"function","name":"subgraph","path":"modules/program_graph.py","language":"python","start_line":54,"end_line":54,"context_start_line":34,"context_end_line":58,"code":"\n@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": ...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.artifacts","uri":"program://Program_Conditioned_Adapter/function/modules.program_graph.artifacts#L55-L55","kind":"function","name":"artifacts","path":"modules/program_graph.py","language":"python","start_line":55,"end_line":55,"context_start_line":35,"context_end_line":58,"code":"@dataclass(frozen=True)\nclass Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": 
...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_graph.resolve","uri":"program://Program_Conditioned_Adapter/function/modules.program_graph.resolve#L56-L56","kind":"function","name":"resolve","path":"modules/program_graph.py","language":"python","start_line":56,"end_line":56,"context_start_line":36,"context_end_line":58,"code":"class Artifact:\n uri: str\n type: str # e.g., \"source\"\n hash: str\n span: Optional[Span] = None\n\n\n@dataclass(frozen=True)\nclass ResolvedAnchor:\n artifact_uri: str\n span: Span\n hash: str\n\n\nclass ProgramGraph(Protocol):\n def entities(self) -> Iterable[Entity]: ...\n def edges(self) -> Iterable[Edge]: ...\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]: ...\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\": ...\n def artifacts(self, kind: str) -> Iterable[Artifact]: ...\n def resolve(self, uri: str) -> ResolvedAnchor: ...\n\n","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.context","uri":"program://Program_Conditioned_Adapter/module/modules.context#L1-L158","kind":"module","name":"modules.context","path":"modules/context.py","language":"python","start_line":1,"end_line":158,"context_start_line":1,"context_end_line":158,"code":"# function-first packer (lift from run_repo_adapter.py)\n\nfrom typing import List, Optional, Tuple\nimport os\nimport torch # type: ignore\n\n\ndef pack_context_heads(program_root: str, files: List[str], tok, budget_tokens: int) -> str:\n lines_out: List[str] = [\"Program snippets:\"]\n used = 0\n for rel in files:\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(program_root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n head_n = min(len(src_lines), 120)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, program_root)}:1-{head_n}\"] + src_lines[:head_n] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:\n head_n = min(len(src_lines), 60)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, program_root)}:1-{head_n}\"] + src_lines[:head_n] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:\n continue\n lines_out.extend(block)\n used += t\n if used >= budget_tokens:\n break\n return \"\\n\".join(lines_out) if len(lines_out) > 1 else \"\"\n\n\ndef pack_context_windows(program_root: str, files: List[str], tok, budget_tokens: int) -> str:\n lines_out: List[str] = [\"Program windows:\"]\n used = 0\n for rel in files:\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(program_root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n def_lines: List[int] = []\n for i, line in enumerate(src_lines, start=1):\n s = line.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n def_lines.append(i)\n if len(def_lines) >= 2:\n break\n def_lines = def_lines or [1]\n for ln in def_lines:\n a = max(1, ln - 20)\n b = min(len(src_lines), ln + 40)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, 
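ProgramGraph is a typing.Protocol, so any structurally conforming class satisfies it without inheritance. A minimal in-memory implementation, purely illustrative: the naive search_refs and the fixed resolve hash are assumptions of this sketch, not behavior the source prescribes.

from typing import Iterable, List, Tuple

from modules.program_graph import (
    Artifact, Edge, Entity, EntityId, ProgramGraph, ResolvedAnchor, Span,
)


class InMemoryGraph:
    """Toy ProgramGraph backed by two lists; conforms structurally."""

    def __init__(self, ents: List[Entity], eds: List[Edge]) -> None:
        self._ents = ents
        self._eds = eds

    def entities(self) -> Iterable[Entity]:
        return iter(self._ents)

    def edges(self) -> Iterable[Edge]:
        return iter(self._eds)

    def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]:
        # Naive: report line 1 of any entity whose name mentions the token.
        return ((e.id, Span(1, 1)) for e in self._ents if token in e.name)

    def subgraph(self, seeds: List[EntityId], radius: int) -> "InMemoryGraph":
        keep = set(seeds)
        for _ in range(radius):  # expand one hop per iteration, both directions
            keep |= {e.dst for e in self._eds if e.src in keep}
            keep |= {e.src for e in self._eds if e.dst in keep}
        return InMemoryGraph(
            [e for e in self._ents if e.id in keep],
            [e for e in self._eds if e.src in keep and e.dst in keep],
        )

    def artifacts(self, kind: str) -> Iterable[Artifact]:
        return iter(())  # this toy graph tracks no artifacts

    def resolve(self, uri: str) -> ResolvedAnchor:
        return ResolvedAnchor(artifact_uri=uri, span=Span(1, 1), hash="0" * 8)


pg: ProgramGraph = InMemoryGraph([], [])  # type-checks: Protocol is structural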
program_root)}:{a}-{b}\"] + src_lines[a - 1 : b] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:\n continue\n lines_out.extend(block)\n used += t\n if used >= budget_tokens:\n break\n if used >= budget_tokens:\n break\n return \"\\n\".join(lines_out) if len(lines_out) > 1 else \"\"\n\n\ndef collect_function_windows(program_root: str, files_: List[str], lines_each: int, *, max_candidates: int = 24) -> List[Tuple[str, int, int, int, List[str]]]:\n out: List[Tuple[str, int, int, int, List[str]]] = []\n for rel in files_:\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(program_root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n anchors: List[int] = []\n for i, line in enumerate(src_lines, start=1):\n s = line.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n anchors.append(i)\n if len(anchors) >= max(4, int(max_candidates) // max(1, len(files_))):\n break\n if not anchors:\n continue\n half = max(10, int(lines_each // 2))\n for ln in anchors:\n a = max(1, ln - half)\n b = min(len(src_lines), ln + half)\n out.append((rel, a, b, ln, src_lines[a - 1 : b]))\n return out\n\n\ndef extract_func_name_from_lines(lines_block: List[str], a: int, b: int, anchor_ln: int) -> Optional[str]:\n try:\n best_name = None\n best_dist = 10**9\n abs_ln = a\n import re as _re\n for ln_text in lines_block:\n s = ln_text.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n m = _re.match(r\"^(?:def|class)\\s+([A-Za-z0-9_]+)\", s)\n if m:\n name = m.group(1)\n dist = abs(abs_ln - anchor_ln)\n if dist < best_dist:\n best_dist = dist\n best_name = name\n abs_ln += 1\n return best_name\n except Exception:\n return None\n\n\n\ndef score_yes_no(tok, model, q: str) -> float:\n q_ids = tok(q, return_tensors=\"pt\")\n dev = next(model.parameters()).device\n q_ids = {k: v.to(dev) for k, v in q_ids.items()}\n with torch.no_grad():\n out_lm = model(**q_ids)\n # Support both HF-style outputs and raw tensor outputs\n if isinstance(out_lm, torch.Tensor):\n logits = out_lm\n else:\n logits = getattr(out_lm, \"logits\", None)\n if logits is None:\n # Best-effort fallbacks\n if isinstance(out_lm, (tuple, list)) and out_lm:\n logits = out_lm[0]\n else:\n logits = getattr(out_lm, \"last_hidden_state\", None)\n if logits is None:\n # As a last resort, return neutral 0.5\n return 0.5\n last = logits[:, -1, :]\n t1 = tok(\"1\", add_special_tokens=False).input_ids\n t0 = tok(\"0\", add_special_tokens=False).input_ids\n probs = torch.softmax(last, dim=-1)\n p1 = float(sum(probs[0, i].item() for i in (t1 or [])))\n p0 = float(sum(probs[0, i].item() for i in (t0 or [])))\n denom = max(1e-9, p1 + p0)\n return float(p1 / denom)\n\n\ndef model_prob_yes(tok, model, prompt_q: str, window_txt: str) -> Tuple[float, float]:\n rel_q = (\n \"Question: \" + prompt_q + \"\\nWindow:\\n\" + window_txt[:1800] + \"\\nDoes this window contain the core function or logic to answer the question? Answer 1 or 0.\"\n )\n noise_q = (\n \"Question: \" + prompt_q + \"\\nWindow:\\n\" + window_txt[:1800] + \"\\nIs this window likely test/tool/noise unrelated to answering the question? 
Answer 1 or 0.\"\n )\n return score_yes_no(tok, model, rel_q), score_yes_no(tok, model, noise_q)\n\n\n\n\"\"\"\"\"\" # end","source_hash":"832e3907fe931392f70bd4f4cbdd7f39b18fafc933f21a08ac724b5f97582194","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.context.pack_context_heads","uri":"program://Program_Conditioned_Adapter/function/modules.context.pack_context_heads#L8-L32","kind":"function","name":"pack_context_heads","path":"modules/context.py","language":"python","start_line":8,"end_line":32,"context_start_line":1,"context_end_line":52,"code":"# function-first packer (lift from run_repo_adapter.py)\n\nfrom typing import List, Optional, Tuple\nimport os\nimport torch # type: ignore\n\n\ndef pack_context_heads(program_root: str, files: List[str], tok, budget_tokens: int) -> str:\n lines_out: List[str] = [\"Program snippets:\"]\n used = 0\n for rel in files:\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(program_root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n head_n = min(len(src_lines), 120)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, program_root)}:1-{head_n}\"] + src_lines[:head_n] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:\n head_n = min(len(src_lines), 60)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, program_root)}:1-{head_n}\"] + src_lines[:head_n] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:\n continue\n lines_out.extend(block)\n used += t\n if used >= budget_tokens:\n break\n return \"\\n\".join(lines_out) if len(lines_out) > 1 else \"\"\n\n\ndef pack_context_windows(program_root: str, files: List[str], tok, budget_tokens: int) -> str:\n lines_out: List[str] = [\"Program windows:\"]\n used = 0\n for rel in files:\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(program_root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n def_lines: List[int] = []\n for i, line in enumerate(src_lines, start=1):\n s = line.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n def_lines.append(i)\n if len(def_lines) >= 2:\n break\n def_lines = def_lines or [1]\n for ln in def_lines:","source_hash":"832e3907fe931392f70bd4f4cbdd7f39b18fafc933f21a08ac724b5f97582194","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.context.pack_context_windows","uri":"program://Program_Conditioned_Adapter/function/modules.context.pack_context_windows#L35-L66","kind":"function","name":"pack_context_windows","path":"modules/context.py","language":"python","start_line":35,"end_line":66,"context_start_line":15,"context_end_line":86,"code":" except Exception:\n continue\n head_n = min(len(src_lines), 120)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, program_root)}:1-{head_n}\"] + src_lines[:head_n] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:\n head_n = min(len(src_lines), 60)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, program_root)}:1-{head_n}\"] + src_lines[:head_n] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:\n continue\n lines_out.extend(block)\n used += t\n if used >= budget_tokens:\n break\n return \"\\n\".join(lines_out) 
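pack_context_heads only needs the tokenizer to expose tok(text).input_ids, so a whitespace stub is enough to exercise its budgeting logic (try 120 head lines, fall back to 60, skip the file if even that overflows). A sketch; the stub tokenizer, the budget, and the file list are fabrications of this example:

from types import SimpleNamespace

from modules.context import pack_context_heads


class _WhitespaceTok:
    """Stand-in for an HF tokenizer: pack_context_heads only reads .input_ids."""

    def __call__(self, text):
        return SimpleNamespace(input_ids=text.split())


ctx = pack_context_heads(".", ["modules/caches.py"], _WhitespaceTok(), budget_tokens=400)
# Unreadable files are skipped silently; if nothing fits, the packer
# returns "" rather than the lone "Program snippets:" header.
print(ctx[:80])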
if len(lines_out) > 1 else \"\"\n\n\ndef pack_context_windows(program_root: str, files: List[str], tok, budget_tokens: int) -> str:\n lines_out: List[str] = [\"Program windows:\"]\n used = 0\n for rel in files:\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(program_root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n def_lines: List[int] = []\n for i, line in enumerate(src_lines, start=1):\n s = line.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n def_lines.append(i)\n if len(def_lines) >= 2:\n break\n def_lines = def_lines or [1]\n for ln in def_lines:\n a = max(1, ln - 20)\n b = min(len(src_lines), ln + 40)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, program_root)}:{a}-{b}\"] + src_lines[a - 1 : b] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:\n continue\n lines_out.extend(block)\n used += t\n if used >= budget_tokens:\n break\n if used >= budget_tokens:\n break\n return \"\\n\".join(lines_out) if len(lines_out) > 1 else \"\"\n\n\ndef collect_function_windows(program_root: str, files_: List[str], lines_each: int, *, max_candidates: int = 24) -> List[Tuple[str, int, int, int, List[str]]]:\n out: List[Tuple[str, int, int, int, List[str]]] = []\n for rel in files_:\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(program_root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n anchors: List[int] = []\n for i, line in enumerate(src_lines, start=1):\n s = line.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n anchors.append(i)\n if len(anchors) >= max(4, int(max_candidates) // max(1, len(files_))):\n break\n if not anchors:\n continue\n half = max(10, int(lines_each // 2))","source_hash":"832e3907fe931392f70bd4f4cbdd7f39b18fafc933f21a08ac724b5f97582194","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.context.collect_function_windows","uri":"program://Program_Conditioned_Adapter/function/modules.context.collect_function_windows#L69-L91","kind":"function","name":"collect_function_windows","path":"modules/context.py","language":"python","start_line":69,"end_line":91,"context_start_line":49,"context_end_line":111,"code":" if len(def_lines) >= 2:\n break\n def_lines = def_lines or [1]\n for ln in def_lines:\n a = max(1, ln - 20)\n b = min(len(src_lines), ln + 40)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, program_root)}:{a}-{b}\"] + src_lines[a - 1 : b] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:\n continue\n lines_out.extend(block)\n used += t\n if used >= budget_tokens:\n break\n if used >= budget_tokens:\n break\n return \"\\n\".join(lines_out) if len(lines_out) > 1 else \"\"\n\n\ndef collect_function_windows(program_root: str, files_: List[str], lines_each: int, *, max_candidates: int = 24) -> List[Tuple[str, int, int, int, List[str]]]:\n out: List[Tuple[str, int, int, int, List[str]]] = []\n for rel in files_:\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(program_root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n anchors: List[int] = []\n for i, line in enumerate(src_lines, start=1):\n s = line.lstrip()\n if s.startswith(\"def \") or 
s.startswith(\"class \"):\n anchors.append(i)\n if len(anchors) >= max(4, int(max_candidates) // max(1, len(files_))):\n break\n if not anchors:\n continue\n half = max(10, int(lines_each // 2))\n for ln in anchors:\n a = max(1, ln - half)\n b = min(len(src_lines), ln + half)\n out.append((rel, a, b, ln, src_lines[a - 1 : b]))\n return out\n\n\ndef extract_func_name_from_lines(lines_block: List[str], a: int, b: int, anchor_ln: int) -> Optional[str]:\n try:\n best_name = None\n best_dist = 10**9\n abs_ln = a\n import re as _re\n for ln_text in lines_block:\n s = ln_text.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n m = _re.match(r\"^(?:def|class)\\s+([A-Za-z0-9_]+)\", s)\n if m:\n name = m.group(1)\n dist = abs(abs_ln - anchor_ln)\n if dist < best_dist:\n best_dist = dist\n best_name = name\n abs_ln += 1\n return best_name","source_hash":"832e3907fe931392f70bd4f4cbdd7f39b18fafc933f21a08ac724b5f97582194","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.context.extract_func_name_from_lines","uri":"program://Program_Conditioned_Adapter/function/modules.context.extract_func_name_from_lines#L94-L113","kind":"function","name":"extract_func_name_from_lines","path":"modules/context.py","language":"python","start_line":94,"end_line":113,"context_start_line":74,"context_end_line":133,"code":" src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n anchors: List[int] = []\n for i, line in enumerate(src_lines, start=1):\n s = line.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n anchors.append(i)\n if len(anchors) >= max(4, int(max_candidates) // max(1, len(files_))):\n break\n if not anchors:\n continue\n half = max(10, int(lines_each // 2))\n for ln in anchors:\n a = max(1, ln - half)\n b = min(len(src_lines), ln + half)\n out.append((rel, a, b, ln, src_lines[a - 1 : b]))\n return out\n\n\ndef extract_func_name_from_lines(lines_block: List[str], a: int, b: int, anchor_ln: int) -> Optional[str]:\n try:\n best_name = None\n best_dist = 10**9\n abs_ln = a\n import re as _re\n for ln_text in lines_block:\n s = ln_text.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n m = _re.match(r\"^(?:def|class)\\s+([A-Za-z0-9_]+)\", s)\n if m:\n name = m.group(1)\n dist = abs(abs_ln - anchor_ln)\n if dist < best_dist:\n best_dist = dist\n best_name = name\n abs_ln += 1\n return best_name\n except Exception:\n return None\n\n\n\ndef score_yes_no(tok, model, q: str) -> float:\n q_ids = tok(q, return_tensors=\"pt\")\n dev = next(model.parameters()).device\n q_ids = {k: v.to(dev) for k, v in q_ids.items()}\n with torch.no_grad():\n out_lm = model(**q_ids)\n # Support both HF-style outputs and raw tensor outputs\n if isinstance(out_lm, torch.Tensor):\n logits = out_lm\n else:\n logits = getattr(out_lm, \"logits\", None)\n if logits is None:\n # Best-effort fallbacks\n if isinstance(out_lm, (tuple, list)) and out_lm:\n logits = out_lm[0]\n else:\n logits = getattr(out_lm, \"last_hidden_state\", None)","source_hash":"832e3907fe931392f70bd4f4cbdd7f39b18fafc933f21a08ac724b5f97582194","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.context.score_yes_no","uri":"program://Program_Conditioned_Adapter/function/modules.context.score_yes_no#L117-L144","kind":"function","name":"score_yes_no","path":"modules/context.py","language":"python","start_line":117,"end_line":144,"context_start_line":97,"context_end_line":158,"code":" best_dist = 
10**9\n abs_ln = a\n import re as _re\n for ln_text in lines_block:\n s = ln_text.lstrip()\n if s.startswith(\"def \") or s.startswith(\"class \"):\n m = _re.match(r\"^(?:def|class)\\s+([A-Za-z0-9_]+)\", s)\n if m:\n name = m.group(1)\n dist = abs(abs_ln - anchor_ln)\n if dist < best_dist:\n best_dist = dist\n best_name = name\n abs_ln += 1\n return best_name\n except Exception:\n return None\n\n\n\ndef score_yes_no(tok, model, q: str) -> float:\n q_ids = tok(q, return_tensors=\"pt\")\n dev = next(model.parameters()).device\n q_ids = {k: v.to(dev) for k, v in q_ids.items()}\n with torch.no_grad():\n out_lm = model(**q_ids)\n # Support both HF-style outputs and raw tensor outputs\n if isinstance(out_lm, torch.Tensor):\n logits = out_lm\n else:\n logits = getattr(out_lm, \"logits\", None)\n if logits is None:\n # Best-effort fallbacks\n if isinstance(out_lm, (tuple, list)) and out_lm:\n logits = out_lm[0]\n else:\n logits = getattr(out_lm, \"last_hidden_state\", None)\n if logits is None:\n # As a last resort, return neutral 0.5\n return 0.5\n last = logits[:, -1, :]\n t1 = tok(\"1\", add_special_tokens=False).input_ids\n t0 = tok(\"0\", add_special_tokens=False).input_ids\n probs = torch.softmax(last, dim=-1)\n p1 = float(sum(probs[0, i].item() for i in (t1 or [])))\n p0 = float(sum(probs[0, i].item() for i in (t0 or [])))\n denom = max(1e-9, p1 + p0)\n return float(p1 / denom)\n\n\ndef model_prob_yes(tok, model, prompt_q: str, window_txt: str) -> Tuple[float, float]:\n rel_q = (\n \"Question: \" + prompt_q + \"\\nWindow:\\n\" + window_txt[:1800] + \"\\nDoes this window contain the core function or logic to answer the question? Answer 1 or 0.\"\n )\n noise_q = (\n \"Question: \" + prompt_q + \"\\nWindow:\\n\" + window_txt[:1800] + \"\\nIs this window likely test/tool/noise unrelated to answering the question? Answer 1 or 0.\"\n )\n return score_yes_no(tok, model, rel_q), score_yes_no(tok, model, noise_q)\n\n\n\n\"\"\"\"\"\" # end","source_hash":"832e3907fe931392f70bd4f4cbdd7f39b18fafc933f21a08ac724b5f97582194","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.context.model_prob_yes","uri":"program://Program_Conditioned_Adapter/function/modules.context.model_prob_yes#L147-L154","kind":"function","name":"model_prob_yes","path":"modules/context.py","language":"python","start_line":147,"end_line":154,"context_start_line":127,"context_end_line":158,"code":" logits = getattr(out_lm, \"logits\", None)\n if logits is None:\n # Best-effort fallbacks\n if isinstance(out_lm, (tuple, list)) and out_lm:\n logits = out_lm[0]\n else:\n logits = getattr(out_lm, \"last_hidden_state\", None)\n if logits is None:\n # As a last resort, return neutral 0.5\n return 0.5\n last = logits[:, -1, :]\n t1 = tok(\"1\", add_special_tokens=False).input_ids\n t0 = tok(\"0\", add_special_tokens=False).input_ids\n probs = torch.softmax(last, dim=-1)\n p1 = float(sum(probs[0, i].item() for i in (t1 or [])))\n p0 = float(sum(probs[0, i].item() for i in (t0 or [])))\n denom = max(1e-9, p1 + p0)\n return float(p1 / denom)\n\n\ndef model_prob_yes(tok, model, prompt_q: str, window_txt: str) -> Tuple[float, float]:\n rel_q = (\n \"Question: \" + prompt_q + \"\\nWindow:\\n\" + window_txt[:1800] + \"\\nDoes this window contain the core function or logic to answer the question? Answer 1 or 0.\"\n )\n noise_q = (\n \"Question: \" + prompt_q + \"\\nWindow:\\n\" + window_txt[:1800] + \"\\nIs this window likely test/tool/noise unrelated to answering the question? 
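score_yes_no reduces the model's judgment to a renormalized two-way choice: softmax the final-position logits, sum probability mass over the token ids of "1" and of "0", and return p1 / (p1 + p0). The arithmetic on a toy three-token vocabulary (the tensor and token ids are invented; torch.softmax is the real call used above):

import torch

probs = torch.softmax(torch.tensor([[2.0, 0.5, -1.0]]), dim=-1)
t1, t0 = [0], [1]  # pretend ids for the "1" and "0" tokens
p1 = sum(probs[0, i].item() for i in t1)  # ~0.786
p0 = sum(probs[0, i].item() for i in t0)  # ~0.175
print(p1 / max(1e-9, p1 + p0))            # ~0.82 -> "likely yes"

Renormalizing over just the two answer tokens makes the score insensitive to how much mass the model spreads across the rest of the vocabulary.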
Answer 1 or 0.\"\n )\n return score_yes_no(tok, model, rel_q), score_yes_no(tok, model, noise_q)\n\n\n\n\"\"\"\"\"\" # end","source_hash":"832e3907fe931392f70bd4f4cbdd7f39b18fafc933f21a08ac724b5f97582194","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.priors","uri":"program://Program_Conditioned_Adapter/module/modules.priors#L1-L1","kind":"module","name":"modules.priors","path":"modules/priors.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":1,"code":"# kbann_priors derivation, round_lora utilities (move from run_repo_adapter.py/build_repo_adapter.py)","source_hash":"9c70796ea58c2b80959642f1ca2f16d8f440ff5e286421f865314e6840912f36","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.targets","uri":"program://Program_Conditioned_Adapter/module/modules.targets#L1-L18","kind":"module","name":"modules.targets","path":"modules/targets.py","language":"python","start_line":1,"end_line":18,"context_start_line":1,"context_end_line":18,"code":"from typing import Optional, Dict, Tuple\n\ndef parse_target_shapes(arg: Optional[str]) -> Optional[Dict[str, Tuple[int, int]]]:\n if not arg:\n return None\n result: Dict[str, Tuple[int, int]] = {}\n try:\n parts = [p.strip() for p in str(arg).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p or \":\" not in p:\n continue\n name, dims = p.split(\"=\", 1)\n a, b = dims.split(\":\", 1)\n result[name.strip()] = (int(a), int(b))\n return result or None\n except Exception:\n return None\n","source_hash":"0ce757281692921280250e1e33a68a68b4dd9db9013497f1b9ea4450c24718d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.targets.parse_target_shapes","uri":"program://Program_Conditioned_Adapter/function/modules.targets.parse_target_shapes#L3-L17","kind":"function","name":"parse_target_shapes","path":"modules/targets.py","language":"python","start_line":3,"end_line":17,"context_start_line":1,"context_end_line":18,"code":"from typing import Optional, Dict, Tuple\n\ndef parse_target_shapes(arg: Optional[str]) -> Optional[Dict[str, Tuple[int, int]]]:\n if not arg:\n return None\n result: Dict[str, Tuple[int, int]] = {}\n try:\n parts = [p.strip() for p in str(arg).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p or \":\" not in p:\n continue\n name, dims = p.split(\"=\", 1)\n a, b = dims.split(\":\", 1)\n result[name.strip()] = (int(a), int(b))\n return result or None\n except Exception:\n return None\n","source_hash":"0ce757281692921280250e1e33a68a68b4dd9db9013497f1b9ea4450c24718d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.capacity","uri":"program://Program_Conditioned_Adapter/module/modules.capacity#L1-L106","kind":"module","name":"modules.capacity","path":"modules/capacity.py","language":"python","start_line":1,"end_line":106,"context_start_line":1,"context_end_line":106,"code":"from __future__ import annotations\n\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\n\n\ndef _parse_weights(spec: str) -> Dict[str, float]:\n # accept both \"program\" and \"repo\" as primary component key\n out: Dict[str, float] = {\"program\": 0.4, \"subgraph\": 0.4, \"question\": 0.2}\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p:\n continue\n k, v = p.split(\"=\", 1)\n out[str(k).strip()] = float(v)\n except Exception:\n pass\n s = float(sum(max(0.0, v) for v in out.values()))\n if s > 0:\n out 
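parse_target_shapes expects a CSV of name=rows:cols entries; malformed entries are skipped rather than raising, while an int parse failure anywhere abandons the whole spec and returns None. For example:

from modules.targets import parse_target_shapes

print(parse_target_shapes("q_proj=4096:4096, o_proj=4096:1024, oops"))
# {'q_proj': (4096, 4096), 'o_proj': (4096, 1024)}  ("oops" lacks '=' and ':')
print(parse_target_shapes(""))            # None (empty spec)
print(parse_target_shapes("q=4096:abc"))  # None (int() raises, caught wholesale)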
= {k: max(0.0, v) / s for k, v in out.items()}\n return out\n\n\ndef _safe_div(a: float, b: float) -> float:\n return (a / b) if (b > 0) else 0.0\n\n\ndef entropy_score(\n g: object,\n mods: List[str],\n files_rel: List[str],\n *,\n weights: str = \"program=0.4,subgraph=0.4,question=0.2\",\n) -> Tuple[float, Dict[str, float]]:\n \"\"\"Compute a 0..1 capacity score from program graph + selection.\n\n Components:\n - repo_component: module count (log-normalized) and import density\n - subgraph_component: selected modules' degree and breadth\n - question_component: number of files hit by the query\n \"\"\"\n try:\n n_mod = float(len(getattr(g, \"modules\", {}) or {}))\n except Exception:\n n_mod = 0.0\n try:\n import_edges = float(sum(len(v or []) for v in getattr(g, \"module_imports\", {}).values()))\n except Exception:\n import_edges = 0.0\n\n # Program-level\n repo_mod = min(1.0, _safe_div(np.log1p(n_mod), np.log1p(1000.0)))\n repo_deg = min(1.0, _safe_div(import_edges, max(1.0, n_mod * 8.0)))\n repo_comp = float(0.5 * repo_mod + 0.5 * repo_deg)\n\n # Subgraph-level\n mods_list = list(mods or [])\n sub_m = float(len(mods_list))\n sub_deg_sum = 0.0\n try:\n imports = getattr(g, \"module_imports\", {}) or {}\n except Exception:\n imports = {}\n for m in mods_list:\n try:\n indeg = sum(1 for _x, deps in imports.items() if m in (deps or []))\n outdeg = float(len(imports.get(m, []) or []))\n sub_deg_sum += float(indeg + outdeg)\n except Exception:\n continue\n sub_deg_norm = _safe_div(sub_deg_sum, max(1.0, sub_m * 8.0))\n sub_breadth = min(1.0, _safe_div(sub_m, max(1.0, n_mod)))\n sub_comp = float(0.5 * sub_deg_norm + 0.5 * sub_breadth)\n\n # Question-level\n q_files = float(len(files_rel or []))\n q_comp = min(1.0, _safe_div(q_files, 24.0))\n\n w = _parse_weights(weights)\n prim = w.get(\"program\", w.get(\"repo\", 0.4))\n score = float(prim * repo_comp + w.get(\"subgraph\", 0.4) * sub_comp + w.get(\"question\", 0.2) * q_comp)\n diag = {\n \"program_modules\": n_mod,\n \"program_import_edges\": import_edges,\n \"program_component\": repo_comp,\n \"subgraph_modules\": sub_m,\n \"subgraph_deg_norm\": sub_deg_norm,\n \"subgraph_component\": sub_comp,\n \"question_files\": q_files,\n \"question_component\": q_comp,\n \"score\": score,\n }\n return max(0.0, min(1.0, score)), diag\n\n\ndef scale_capacity(\n es: float, *, rank_min: int, rank_max: int, gsub_min: float, gsub_max: float\n) -> Tuple[int, float]:\n r = int(round(rank_min + es * max(0, (rank_max - rank_min))))\n g = float(gsub_min + es * max(0.0, (gsub_max - gsub_min)))\n r = max(rank_min, min(rank_max, r))\n g = max(min(gsub_max, max(gsub_min, g)), 0.0)\n return r, g\n","source_hash":"05e33fad6c4d97f0d8b3e970430186b95b71f20b74740aa0b07b48132b21c2a2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.capacity._parse_weights","uri":"program://Program_Conditioned_Adapter/function/modules.capacity._parse_weights#L8-L23","kind":"function","name":"_parse_weights","path":"modules/capacity.py","language":"python","start_line":8,"end_line":23,"context_start_line":1,"context_end_line":43,"code":"from __future__ import annotations\n\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\n\n\ndef _parse_weights(spec: str) -> Dict[str, float]:\n # accept both \"program\" and \"repo\" as primary component key\n out: Dict[str, float] = {\"program\": 0.4, \"subgraph\": 0.4, \"question\": 0.2}\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p:\n continue\n 
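_parse_weights starts from the defaults program=0.4, subgraph=0.4, question=0.2, overlays whatever the spec provides, clamps negatives to zero, and renormalizes to sum to 1, so unnormalized specs are fine:

from modules.capacity import _parse_weights

w = _parse_weights("program=2,subgraph=1,question=1")
print(w)  # {'program': 0.5, 'subgraph': 0.25, 'question': 0.25}

One subtlety: because "program" is always seeded as a default key, a spec written as repo=... adds a new key but never replaces the program weight, so as written the w.get("program", w.get("repo", 0.4)) fallback in entropy_score effectively never fires.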
k, v = p.split(\"=\", 1)\n out[str(k).strip()] = float(v)\n except Exception:\n pass\n s = float(sum(max(0.0, v) for v in out.values()))\n if s > 0:\n out = {k: max(0.0, v) / s for k, v in out.items()}\n return out\n\n\ndef _safe_div(a: float, b: float) -> float:\n return (a / b) if (b > 0) else 0.0\n\n\ndef entropy_score(\n g: object,\n mods: List[str],\n files_rel: List[str],\n *,\n weights: str = \"program=0.4,subgraph=0.4,question=0.2\",\n) -> Tuple[float, Dict[str, float]]:\n \"\"\"Compute a 0..1 capacity score from program graph + selection.\n\n Components:\n - repo_component: module count (log-normalized) and import density\n - subgraph_component: selected modules' degree and breadth\n - question_component: number of files hit by the query\n \"\"\"","source_hash":"05e33fad6c4d97f0d8b3e970430186b95b71f20b74740aa0b07b48132b21c2a2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.capacity._safe_div","uri":"program://Program_Conditioned_Adapter/function/modules.capacity._safe_div#L26-L27","kind":"function","name":"_safe_div","path":"modules/capacity.py","language":"python","start_line":26,"end_line":27,"context_start_line":6,"context_end_line":47,"code":"\n\ndef _parse_weights(spec: str) -> Dict[str, float]:\n # accept both \"program\" and \"repo\" as primary component key\n out: Dict[str, float] = {\"program\": 0.4, \"subgraph\": 0.4, \"question\": 0.2}\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p:\n continue\n k, v = p.split(\"=\", 1)\n out[str(k).strip()] = float(v)\n except Exception:\n pass\n s = float(sum(max(0.0, v) for v in out.values()))\n if s > 0:\n out = {k: max(0.0, v) / s for k, v in out.items()}\n return out\n\n\ndef _safe_div(a: float, b: float) -> float:\n return (a / b) if (b > 0) else 0.0\n\n\ndef entropy_score(\n g: object,\n mods: List[str],\n files_rel: List[str],\n *,\n weights: str = \"program=0.4,subgraph=0.4,question=0.2\",\n) -> Tuple[float, Dict[str, float]]:\n \"\"\"Compute a 0..1 capacity score from program graph + selection.\n\n Components:\n - repo_component: module count (log-normalized) and import density\n - subgraph_component: selected modules' degree and breadth\n - question_component: number of files hit by the query\n \"\"\"\n try:\n n_mod = float(len(getattr(g, \"modules\", {}) or {}))\n except Exception:\n n_mod = 0.0","source_hash":"05e33fad6c4d97f0d8b3e970430186b95b71f20b74740aa0b07b48132b21c2a2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.capacity.entropy_score","uri":"program://Program_Conditioned_Adapter/function/modules.capacity.entropy_score#L30-L95","kind":"function","name":"entropy_score","path":"modules/capacity.py","language":"python","start_line":30,"end_line":95,"context_start_line":10,"context_end_line":106,"code":" out: Dict[str, float] = {\"program\": 0.4, \"subgraph\": 0.4, \"question\": 0.2}\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p:\n continue\n k, v = p.split(\"=\", 1)\n out[str(k).strip()] = float(v)\n except Exception:\n pass\n s = float(sum(max(0.0, v) for v in out.values()))\n if s > 0:\n out = {k: max(0.0, v) / s for k, v in out.items()}\n return out\n\n\ndef _safe_div(a: float, b: float) -> float:\n return (a / b) if (b > 0) else 0.0\n\n\ndef entropy_score(\n g: object,\n mods: List[str],\n files_rel: List[str],\n *,\n weights: str = \"program=0.4,subgraph=0.4,question=0.2\",\n) -> Tuple[float, Dict[str, 
float]]:\n \"\"\"Compute a 0..1 capacity score from program graph + selection.\n\n Components:\n - repo_component: module count (log-normalized) and import density\n - subgraph_component: selected modules' degree and breadth\n - question_component: number of files hit by the query\n \"\"\"\n try:\n n_mod = float(len(getattr(g, \"modules\", {}) or {}))\n except Exception:\n n_mod = 0.0\n try:\n import_edges = float(sum(len(v or []) for v in getattr(g, \"module_imports\", {}).values()))\n except Exception:\n import_edges = 0.0\n\n # Program-level\n repo_mod = min(1.0, _safe_div(np.log1p(n_mod), np.log1p(1000.0)))\n repo_deg = min(1.0, _safe_div(import_edges, max(1.0, n_mod * 8.0)))\n repo_comp = float(0.5 * repo_mod + 0.5 * repo_deg)\n\n # Subgraph-level\n mods_list = list(mods or [])\n sub_m = float(len(mods_list))\n sub_deg_sum = 0.0\n try:\n imports = getattr(g, \"module_imports\", {}) or {}\n except Exception:\n imports = {}\n for m in mods_list:\n try:\n indeg = sum(1 for _x, deps in imports.items() if m in (deps or []))\n outdeg = float(len(imports.get(m, []) or []))\n sub_deg_sum += float(indeg + outdeg)\n except Exception:\n continue\n sub_deg_norm = _safe_div(sub_deg_sum, max(1.0, sub_m * 8.0))\n sub_breadth = min(1.0, _safe_div(sub_m, max(1.0, n_mod)))\n sub_comp = float(0.5 * sub_deg_norm + 0.5 * sub_breadth)\n\n # Question-level\n q_files = float(len(files_rel or []))\n q_comp = min(1.0, _safe_div(q_files, 24.0))\n\n w = _parse_weights(weights)\n prim = w.get(\"program\", w.get(\"repo\", 0.4))\n score = float(prim * repo_comp + w.get(\"subgraph\", 0.4) * sub_comp + w.get(\"question\", 0.2) * q_comp)\n diag = {\n \"program_modules\": n_mod,\n \"program_import_edges\": import_edges,\n \"program_component\": repo_comp,\n \"subgraph_modules\": sub_m,\n \"subgraph_deg_norm\": sub_deg_norm,\n \"subgraph_component\": sub_comp,\n \"question_files\": q_files,\n \"question_component\": q_comp,\n \"score\": score,\n }\n return max(0.0, min(1.0, score)), diag\n\n\ndef scale_capacity(\n es: float, *, rank_min: int, rank_max: int, gsub_min: float, gsub_max: float\n) -> Tuple[int, float]:\n r = int(round(rank_min + es * max(0, (rank_max - rank_min))))\n g = float(gsub_min + es * max(0.0, (gsub_max - gsub_min)))\n r = max(rank_min, min(rank_max, r))\n g = max(min(gsub_max, max(gsub_min, g)), 0.0)\n return r, g\n","source_hash":"05e33fad6c4d97f0d8b3e970430186b95b71f20b74740aa0b07b48132b21c2a2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.capacity.scale_capacity","uri":"program://Program_Conditioned_Adapter/function/modules.capacity.scale_capacity#L98-L105","kind":"function","name":"scale_capacity","path":"modules/capacity.py","language":"python","start_line":98,"end_line":105,"context_start_line":78,"context_end_line":106,"code":" q_files = float(len(files_rel or []))\n q_comp = min(1.0, _safe_div(q_files, 24.0))\n\n w = _parse_weights(weights)\n prim = w.get(\"program\", w.get(\"repo\", 0.4))\n score = float(prim * repo_comp + w.get(\"subgraph\", 0.4) * sub_comp + w.get(\"question\", 0.2) * q_comp)\n diag = {\n \"program_modules\": n_mod,\n \"program_import_edges\": import_edges,\n \"program_component\": repo_comp,\n \"subgraph_modules\": sub_m,\n \"subgraph_deg_norm\": sub_deg_norm,\n \"subgraph_component\": sub_comp,\n \"question_files\": q_files,\n \"question_component\": q_comp,\n \"score\": score,\n }\n return max(0.0, min(1.0, score)), diag\n\n\ndef scale_capacity(\n es: float, *, rank_min: int, rank_max: int, gsub_min: float, gsub_max: float\n) -> 
Tuple[int, float]:\n r = int(round(rank_min + es * max(0, (rank_max - rank_min))))\n g = float(gsub_min + es * max(0.0, (gsub_max - gsub_min)))\n r = max(rank_min, min(rank_max, r))\n g = max(min(gsub_max, max(gsub_min, g)), 0.0)\n return r, g\n","source_hash":"05e33fad6c4d97f0d8b3e970430186b95b71f20b74740aa0b07b48132b21c2a2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy","uri":"program://Program_Conditioned_Adapter/module/modules.retrieval_policy#L1-L116","kind":"module","name":"modules.retrieval_policy","path":"modules/retrieval_policy.py","language":"python","start_line":1,"end_line":116,"context_start_line":1,"context_end_line":116,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Dict, Tuple, Iterable, Optional, List, Set\nimport math\n\nfrom .program_graph import ProgramGraph, Entity, Edge, Span\n\n\n@dataclass\nclass RetrievalMix:\n sim: float = 0.6\n struct: float = 0.4\n temp: float = 0.7\n\n @staticmethod\n def parse(spec: Optional[str], temp: Optional[float]) -> \"RetrievalMix\":\n if not spec:\n return RetrievalMix(temp=(temp if isinstance(temp, (int, float)) else 0.7))\n w_sim = 0.0\n w_struct = 0.0\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \":\" not in p:\n continue\n k, v = p.split(\":\", 1)\n if k.strip() == \"sim\":\n w_sim = float(v)\n elif k.strip() == \"struct\":\n w_struct = float(v)\n except Exception:\n w_sim, w_struct = 0.6, 0.4\n # normalize weights\n s = max(1e-6, (w_sim + w_struct))\n w_sim /= s\n w_struct /= s\n return RetrievalMix(sim=w_sim, struct=w_struct, temp=(temp if isinstance(temp, (int, float)) else 0.7))\n\n\nclass RetrievalPolicy:\n def __init__(self, mix: RetrievalMix):\n self.mix = mix\n\n @staticmethod\n def from_spec(policy: Optional[str], temp: Optional[float] = None) -> \"RetrievalPolicy\":\n return RetrievalPolicy(RetrievalMix.parse(policy, temp))\n\n def score_entities(self, query: str, pg: ProgramGraph) -> Dict[str, float]:\n # sim score: token overlap against entity name (very light BM25-ish)\n q_tokens = _tokenize(query)\n entities = list(pg.entities())\n sim: Dict[str, float] = {}\n for e in entities:\n name_tokens = _tokenize(e.name)\n overlap = len(q_tokens & name_tokens)\n sim[e.id] = float(overlap)\n # struct score: graph distance from top sim seeds (k=8), shorter distance => higher score\n seeds = sorted(entities, key=lambda x: sim.get(x.id, 0.0), reverse=True)[:8]\n seed_ids = [e.id for e in seeds if sim.get(e.id, 0.0) > 0]\n dist = _bfs_distance(seed_ids, pg)\n struct: Dict[str, float] = {}\n for e in entities:\n d = dist.get(e.id, None)\n if d is None:\n struct[e.id] = 0.0\n else:\n # invert distance with light decay; d=0 => 1.0\n struct[e.id] = 1.0 / float(1 + d)\n # blend and softmax with temperature\n raw: Dict[str, float] = {}\n for e in entities:\n raw[e.id] = self.mix.sim * sim.get(e.id, 0.0) + self.mix.struct * struct.get(e.id, 0.0)\n return _softmax(raw, temperature=max(1e-3, float(self.mix.temp)))\n\n\ndef _tokenize(text: str) -> Set[str]:\n import re\n toks = [t.lower() for t in re.findall(r\"[A-Za-z0-9_]+\", text or \"\")]\n return set(t for t in toks if len(t) > 1)\n\n\ndef _bfs_distance(seeds: List[str], pg: ProgramGraph, max_depth: int = 3) -> Dict[str, int]:\n # build adjacency from edges()\n adj: Dict[str, List[str]] = {}\n for e in pg.edges():\n adj.setdefault(e.src, []).append(e.dst)\n adj.setdefault(e.dst, []).append(e.src)\n dist: Dict[str, int] = {}\n 
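A worked pass through entropy_score and scale_capacity with a stub graph object; the attribute names .modules and .module_imports are exactly what entropy_score reads via getattr, and the numbers follow from the formulas above:

from types import SimpleNamespace

from modules.capacity import entropy_score, scale_capacity

g = SimpleNamespace(
    modules={f"m{i}": None for i in range(40)},
    module_imports={"m0": ["m1", "m2"], "m1": ["m2"], "m2": []},
)
es, diag = entropy_score(g, ["m0", "m1"], ["a.py", "b.py"])
# program: log1p(40)/log1p(1000) ~ 0.537 blended with import density ~ 0.009;
# subgraph: degree sum 4 over 2 selected modules; question: 2 files / 24.
print(round(es, 3))  # ~0.186
rank, gsub = scale_capacity(es, rank_min=8, rank_max=32, gsub_min=0.6, gsub_max=0.9)
print(rank, round(gsub, 3))  # 12 0.656

The score rises with program size, subgraph connectivity, and query breadth, so a broad question over a densely connected selection earns a higher-rank, higher-gain adapter.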
cur = list(seeds)\n for s in seeds:\n dist[s] = 0\n depth = 0\n while cur and depth < max_depth:\n nxt: List[str] = []\n for u in cur:\n for v in adj.get(u, []):\n if v not in dist:\n dist[v] = dist[u] + 1\n nxt.append(v)\n cur = nxt\n depth += 1\n return dist\n\n\ndef _softmax(scores: Dict[str, float], temperature: float) -> Dict[str, float]:\n # numerically stable softmax over values\n vals = list(scores.values())\n if not vals:\n return {}\n m = max(vals)\n exps = {k: math.exp((v - m) / temperature) for k, v in scores.items()}\n s = sum(exps.values()) or 1.0\n return {k: (v / s) for k, v in exps.items()}\n\n","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy.RetrievalMix","uri":"program://Program_Conditioned_Adapter/class/modules.retrieval_policy.RetrievalMix#L11-L38","kind":"class","name":"RetrievalMix","path":"modules/retrieval_policy.py","language":"python","start_line":11,"end_line":38,"context_start_line":1,"context_end_line":58,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Dict, Tuple, Iterable, Optional, List, Set\nimport math\n\nfrom .program_graph import ProgramGraph, Entity, Edge, Span\n\n\n@dataclass\nclass RetrievalMix:\n sim: float = 0.6\n struct: float = 0.4\n temp: float = 0.7\n\n @staticmethod\n def parse(spec: Optional[str], temp: Optional[float]) -> \"RetrievalMix\":\n if not spec:\n return RetrievalMix(temp=(temp if isinstance(temp, (int, float)) else 0.7))\n w_sim = 0.0\n w_struct = 0.0\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \":\" not in p:\n continue\n k, v = p.split(\":\", 1)\n if k.strip() == \"sim\":\n w_sim = float(v)\n elif k.strip() == \"struct\":\n w_struct = float(v)\n except Exception:\n w_sim, w_struct = 0.6, 0.4\n # normalize weights\n s = max(1e-6, (w_sim + w_struct))\n w_sim /= s\n w_struct /= s\n return RetrievalMix(sim=w_sim, struct=w_struct, temp=(temp if isinstance(temp, (int, float)) else 0.7))\n\n\nclass RetrievalPolicy:\n def __init__(self, mix: RetrievalMix):\n self.mix = mix\n\n @staticmethod\n def from_spec(policy: Optional[str], temp: Optional[float] = None) -> \"RetrievalPolicy\":\n return RetrievalPolicy(RetrievalMix.parse(policy, temp))\n\n def score_entities(self, query: str, pg: ProgramGraph) -> Dict[str, float]:\n # sim score: token overlap against entity name (very light BM25-ish)\n q_tokens = _tokenize(query)\n entities = list(pg.entities())\n sim: Dict[str, float] = {}\n for e in entities:\n name_tokens = _tokenize(e.name)\n overlap = len(q_tokens & name_tokens)\n sim[e.id] = float(overlap)\n # struct score: graph distance from top sim seeds (k=8), shorter distance => higher score","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy.RetrievalPolicy","uri":"program://Program_Conditioned_Adapter/class/modules.retrieval_policy.RetrievalPolicy#L41-L74","kind":"class","name":"RetrievalPolicy","path":"modules/retrieval_policy.py","language":"python","start_line":41,"end_line":74,"context_start_line":21,"context_end_line":94,"code":" w_struct = 0.0\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \":\" not in p:\n continue\n k, v = p.split(\":\", 1)\n if k.strip() == \"sim\":\n w_sim = float(v)\n elif k.strip() == 
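RetrievalMix.parse normalizes the sim/struct weights, so callers can write a --retrieval-policy spec in any scale; the temperature falls back to 0.7 when not supplied:

from modules.retrieval_policy import RetrievalMix, RetrievalPolicy

mix = RetrievalMix.parse("sim:3,struct:1", temp=0.5)
print(mix.sim, mix.struct, mix.temp)  # 0.75 0.25 0.5
policy = RetrievalPolicy.from_spec("sim:0.6,struct:0.4")  # temp -> 0.7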
\"struct\":\n w_struct = float(v)\n except Exception:\n w_sim, w_struct = 0.6, 0.4\n # normalize weights\n s = max(1e-6, (w_sim + w_struct))\n w_sim /= s\n w_struct /= s\n return RetrievalMix(sim=w_sim, struct=w_struct, temp=(temp if isinstance(temp, (int, float)) else 0.7))\n\n\nclass RetrievalPolicy:\n def __init__(self, mix: RetrievalMix):\n self.mix = mix\n\n @staticmethod\n def from_spec(policy: Optional[str], temp: Optional[float] = None) -> \"RetrievalPolicy\":\n return RetrievalPolicy(RetrievalMix.parse(policy, temp))\n\n def score_entities(self, query: str, pg: ProgramGraph) -> Dict[str, float]:\n # sim score: token overlap against entity name (very light BM25-ish)\n q_tokens = _tokenize(query)\n entities = list(pg.entities())\n sim: Dict[str, float] = {}\n for e in entities:\n name_tokens = _tokenize(e.name)\n overlap = len(q_tokens & name_tokens)\n sim[e.id] = float(overlap)\n # struct score: graph distance from top sim seeds (k=8), shorter distance => higher score\n seeds = sorted(entities, key=lambda x: sim.get(x.id, 0.0), reverse=True)[:8]\n seed_ids = [e.id for e in seeds if sim.get(e.id, 0.0) > 0]\n dist = _bfs_distance(seed_ids, pg)\n struct: Dict[str, float] = {}\n for e in entities:\n d = dist.get(e.id, None)\n if d is None:\n struct[e.id] = 0.0\n else:\n # invert distance with light decay; d=0 => 1.0\n struct[e.id] = 1.0 / float(1 + d)\n # blend and softmax with temperature\n raw: Dict[str, float] = {}\n for e in entities:\n raw[e.id] = self.mix.sim * sim.get(e.id, 0.0) + self.mix.struct * struct.get(e.id, 0.0)\n return _softmax(raw, temperature=max(1e-3, float(self.mix.temp)))\n\n\ndef _tokenize(text: str) -> Set[str]:\n import re\n toks = [t.lower() for t in re.findall(r\"[A-Za-z0-9_]+\", text or \"\")]\n return set(t for t in toks if len(t) > 1)\n\n\ndef _bfs_distance(seeds: List[str], pg: ProgramGraph, max_depth: int = 3) -> Dict[str, int]:\n # build adjacency from edges()\n adj: Dict[str, List[str]] = {}\n for e in pg.edges():\n adj.setdefault(e.src, []).append(e.dst)\n adj.setdefault(e.dst, []).append(e.src)\n dist: Dict[str, int] = {}\n cur = list(seeds)\n for s in seeds:\n dist[s] = 0\n depth = 0\n while cur and depth < max_depth:","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy._tokenize","uri":"program://Program_Conditioned_Adapter/function/modules.retrieval_policy._tokenize#L77-L80","kind":"function","name":"_tokenize","path":"modules/retrieval_policy.py","language":"python","start_line":77,"end_line":80,"context_start_line":57,"context_end_line":100,"code":" sim[e.id] = float(overlap)\n # struct score: graph distance from top sim seeds (k=8), shorter distance => higher score\n seeds = sorted(entities, key=lambda x: sim.get(x.id, 0.0), reverse=True)[:8]\n seed_ids = [e.id for e in seeds if sim.get(e.id, 0.0) > 0]\n dist = _bfs_distance(seed_ids, pg)\n struct: Dict[str, float] = {}\n for e in entities:\n d = dist.get(e.id, None)\n if d is None:\n struct[e.id] = 0.0\n else:\n # invert distance with light decay; d=0 => 1.0\n struct[e.id] = 1.0 / float(1 + d)\n # blend and softmax with temperature\n raw: Dict[str, float] = {}\n for e in entities:\n raw[e.id] = self.mix.sim * sim.get(e.id, 0.0) + self.mix.struct * struct.get(e.id, 0.0)\n return _softmax(raw, temperature=max(1e-3, float(self.mix.temp)))\n\n\ndef _tokenize(text: str) -> Set[str]:\n import re\n toks = [t.lower() for t in re.findall(r\"[A-Za-z0-9_]+\", text or \"\")]\n 
return set(t for t in toks if len(t) > 1)\n\n\ndef _bfs_distance(seeds: List[str], pg: ProgramGraph, max_depth: int = 3) -> Dict[str, int]:\n # build adjacency from edges()\n adj: Dict[str, List[str]] = {}\n for e in pg.edges():\n adj.setdefault(e.src, []).append(e.dst)\n adj.setdefault(e.dst, []).append(e.src)\n dist: Dict[str, int] = {}\n cur = list(seeds)\n for s in seeds:\n dist[s] = 0\n depth = 0\n while cur and depth < max_depth:\n nxt: List[str] = []\n for u in cur:\n for v in adj.get(u, []):\n if v not in dist:\n dist[v] = dist[u] + 1\n nxt.append(v)","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy._bfs_distance","uri":"program://Program_Conditioned_Adapter/function/modules.retrieval_policy._bfs_distance#L83-L103","kind":"function","name":"_bfs_distance","path":"modules/retrieval_policy.py","language":"python","start_line":83,"end_line":103,"context_start_line":63,"context_end_line":116,"code":" for e in entities:\n d = dist.get(e.id, None)\n if d is None:\n struct[e.id] = 0.0\n else:\n # invert distance with light decay; d=0 => 1.0\n struct[e.id] = 1.0 / float(1 + d)\n # blend and softmax with temperature\n raw: Dict[str, float] = {}\n for e in entities:\n raw[e.id] = self.mix.sim * sim.get(e.id, 0.0) + self.mix.struct * struct.get(e.id, 0.0)\n return _softmax(raw, temperature=max(1e-3, float(self.mix.temp)))\n\n\ndef _tokenize(text: str) -> Set[str]:\n import re\n toks = [t.lower() for t in re.findall(r\"[A-Za-z0-9_]+\", text or \"\")]\n return set(t for t in toks if len(t) > 1)\n\n\ndef _bfs_distance(seeds: List[str], pg: ProgramGraph, max_depth: int = 3) -> Dict[str, int]:\n # build adjacency from edges()\n adj: Dict[str, List[str]] = {}\n for e in pg.edges():\n adj.setdefault(e.src, []).append(e.dst)\n adj.setdefault(e.dst, []).append(e.src)\n dist: Dict[str, int] = {}\n cur = list(seeds)\n for s in seeds:\n dist[s] = 0\n depth = 0\n while cur and depth < max_depth:\n nxt: List[str] = []\n for u in cur:\n for v in adj.get(u, []):\n if v not in dist:\n dist[v] = dist[u] + 1\n nxt.append(v)\n cur = nxt\n depth += 1\n return dist\n\n\ndef _softmax(scores: Dict[str, float], temperature: float) -> Dict[str, float]:\n # numerically stable softmax over values\n vals = list(scores.values())\n if not vals:\n return {}\n m = max(vals)\n exps = {k: math.exp((v - m) / temperature) for k, v in scores.items()}\n s = sum(exps.values()) or 1.0\n return {k: (v / s) for k, v in exps.items()}\n\n","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy._softmax","uri":"program://Program_Conditioned_Adapter/function/modules.retrieval_policy._softmax#L106-L114","kind":"function","name":"_softmax","path":"modules/retrieval_policy.py","language":"python","start_line":106,"end_line":114,"context_start_line":86,"context_end_line":116,"code":" for e in pg.edges():\n adj.setdefault(e.src, []).append(e.dst)\n adj.setdefault(e.dst, []).append(e.src)\n dist: Dict[str, int] = {}\n cur = list(seeds)\n for s in seeds:\n dist[s] = 0\n depth = 0\n while cur and depth < max_depth:\n nxt: List[str] = []\n for u in cur:\n for v in adj.get(u, []):\n if v not in dist:\n dist[v] = dist[u] + 1\n nxt.append(v)\n cur = nxt\n depth += 1\n return dist\n\n\ndef _softmax(scores: Dict[str, float], temperature: float) -> Dict[str, float]:\n # numerically stable softmax 
over values\n vals = list(scores.values())\n if not vals:\n return {}\n m = max(vals)\n exps = {k: math.exp((v - m) / temperature) for k, v in scores.items()}\n s = sum(exps.values()) or 1.0\n return {k: (v / s) for k, v in exps.items()}\n\n","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy.parse","uri":"program://Program_Conditioned_Adapter/function/modules.retrieval_policy.parse#L17-L38","kind":"function","name":"parse","path":"modules/retrieval_policy.py","language":"python","start_line":17,"end_line":38,"context_start_line":1,"context_end_line":58,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Dict, Tuple, Iterable, Optional, List, Set\nimport math\n\nfrom .program_graph import ProgramGraph, Entity, Edge, Span\n\n\n@dataclass\nclass RetrievalMix:\n sim: float = 0.6\n struct: float = 0.4\n temp: float = 0.7\n\n @staticmethod\n def parse(spec: Optional[str], temp: Optional[float]) -> \"RetrievalMix\":\n if not spec:\n return RetrievalMix(temp=(temp if isinstance(temp, (int, float)) else 0.7))\n w_sim = 0.0\n w_struct = 0.0\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \":\" not in p:\n continue\n k, v = p.split(\":\", 1)\n if k.strip() == \"sim\":\n w_sim = float(v)\n elif k.strip() == \"struct\":\n w_struct = float(v)\n except Exception:\n w_sim, w_struct = 0.6, 0.4\n # normalize weights\n s = max(1e-6, (w_sim + w_struct))\n w_sim /= s\n w_struct /= s\n return RetrievalMix(sim=w_sim, struct=w_struct, temp=(temp if isinstance(temp, (int, float)) else 0.7))\n\n\nclass RetrievalPolicy:\n def __init__(self, mix: RetrievalMix):\n self.mix = mix\n\n @staticmethod\n def from_spec(policy: Optional[str], temp: Optional[float] = None) -> \"RetrievalPolicy\":\n return RetrievalPolicy(RetrievalMix.parse(policy, temp))\n\n def score_entities(self, query: str, pg: ProgramGraph) -> Dict[str, float]:\n # sim score: token overlap against entity name (very light BM25-ish)\n q_tokens = _tokenize(query)\n entities = list(pg.entities())\n sim: Dict[str, float] = {}\n for e in entities:\n name_tokens = _tokenize(e.name)\n overlap = len(q_tokens & name_tokens)\n sim[e.id] = float(overlap)\n # struct score: graph distance from top sim seeds (k=8), shorter distance => higher score","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy.__init__","uri":"program://Program_Conditioned_Adapter/function/modules.retrieval_policy.__init__#L42-L43","kind":"function","name":"__init__","path":"modules/retrieval_policy.py","language":"python","start_line":42,"end_line":43,"context_start_line":22,"context_end_line":63,"code":" try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \":\" not in p:\n continue\n k, v = p.split(\":\", 1)\n if k.strip() == \"sim\":\n w_sim = float(v)\n elif k.strip() == \"struct\":\n w_struct = float(v)\n except Exception:\n w_sim, w_struct = 0.6, 0.4\n # normalize weights\n s = max(1e-6, (w_sim + w_struct))\n w_sim /= s\n w_struct /= s\n return RetrievalMix(sim=w_sim, struct=w_struct, temp=(temp if isinstance(temp, (int, float)) else 0.7))\n\n\nclass RetrievalPolicy:\n def __init__(self, mix: RetrievalMix):\n self.mix = mix\n\n @staticmethod\n def from_spec(policy: Optional[str], temp: 
Optional[float] = None) -> \"RetrievalPolicy\":\n return RetrievalPolicy(RetrievalMix.parse(policy, temp))\n\n def score_entities(self, query: str, pg: ProgramGraph) -> Dict[str, float]:\n # sim score: token overlap against entity name (very light BM25-ish)\n q_tokens = _tokenize(query)\n entities = list(pg.entities())\n sim: Dict[str, float] = {}\n for e in entities:\n name_tokens = _tokenize(e.name)\n overlap = len(q_tokens & name_tokens)\n sim[e.id] = float(overlap)\n # struct score: graph distance from top sim seeds (k=8), shorter distance => higher score\n seeds = sorted(entities, key=lambda x: sim.get(x.id, 0.0), reverse=True)[:8]\n seed_ids = [e.id for e in seeds if sim.get(e.id, 0.0) > 0]\n dist = _bfs_distance(seed_ids, pg)\n struct: Dict[str, float] = {}\n for e in entities:","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy.from_spec","uri":"program://Program_Conditioned_Adapter/function/modules.retrieval_policy.from_spec#L46-L47","kind":"function","name":"from_spec","path":"modules/retrieval_policy.py","language":"python","start_line":46,"end_line":47,"context_start_line":26,"context_end_line":67,"code":" continue\n k, v = p.split(\":\", 1)\n if k.strip() == \"sim\":\n w_sim = float(v)\n elif k.strip() == \"struct\":\n w_struct = float(v)\n except Exception:\n w_sim, w_struct = 0.6, 0.4\n # normalize weights\n s = max(1e-6, (w_sim + w_struct))\n w_sim /= s\n w_struct /= s\n return RetrievalMix(sim=w_sim, struct=w_struct, temp=(temp if isinstance(temp, (int, float)) else 0.7))\n\n\nclass RetrievalPolicy:\n def __init__(self, mix: RetrievalMix):\n self.mix = mix\n\n @staticmethod\n def from_spec(policy: Optional[str], temp: Optional[float] = None) -> \"RetrievalPolicy\":\n return RetrievalPolicy(RetrievalMix.parse(policy, temp))\n\n def score_entities(self, query: str, pg: ProgramGraph) -> Dict[str, float]:\n # sim score: token overlap against entity name (very light BM25-ish)\n q_tokens = _tokenize(query)\n entities = list(pg.entities())\n sim: Dict[str, float] = {}\n for e in entities:\n name_tokens = _tokenize(e.name)\n overlap = len(q_tokens & name_tokens)\n sim[e.id] = float(overlap)\n # struct score: graph distance from top sim seeds (k=8), shorter distance => higher score\n seeds = sorted(entities, key=lambda x: sim.get(x.id, 0.0), reverse=True)[:8]\n seed_ids = [e.id for e in seeds if sim.get(e.id, 0.0) > 0]\n dist = _bfs_distance(seed_ids, pg)\n struct: Dict[str, float] = {}\n for e in entities:\n d = dist.get(e.id, None)\n if d is None:\n struct[e.id] = 0.0\n else:","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.retrieval_policy.score_entities","uri":"program://Program_Conditioned_Adapter/function/modules.retrieval_policy.score_entities#L49-L74","kind":"function","name":"score_entities","path":"modules/retrieval_policy.py","language":"python","start_line":49,"end_line":74,"context_start_line":29,"context_end_line":94,"code":" w_sim = float(v)\n elif k.strip() == \"struct\":\n w_struct = float(v)\n except Exception:\n w_sim, w_struct = 0.6, 0.4\n # normalize weights\n s = max(1e-6, (w_sim + w_struct))\n w_sim /= s\n w_struct /= s\n return RetrievalMix(sim=w_sim, struct=w_struct, temp=(temp if isinstance(temp, (int, float)) else 0.7))\n\n\nclass RetrievalPolicy:\n def __init__(self, mix: RetrievalMix):\n self.mix = mix\n\n 
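# Usage sketch (pg is any ProgramGraph implementation):\n #   policy = RetrievalPolicy.from_spec(\"sim:0.6,struct:0.4\", temp=0.7)\n #   probs = policy.score_entities(\"how does generation work\", pg)\n # probs is a softmax distribution over entity ids; parse() renormalizes the\n # weights, so \"sim:3,struct:1\" behaves like \"sim:0.75,struct:0.25\".\n\n 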
@staticmethod\n def from_spec(policy: Optional[str], temp: Optional[float] = None) -> \"RetrievalPolicy\":\n return RetrievalPolicy(RetrievalMix.parse(policy, temp))\n\n def score_entities(self, query: str, pg: ProgramGraph) -> Dict[str, float]:\n # sim score: token overlap against entity name (very light BM25-ish)\n q_tokens = _tokenize(query)\n entities = list(pg.entities())\n sim: Dict[str, float] = {}\n for e in entities:\n name_tokens = _tokenize(e.name)\n overlap = len(q_tokens & name_tokens)\n sim[e.id] = float(overlap)\n # struct score: graph distance from top sim seeds (k=8), shorter distance => higher score\n seeds = sorted(entities, key=lambda x: sim.get(x.id, 0.0), reverse=True)[:8]\n seed_ids = [e.id for e in seeds if sim.get(e.id, 0.0) > 0]\n dist = _bfs_distance(seed_ids, pg)\n struct: Dict[str, float] = {}\n for e in entities:\n d = dist.get(e.id, None)\n if d is None:\n struct[e.id] = 0.0\n else:\n # invert distance with light decay; d=0 => 1.0\n struct[e.id] = 1.0 / float(1 + d)\n # blend and softmax with temperature\n raw: Dict[str, float] = {}\n for e in entities:\n raw[e.id] = self.mix.sim * sim.get(e.id, 0.0) + self.mix.struct * struct.get(e.id, 0.0)\n return _softmax(raw, temperature=max(1e-3, float(self.mix.temp)))\n\n\ndef _tokenize(text: str) -> Set[str]:\n import re\n toks = [t.lower() for t in re.findall(r\"[A-Za-z0-9_]+\", text or \"\")]\n return set(t for t in toks if len(t) > 1)\n\n\ndef _bfs_distance(seeds: List[str], pg: ProgramGraph, max_depth: int = 3) -> Dict[str, int]:\n # build adjacency from edges()\n adj: Dict[str, List[str]] = {}\n for e in pg.edges():\n adj.setdefault(e.src, []).append(e.dst)\n adj.setdefault(e.dst, []).append(e.src)\n dist: Dict[str, int] = {}\n cur = list(seeds)\n for s in seeds:\n dist[s] = 0\n depth = 0\n while cur and depth < max_depth:","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.provenance","uri":"program://Program_Conditioned_Adapter/module/modules.provenance#L1-L22","kind":"module","name":"modules.provenance","path":"modules/provenance.py","language":"python","start_line":1,"end_line":22,"context_start_line":1,"context_end_line":22,"code":"import subprocess\nfrom typing import Optional\n\n\ndef git_commit_sha(repo_root: str) -> Optional[str]:\n try:\n sha = subprocess.check_output(\n [\"git\", \"-C\", repo_root, \"rev-parse\", \"HEAD\"], stderr=subprocess.DEVNULL, text=True\n ).strip()\n return sha if sha else None\n except Exception:\n return None\n\ndef git_tree_sha(repo_root: str) -> Optional[str]:\n \"\"\"Return the HEAD tree SHA if available (pins exact tracked file set).\"\"\"\n try:\n sha = subprocess.check_output(\n [\"git\", \"-C\", repo_root, \"rev-parse\", \"HEAD^{tree}\"], stderr=subprocess.DEVNULL, text=True\n ).strip()\n return sha if sha else None\n except Exception:\n return None","source_hash":"1681deb157113b893a10b6d1d63ce8939073f501432c3ab1971b7fcf75942fa3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.provenance.git_commit_sha","uri":"program://Program_Conditioned_Adapter/function/modules.provenance.git_commit_sha#L5-L12","kind":"function","name":"git_commit_sha","path":"modules/provenance.py","language":"python","start_line":5,"end_line":12,"context_start_line":1,"context_end_line":22,"code":"import subprocess\nfrom typing import Optional\n\n\ndef git_commit_sha(repo_root: str) -> Optional[str]:\n try:\n sha = subprocess.check_output(\n [\"git\", 
\"-C\", repo_root, \"rev-parse\", \"HEAD\"], stderr=subprocess.DEVNULL, text=True\n ).strip()\n return sha if sha else None\n except Exception:\n return None\n\ndef git_tree_sha(repo_root: str) -> Optional[str]:\n \"\"\"Return the HEAD tree SHA if available (pins exact tracked file set).\"\"\"\n try:\n sha = subprocess.check_output(\n [\"git\", \"-C\", repo_root, \"rev-parse\", \"HEAD^{tree}\"], stderr=subprocess.DEVNULL, text=True\n ).strip()\n return sha if sha else None\n except Exception:\n return None","source_hash":"1681deb157113b893a10b6d1d63ce8939073f501432c3ab1971b7fcf75942fa3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.provenance.git_tree_sha","uri":"program://Program_Conditioned_Adapter/function/modules.provenance.git_tree_sha#L14-L22","kind":"function","name":"git_tree_sha","path":"modules/provenance.py","language":"python","start_line":14,"end_line":22,"context_start_line":1,"context_end_line":22,"code":"import subprocess\nfrom typing import Optional\n\n\ndef git_commit_sha(repo_root: str) -> Optional[str]:\n try:\n sha = subprocess.check_output(\n [\"git\", \"-C\", repo_root, \"rev-parse\", \"HEAD\"], stderr=subprocess.DEVNULL, text=True\n ).strip()\n return sha if sha else None\n except Exception:\n return None\n\ndef git_tree_sha(repo_root: str) -> Optional[str]:\n \"\"\"Return the HEAD tree SHA if available (pins exact tracked file set).\"\"\"\n try:\n sha = subprocess.check_output(\n [\"git\", \"-C\", repo_root, \"rev-parse\", \"HEAD^{tree}\"], stderr=subprocess.DEVNULL, text=True\n ).strip()\n return sha if sha else None\n except Exception:\n return None","source_hash":"1681deb157113b893a10b6d1d63ce8939073f501432c3ab1971b7fcf75942fa3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.adapter","uri":"program://Program_Conditioned_Adapter/module/modules.adapter#L1-L290","kind":"module","name":"modules.adapter","path":"modules/adapter.py","language":"python","start_line":1,"end_line":290,"context_start_line":1,"context_end_line":290,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport numpy as np\nimport math\nimport os\nimport json\nimport re\nfrom .embedding import _stable_hash\n\ndef _make_random_matrix(shape: Tuple[int, int], *, seed: int) -> np.ndarray:\n rng = np.random.default_rng(seed)\n # Xavier uniform\n limit = math.sqrt(6.0 / float(shape[0] + shape[1]))\n return rng.uniform(-limit, limit, size=shape).astype(np.float32)\n\ndef generate_lora_from_embedding(\n z: np.ndarray,\n *,\n d_model: int,\n num_layers: int,\n rank: int = 8,\n seed: int = 0,\n targets: Optional[List[str]] = None,\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = None,\n layer_gate: str = \"zmean\",\n target_weights: Optional[Dict[str, float]] = None,\n learn_bias: bool = False,\n map_cap: Optional[float] = None,\n) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n if targets is None:\n targets = [\"q_proj\", \"o_proj\", \"up_proj\"]\n\n z = z.astype(np.float32)\n # Always-on normalization for mapping\n try:\n nz = float(np.linalg.norm(z))\n if nz > 0:\n z = (z / nz).astype(np.float32)\n except Exception:\n pass\n gates: List[float] = []\n layers: List[Dict[str, Dict[str, np.ndarray]]] = []\n\n for layer_idx in range(num_layers):\n layer_state: Dict[str, Dict[str, np.ndarray]] = {}\n # derive per-layer seeds from z by hashing a projection\n key = int((_stable_hash(f\"layer:{layer_idx}\", seed) ^ _stable_hash(str(float(z[0])), seed + 7)) & ((1 << 31) - 1))\n # gate schedule\n frac = 
float(layer_idx) / float(max(1, num_layers - 1))\n if layer_gate == \"cosine\":\n gate = float(0.5 * (1.0 - math.cos(math.pi * frac)))\n elif layer_gate == \"hump\":\n gate = float(max(0.0, math.sin(math.pi * frac)))\n elif layer_gate == \"linear\":\n gate = float(frac)\n else: # zmean\n gate = float((np.tanh(z[(layer_idx * 13) % len(z)]) + 1.0) * 0.5)\n gates.append(gate)\n # Pair MLP projections: reuse seed and gate across up/gate/down; up/gate share A/B\n mlp_seed = key ^ _stable_hash(\"mlp_pair\", seed)\n up_pair: Optional[Tuple[np.ndarray, np.ndarray]] = None\n # Precompute per-target scalar from disjoint z segments (always-on)\n per_target_scale: Dict[str, float] = {}\n try:\n seg_len = max(8, int(len(z) // max(1, len(targets))))\n for idx_t, tgt in enumerate(targets):\n start = (layer_idx * 17 + idx_t * seg_len) % len(z)\n idxs = (np.arange(seg_len) + start) % len(z)\n seg = z[idxs]\n per_target_scale[tgt] = float(np.tanh(np.mean(seg) * 1.5))\n except Exception:\n per_target_scale = {}\n for tgt in targets:\n # A: d_out x r ; B: r x d_in\n if target_shapes and tgt in target_shapes:\n d_out, d_in = target_shapes[tgt]\n else:\n d_out, d_in = d_model, d_model\n # Coupled seeds for MLP blocks to align up/gate/down\n if tgt in (\"up_proj\", \"gate_proj\"):\n if up_pair is None:\n A = _make_random_matrix((int(d_out), rank), seed=mlp_seed ^ _stable_hash(\"up:A\", seed))\n B = _make_random_matrix((rank, int(d_in)), seed=mlp_seed ^ _stable_hash(\"up:B\", seed + 1))\n up_pair = (A, B)\n else:\n A, B = up_pair\n elif tgt == \"down_proj\":\n A = _make_random_matrix((int(d_out), rank), seed=mlp_seed ^ _stable_hash(\"down:A\", seed))\n B = _make_random_matrix((rank, int(d_in)), seed=mlp_seed ^ _stable_hash(\"down:B\", seed + 1))\n else:\n A = _make_random_matrix((int(d_out), rank), seed=key ^ _stable_hash(tgt + \":A\", seed))\n B = _make_random_matrix((rank, int(d_in)), seed=key ^ _stable_hash(tgt + \":B\", seed + 1))\n # Fan-in/fan-out scaling: A *= 1/sqrt(rank); optional B zeroing controlled by zero_B flag at call site via target_weights special key\n if rank > 0:\n A = (1.0 / float(max(1.0, math.sqrt(float(rank))))) * A\n # modulate by low-d projection of z (wrap-safe segment)\n start = (layer_idx * 31) % len(z)\n idx = (np.arange(32) + start) % len(z)\n seg = z[idx]\n alpha = float(np.clip(np.mean(seg) * 1.5, -1.0, 1.0))\n A = (1.0 + alpha * gate) * A\n B = (1.0 - alpha * gate) * B\n # Enhanced: per-target scalar (light) to reduce interference across targets\n if per_target_scale and tgt in per_target_scale:\n s_t = float(per_target_scale[tgt])\n A = (1.0 + 0.15 * s_t) * A\n B = (1.0 - 0.15 * s_t) * B\n if target_weights and tgt in target_weights:\n tw = float(target_weights[tgt])\n s = float(max(0.0, tw)) ** 0.5\n A = s * A\n B = s * B\n # Optional norm cap on A/B (Frobenius) to avoid runaway scales\n try:\n if map_cap is not None and float(map_cap) > 0:\n capv = float(map_cap)\n nA = float(np.linalg.norm(A)) if A.size > 0 else 0.0\n if nA > capv and nA > 0:\n A = (capv / nA) * A\n nB = float(np.linalg.norm(B)) if B.size > 0 else 0.0\n if nB > capv and nB > 0:\n B = (capv / nB) * B\n except Exception:\n pass\n e: Dict[str, np.ndarray] = {\"A\": A, \"B\": B, \"gate\": np.array([gate], dtype=np.float32)}\n if learn_bias:\n e[\"bias\"] = np.zeros((int(d_out),), dtype=np.float32)\n layer_state[tgt] = e\n layers.append(layer_state)\n\n return {\"layers\": layers, \"rank\": rank, \"d_model\": d_model, \"targets\": targets, \"gates\": np.array(gates, dtype=np.float32)}\n\ndef 
generate_lora_from_embedding_torch(\n z: np.ndarray,\n *,\n d_model: int,\n num_layers: int,\n rank: int = 8,\n seed: int = 0,\n targets: Optional[List[str]] = None,\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = None,\n einsum_opt: str = \"auto\",\n layer_gate: str = \"zmean\",\n target_weights: Optional[Dict[str, float]] = None,\n map_cap: Optional[float] = None,\n) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n import torch # local import to avoid hard dep when unused\n try:\n from opt_einsum import contract as _contract # assumed provider of _contract; optional\n except Exception:\n _contract = None # plain torch reduction below handles the None case\n\n if targets is None:\n targets = [\"q_proj\", \"o_proj\", \"up_proj\"]\n\n zt = torch.from_numpy(z.astype(np.float32))\n try:\n nz = float(torch.linalg.vector_norm(zt).item())\n if nz > 0:\n zt = (zt / nz).to(torch.float32)\n except Exception:\n pass\n layers: List[Dict[str, Dict[str, np.ndarray]]] = []\n gates: List[float] = []\n # Global seeding for reproducibility in case external torch ops run\n try:\n torch.manual_seed(int(seed))\n except Exception:\n pass\n\n for layer_idx in range(num_layers):\n layer_state: Dict[str, Dict[str, np.ndarray]] = {}\n key = int((_stable_hash(f\"layer:{layer_idx}\", seed) ^ _stable_hash(str(float(z[0])), seed + 7)) & ((1 << 31) - 1))\n frac = float(layer_idx) / float(max(1, num_layers - 1))\n if layer_gate == \"cosine\":\n gate = float(0.5 * (1.0 - math.cos(math.pi * frac)))\n elif layer_gate == \"hump\":\n gate = float(max(0.0, math.sin(math.pi * frac)))\n elif layer_gate == \"linear\":\n gate = float(frac)\n else:\n gate = float((np.tanh(z[(layer_idx * 13) % len(z)]) + 1.0) * 0.5)\n gates.append(gate)\n # deterministic torch RNG\n gen = torch.Generator(device=\"cpu\")\n gen.manual_seed(key)\n mlp_seed = key ^ _stable_hash(\"mlp_pair\", seed)\n up_pair: Optional[Tuple[torch.Tensor, torch.Tensor]] = None\n # Precompute per-target scale (always-on)\n per_target_scale: Dict[str, float] = {}\n try:\n seg_len = max(8, int(len(zt) // max(1, len(targets))))\n for idx_t, tgt in enumerate(targets):\n start = (layer_idx * 17 + idx_t * seg_len) % len(zt)\n idxs = (torch.arange(seg_len) + start) % len(zt)\n seg = zt[idxs]\n per_target_scale[tgt] = float(torch.tanh(seg.mean() * 1.5).item())\n except Exception:\n per_target_scale = {}\n for tgt in targets:\n if target_shapes and tgt in target_shapes:\n d_out, d_in = target_shapes[tgt]\n else:\n d_out, d_in = d_model, d_model\n if tgt in (\"up_proj\", \"gate_proj\"):\n if up_pair is None:\n A = (torch.rand((int(d_out), rank), generator=gen) * 2 - 1).to(torch.float32)\n B = (torch.rand((rank, int(d_in)), generator=gen) * 2 - 1).to(torch.float32)\n up_pair = (A, B)\n else:\n A, B = up_pair\n elif tgt == \"down_proj\":\n # reuse generator but distinct suffix for down\n A = (torch.rand((int(d_out), rank), generator=gen) * 2 - 1).to(torch.float32)\n B = (torch.rand((rank, int(d_in)), generator=gen) * 2 - 1).to(torch.float32)\n else:\n A = (torch.rand((int(d_out), rank), generator=gen) * 2 - 1).to(torch.float32)\n B = (torch.rand((rank, int(d_in)), generator=gen) * 2 - 1).to(torch.float32)\n if rank > 0:\n A = A * (1.0 / float(max(1.0, math.sqrt(float(rank)))))\n # Modulate by contraction with a deterministic kernel vector derived from z\n start = (layer_idx * 31) % len(z)\n idx = (np.arange(32) + start) % len(z)\n seg = zt[idx]\n w = torch.sin(torch.linspace(0, math.pi, steps=32))\n if _contract is not None and einsum_opt:\n alpha = torch.tanh(_contract(\"i,i->\", seg, w, optimize=einsum_opt) / 8.0)\n else:\n alpha = torch.tanh((seg * w).sum() / 8.0)\n A = (1.0 + float(alpha.item()) * gate) * A\n B = (1.0 - float(alpha.item()) * gate) 
* B\n if per_target_scale and tgt in per_target_scale:\n s_t = float(per_target_scale[tgt])\n A = (1.0 + 0.15 * s_t) * A\n B = (1.0 - 0.15 * s_t) * B\n if target_weights and tgt in target_weights:\n tw = float(target_weights[tgt])\n s = float(max(0.0, tw)) ** 0.5\n A = (s * A)\n B = (s * B)\n # Optional norm cap (Frobenius) on A/B\n try:\n if map_cap is not None and float(map_cap) > 0:\n capv = float(map_cap)\n nA = float(torch.linalg.matrix_norm(A).item()) if A.numel() > 0 else 0.0\n if nA > capv and nA > 0:\n A = (capv / nA) * A\n nB = float(torch.linalg.matrix_norm(B).item()) if B.numel() > 0 else 0.0\n if nB > capv and nB > 0:\n B = (capv / nB) * B\n except Exception:\n pass\n e: Dict[str, np.ndarray] = {\n \"A\": A.numpy().astype(np.float32),\n \"B\": B.numpy().astype(np.float32),\n \"gate\": np.array([gate], dtype=np.float32),\n }\n # Torch path does not currently support learn_bias flag; add zero bias for parity if requested via target_weights special key later if needed\n layer_state[tgt] = e\n layers.append(layer_state)\n\n return {\"layers\": layers, \"rank\": rank, \"d_model\": d_model, \"targets\": targets, \"gates\": np.array(gates, dtype=np.float32)}\n\n\ndef save_npz(out_dir: str, *, embedding: Dict[str, np.ndarray], adapters: Dict[str, Any], manifest: Dict[str, Any]) -> None:\n os.makedirs(out_dir, exist_ok=True)\n np.savez_compressed(os.path.join(out_dir, \"embedding.npz\"), **embedding)\n # flatten adapter arrays\n flat: Dict[str, np.ndarray] = {}\n for i, layer in enumerate(adapters[\"layers\"]):\n for name, tensors in layer.items():\n flat[f\"L{i}.{name}.A\"] = tensors[\"A\"]\n flat[f\"L{i}.{name}.B\"] = tensors[\"B\"]\n gate_val = float(tensors[\"gate\"][0]) if isinstance(tensors.get(\"gate\"), np.ndarray) else float(tensors.get(\"gate\", 0.0))\n flat[f\"L{i}.{name}.gate\"] = np.array(gate_val, dtype=np.float32)\n if \"bias\" in tensors and isinstance(tensors[\"bias\"], np.ndarray):\n flat[f\"L{i}.{name}.bias\"] = tensors[\"bias\"]\n np.savez_compressed(os.path.join(out_dir, \"adapters.npz\"), **flat)\n open(os.path.join(out_dir, \"manifest.json\"), \"w\", encoding=\"utf-8\").write(json.dumps(manifest, indent=2))\n\ndef load_adapters_npz(path: str) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n data = np.load(path)\n # infer indices\n layers: Dict[int, Dict[str, Dict[str, np.ndarray]]] = {}\n for key in data.files:\n # L{idx}.{name}.{A|B|gate}\n parts = key.split(\".\")\n if len(parts) != 3:\n continue\n lid = int(parts[0][1:])\n name = parts[1]\n kind = parts[2]\n layers.setdefault(lid, {}).setdefault(name, {})[kind] = data[key]\n ordered = [layers[i] for i in sorted(layers.keys())]\n return {\"layers\": ordered}","source_hash":"b3a3d83adc4e6e7e862319c4ece6e97abf2b17399e2a67e387c23a34a8c89120","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.adapter._make_random_matrix","uri":"program://Program_Conditioned_Adapter/function/modules.adapter._make_random_matrix#L9-L13","kind":"function","name":"_make_random_matrix","path":"modules/adapter.py","language":"python","start_line":9,"end_line":13,"context_start_line":1,"context_end_line":33,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport numpy as np\nimport math\nimport os\nimport json\nimport re\nfrom .embedding import _stable_hash\n\ndef _make_random_matrix(shape: Tuple[int, int], *, seed: int) -> np.ndarray:\n rng = np.random.default_rng(seed)\n # Xavier uniform\n limit = math.sqrt(6.0 / float(shape[0] + shape[1]))\n return rng.uniform(-limit, limit, 
size=shape).astype(np.float32)\n\ndef generate_lora_from_embedding(\n z: np.ndarray,\n *,\n d_model: int,\n num_layers: int,\n rank: int = 8,\n seed: int = 0,\n targets: Optional[List[str]] = None,\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = None,\n layer_gate: str = \"zmean\",\n target_weights: Optional[Dict[str, float]] = None,\n learn_bias: bool = False,\n map_cap: Optional[float] = None,\n) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n if targets is None:\n targets = [\"q_proj\", \"o_proj\", \"up_proj\"]\n\n z = z.astype(np.float32)\n # Always-on normalization for mapping","source_hash":"b3a3d83adc4e6e7e862319c4ece6e97abf2b17399e2a67e387c23a34a8c89120","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.adapter.generate_lora_from_embedding","uri":"program://Program_Conditioned_Adapter/function/modules.adapter.generate_lora_from_embedding#L15-L130","kind":"function","name":"generate_lora_from_embedding","path":"modules/adapter.py","language":"python","start_line":15,"end_line":130,"context_start_line":1,"context_end_line":150,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport numpy as np\nimport math\nimport os\nimport json\nimport re\nfrom .embedding import _stable_hash\n\ndef _make_random_matrix(shape: Tuple[int, int], *, seed: int) -> np.ndarray:\n rng = np.random.default_rng(seed)\n # Xavier uniform\n limit = math.sqrt(6.0 / float(shape[0] + shape[1]))\n return rng.uniform(-limit, limit, size=shape).astype(np.float32)\n\ndef generate_lora_from_embedding(\n z: np.ndarray,\n *,\n d_model: int,\n num_layers: int,\n rank: int = 8,\n seed: int = 0,\n targets: Optional[List[str]] = None,\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = None,\n layer_gate: str = \"zmean\",\n target_weights: Optional[Dict[str, float]] = None,\n learn_bias: bool = False,\n map_cap: Optional[float] = None,\n) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n if targets is None:\n targets = [\"q_proj\", \"o_proj\", \"up_proj\"]\n\n z = z.astype(np.float32)\n # Always-on normalization for mapping\n try:\n nz = float(np.linalg.norm(z))\n if nz > 0:\n z = (z / nz).astype(np.float32)\n except Exception:\n pass\n gates: List[float] = []\n layers: List[Dict[str, Dict[str, np.ndarray]]] = []\n\n for layer_idx in range(num_layers):\n layer_state: Dict[str, Dict[str, np.ndarray]] = {}\n # derive per-layer seeds from z by hashing a projection\n key = int((_stable_hash(f\"layer:{layer_idx}\", seed) ^ _stable_hash(str(float(z[0])), seed + 7)) & ((1 << 31) - 1))\n # gate schedule\n frac = float(layer_idx) / float(max(1, num_layers - 1))\n if layer_gate == \"cosine\":\n gate = float(0.5 * (1.0 - math.cos(math.pi * frac)))\n elif layer_gate == \"hump\":\n gate = float(max(0.0, math.sin(math.pi * frac)))\n elif layer_gate == \"linear\":\n gate = float(frac)\n else: # zmean\n gate = float((np.tanh(z[(layer_idx * 13) % len(z)]) + 1.0) * 0.5)\n gates.append(gate)\n # Pair MLP projections: reuse seed and gate across up/gate/down; up/gate share A/B\n mlp_seed = key ^ _stable_hash(\"mlp_pair\", seed)\n up_pair: Optional[Tuple[np.ndarray, np.ndarray]] = None\n # Precompute per-target scalar from disjoint z segments (always-on)\n per_target_scale: Dict[str, float] = {}\n try:\n seg_len = max(8, int(len(z) // max(1, len(targets))))\n for idx_t, tgt in enumerate(targets):\n start = (layer_idx * 17 + idx_t * seg_len) % len(z)\n idxs = (np.arange(seg_len) + start) % len(z)\n seg = z[idxs]\n per_target_scale[tgt] = float(np.tanh(np.mean(seg) * 1.5))\n 
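# Segments are offset by idx_t * seg_len, so each target reads a (mostly)\n # disjoint slice of z; tanh keeps every per-target scalar inside (-1, 1).\n 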
except Exception:\n per_target_scale = {}\n for tgt in targets:\n # A: d_out x r ; B: r x d_in\n if target_shapes and tgt in target_shapes:\n d_out, d_in = target_shapes[tgt]\n else:\n d_out, d_in = d_model, d_model\n # Coupled seeds for MLP blocks to align up/gate/down\n if tgt in (\"up_proj\", \"gate_proj\"):\n if up_pair is None:\n A = _make_random_matrix((int(d_out), rank), seed=mlp_seed ^ _stable_hash(\"up:A\", seed))\n B = _make_random_matrix((rank, int(d_in)), seed=mlp_seed ^ _stable_hash(\"up:B\", seed + 1))\n up_pair = (A, B)\n else:\n A, B = up_pair\n elif tgt == \"down_proj\":\n A = _make_random_matrix((int(d_out), rank), seed=mlp_seed ^ _stable_hash(\"down:A\", seed))\n B = _make_random_matrix((rank, int(d_in)), seed=mlp_seed ^ _stable_hash(\"down:B\", seed + 1))\n else:\n A = _make_random_matrix((int(d_out), rank), seed=key ^ _stable_hash(tgt + \":A\", seed))\n B = _make_random_matrix((rank, int(d_in)), seed=key ^ _stable_hash(tgt + \":B\", seed + 1))\n # Fan-in/fan-out scaling: A *= 1/sqrt(rank); optional B zeroing controlled by zero_B flag at call site via target_weights special key\n if rank > 0:\n A = (1.0 / float(max(1.0, math.sqrt(float(rank))))) * A\n # modulate by low-d projection of z (wrap-safe segment)\n start = (layer_idx * 31) % len(z)\n idx = (np.arange(32) + start) % len(z)\n seg = z[idx]\n alpha = float(np.clip(np.mean(seg) * 1.5, -1.0, 1.0))\n A = (1.0 + alpha * gate) * A\n B = (1.0 - alpha * gate) * B\n # Enhanced: per-target scalar (light) to reduce interference across targets\n if per_target_scale and tgt in per_target_scale:\n s_t = float(per_target_scale[tgt])\n A = (1.0 + 0.15 * s_t) * A\n B = (1.0 - 0.15 * s_t) * B\n if target_weights and tgt in target_weights:\n tw = float(target_weights[tgt])\n s = float(max(0.0, tw)) ** 0.5\n A = s * A\n B = s * B\n # Optional norm cap on A/B (Frobenius) to avoid runaway scales\n try:\n if map_cap is not None and float(map_cap) > 0:\n capv = float(map_cap)\n nA = float(np.linalg.norm(A)) if A.size > 0 else 0.0\n if nA > capv and nA > 0:\n A = (capv / nA) * A\n nB = float(np.linalg.norm(B)) if B.size > 0 else 0.0\n if nB > capv and nB > 0:\n B = (capv / nB) * B\n except Exception:\n pass\n e: Dict[str, np.ndarray] = {\"A\": A, \"B\": B, \"gate\": np.array([gate], dtype=np.float32)}\n if learn_bias:\n e[\"bias\"] = np.zeros((int(d_out),), dtype=np.float32)\n layer_state[tgt] = e\n layers.append(layer_state)\n\n return {\"layers\": layers, \"rank\": rank, \"d_model\": d_model, \"targets\": targets, \"gates\": np.array(gates, dtype=np.float32)}\n\ndef generate_lora_from_embedding_torch(\n z: np.ndarray,\n *,\n d_model: int,\n num_layers: int,\n rank: int = 8,\n seed: int = 0,\n targets: Optional[List[str]] = None,\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = None,\n einsum_opt: str = \"auto\",\n layer_gate: str = \"zmean\",\n target_weights: Optional[Dict[str, float]] = None,\n map_cap: Optional[float] = None,\n) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n import torch # local import to avoid hard dep when unused\n\n if targets is None:\n targets = [\"q_proj\", \"o_proj\", \"up_proj\"]\n","source_hash":"b3a3d83adc4e6e7e862319c4ece6e97abf2b17399e2a67e387c23a34a8c89120","truncated":false} 
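Note on the two generator records above and below: both derive seeded A/B factors per layer and target, then apply an opposed (1 + a*g) / (1 - a*g) rescaling. Since (1 + x)(1 - x) = 1 - x^2, the product A @ B stays near its seed scale while weight shifts between the two factors, and target_weights enter as sqrt(tw) on each factor so the composed delta scales by tw exactly. The sketch below exercises the numpy path with hypothetical toy sizes (d_model=64, 4 layers, a length-256 z) and is illustrative only; the torch variant additionally expects a _contract helper (e.g. opt_einsum.contract, guarded to None when unavailable) and falls back to a plain reduction.

import numpy as np

from modules.adapter import generate_lora_from_embedding

# Hypothetical toy geometry; real runs use the CLI embed dim (default 1536).
z = np.random.default_rng(0).standard_normal(256).astype(np.float32)
state = generate_lora_from_embedding(z, d_model=64, num_layers=4, rank=8, seed=0)

layer0 = state["layers"][0]
A, B = layer0["q_proj"]["A"], layer0["q_proj"]["B"]
assert A.shape == (64, 8) and B.shape == (8, 64)  # A: d_out x r, B: r x d_in
assert state["gates"].shape == (4,)               # one gate per layer

# Opposed scaling: A carries (1 + a*g), B carries (1 - a*g), so the norm of
# A @ B moves only by the second-order factor (1 - (a*g)**2) from its seed value.
delta = A @ B
print("layer-0 q_proj delta:", delta.shape, float(np.linalg.norm(delta)))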
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.adapter.generate_lora_from_embedding_torch","uri":"program://Program_Conditioned_Adapter/function/modules.adapter.generate_lora_from_embedding_torch#L132-L257","kind":"function","name":"generate_lora_from_embedding_torch","path":"modules/adapter.py","language":"python","start_line":132,"end_line":257,"context_start_line":112,"context_end_line":277,"code":" # Optional norm cap on A/B (Frobenius) to avoid runaway scales\n try:\n if map_cap is not None and float(map_cap) > 0:\n capv = float(map_cap)\n nA = float(np.linalg.norm(A)) if A.size > 0 else 0.0\n if nA > capv and nA > 0:\n A = (capv / nA) * A\n nB = float(np.linalg.norm(B)) if B.size > 0 else 0.0\n if nB > capv and nB > 0:\n B = (capv / nB) * B\n except Exception:\n pass\n e: Dict[str, np.ndarray] = {\"A\": A, \"B\": B, \"gate\": np.array([gate], dtype=np.float32)}\n if learn_bias:\n e[\"bias\"] = np.zeros((int(d_out),), dtype=np.float32)\n layer_state[tgt] = e\n layers.append(layer_state)\n\n return {\"layers\": layers, \"rank\": rank, \"d_model\": d_model, \"targets\": targets, \"gates\": np.array(gates, dtype=np.float32)}\n\ndef generate_lora_from_embedding_torch(\n z: np.ndarray,\n *,\n d_model: int,\n num_layers: int,\n rank: int = 8,\n seed: int = 0,\n targets: Optional[List[str]] = None,\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = None,\n einsum_opt: str = \"auto\",\n layer_gate: str = \"zmean\",\n target_weights: Optional[Dict[str, float]] = None,\n map_cap: Optional[float] = None,\n) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n import torch # local import to avoid hard dep when unused\n\n if targets is None:\n targets = [\"q_proj\", \"o_proj\", \"up_proj\"]\n\n zt = torch.from_numpy(z.astype(np.float32))\n try:\n nz = float(torch.linalg.vector_norm(zt).item())\n if nz > 0:\n zt = (zt / nz).to(torch.float32)\n except Exception:\n pass\n layers: List[Dict[str, Dict[str, np.ndarray]]] = []\n gates: List[float] = []\n # Global seeding for reproducibility in case external torch ops run\n try:\n torch.manual_seed(int(seed))\n except Exception:\n pass\n\n for layer_idx in range(num_layers):\n layer_state: Dict[str, Dict[str, np.ndarray]] = {}\n key = int((_stable_hash(f\"layer:{layer_idx}\", seed) ^ _stable_hash(str(float(z[0])), seed + 7)) & ((1 << 31) - 1))\n frac = float(layer_idx) / float(max(1, num_layers - 1))\n if layer_gate == \"cosine\":\n gate = float(0.5 * (1.0 - math.cos(math.pi * frac)))\n elif layer_gate == \"hump\":\n gate = float(max(0.0, math.sin(math.pi * frac)))\n elif layer_gate == \"linear\":\n gate = float(frac)\n else:\n gate = float((np.tanh(z[(layer_idx * 13) % len(z)]) + 1.0) * 0.5)\n gates.append(gate)\n # deterministic torch RNG\n gen = torch.Generator(device=\"cpu\")\n gen.manual_seed(key)\n mlp_seed = key ^ _stable_hash(\"mlp_pair\", seed)\n up_pair: Optional[Tuple[torch.Tensor, torch.Tensor]] = None\n # Precompute per-target scale (always-on)\n per_target_scale: Dict[str, float] = {}\n try:\n seg_len = max(8, int(len(zt) // max(1, len(targets))))\n for idx_t, tgt in enumerate(targets):\n start = (layer_idx * 17 + idx_t * seg_len) % len(zt)\n idxs = (torch.arange(seg_len) + start) % len(zt)\n seg = zt[idxs]\n per_target_scale[tgt] = float(torch.tanh(seg.mean() * 1.5).item())\n except Exception:\n per_target_scale = {}\n for tgt in targets:\n if target_shapes and tgt in target_shapes:\n d_out, d_in = target_shapes[tgt]\n else:\n d_out, d_in = d_model, d_model\n if tgt in (\"up_proj\", \"gate_proj\"):\n if up_pair is 
None:\n A = (torch.rand((int(d_out), rank), generator=gen) * 2 - 1).to(torch.float32)\n B = (torch.rand((rank, int(d_in)), generator=gen) * 2 - 1).to(torch.float32)\n up_pair = (A, B)\n else:\n A, B = up_pair\n elif tgt == \"down_proj\":\n # reuse generator but distinct suffix for down\n A = (torch.rand((int(d_out), rank), generator=gen) * 2 - 1).to(torch.float32)\n B = (torch.rand((rank, int(d_in)), generator=gen) * 2 - 1).to(torch.float32)\n else:\n A = (torch.rand((int(d_out), rank), generator=gen) * 2 - 1).to(torch.float32)\n B = (torch.rand((rank, int(d_in)), generator=gen) * 2 - 1).to(torch.float32)\n if rank > 0:\n A = A * (1.0 / float(max(1.0, math.sqrt(float(rank)))))\n # Modulate by contraction with a deterministic kernel vector derived from z\n start = (layer_idx * 31) % len(z)\n idx = (np.arange(32) + start) % len(z)\n seg = zt[idx]\n w = torch.sin(torch.linspace(0, 3.14159, steps=32))\n if _contract is not None and einsum_opt:\n alpha = torch.tanh(_contract(\"i,i->\", seg, w, optimize=einsum_opt) / 8.0)\n else:\n alpha = torch.tanh((seg * w).sum() / 8.0)\n A = (1.0 + float(alpha.item()) * gate) * A\n B = (1.0 - float(alpha.item()) * gate) * B\n if per_target_scale and tgt in per_target_scale:\n s_t = float(per_target_scale[tgt])\n A = (1.0 + 0.15 * s_t) * A\n B = (1.0 - 0.15 * s_t) * B\n if target_weights and tgt in target_weights:\n tw = float(target_weights[tgt])\n s = float(max(0.0, tw)) ** 0.5\n A = (s * A)\n B = (s * B)\n # Optional norm cap (Frobenius) on A/B\n try:\n if map_cap is not None and float(map_cap) > 0:\n capv = float(map_cap)\n nA = float(torch.linalg.matrix_norm(A).item()) if A.numel() > 0 else 0.0\n if nA > capv and nA > 0:\n A = (capv / nA) * A\n nB = float(torch.linalg.matrix_norm(B).item()) if B.numel() > 0 else 0.0\n if nB > capv and nB > 0:\n B = (capv / nB) * B\n except Exception:\n pass\n e: Dict[str, np.ndarray] = {\n \"A\": A.numpy().astype(np.float32),\n \"B\": B.numpy().astype(np.float32),\n \"gate\": np.array([gate], dtype=np.float32),\n }\n # Torch path does not currently support learn_bias flag; add zero bias for parity if requested via target_weights special key later if needed\n layer_state[tgt] = e\n layers.append(layer_state)\n\n return {\"layers\": layers, \"rank\": rank, \"d_model\": d_model, \"targets\": targets, \"gates\": np.array(gates, dtype=np.float32)}\n\n\ndef save_npz(out_dir: str, *, embedding: Dict[str, np.ndarray], adapters: Dict[str, Any], manifest: Dict[str, Any]) -> None:\n os.makedirs(out_dir, exist_ok=True)\n np.savez_compressed(os.path.join(out_dir, \"embedding.npz\"), **embedding)\n # flatten adapter arrays\n flat: Dict[str, np.ndarray] = {}\n for i, layer in enumerate(adapters[\"layers\"]):\n for name, tensors in layer.items():\n flat[f\"L{i}.{name}.A\"] = tensors[\"A\"]\n flat[f\"L{i}.{name}.B\"] = tensors[\"B\"]\n gate_val = float(tensors[\"gate\"][0]) if isinstance(tensors.get(\"gate\"), np.ndarray) else float(tensors.get(\"gate\", 0.0))\n flat[f\"L{i}.{name}.gate\"] = np.array(gate_val, dtype=np.float32)\n if \"bias\" in tensors and isinstance(tensors[\"bias\"], np.ndarray):\n flat[f\"L{i}.{name}.bias\"] = tensors[\"bias\"]\n np.savez_compressed(os.path.join(out_dir, \"adapters.npz\"), **flat)\n open(os.path.join(out_dir, \"manifest.json\"), \"w\", encoding=\"utf-8\").write(json.dumps(manifest, indent=2))\n\ndef load_adapters_npz(path: str) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n data = 
np.load(path)","source_hash":"b3a3d83adc4e6e7e862319c4ece6e97abf2b17399e2a67e387c23a34a8c89120","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.adapter.save_npz","uri":"program://Program_Conditioned_Adapter/function/modules.adapter.save_npz#L260-L274","kind":"function","name":"save_npz","path":"modules/adapter.py","language":"python","start_line":260,"end_line":274,"context_start_line":240,"context_end_line":290,"code":" nA = float(torch.linalg.matrix_norm(A).item()) if A.numel() > 0 else 0.0\n if nA > capv and nA > 0:\n A = (capv / nA) * A\n nB = float(torch.linalg.matrix_norm(B).item()) if B.numel() > 0 else 0.0\n if nB > capv and nB > 0:\n B = (capv / nB) * B\n except Exception:\n pass\n e: Dict[str, np.ndarray] = {\n \"A\": A.numpy().astype(np.float32),\n \"B\": B.numpy().astype(np.float32),\n \"gate\": np.array([gate], dtype=np.float32),\n }\n # Torch path does not currently support learn_bias flag; add zero bias for parity if requested via target_weights special key later if needed\n layer_state[tgt] = e\n layers.append(layer_state)\n\n return {\"layers\": layers, \"rank\": rank, \"d_model\": d_model, \"targets\": targets, \"gates\": np.array(gates, dtype=np.float32)}\n\n\ndef save_npz(out_dir: str, *, embedding: Dict[str, np.ndarray], adapters: Dict[str, Any], manifest: Dict[str, Any]) -> None:\n os.makedirs(out_dir, exist_ok=True)\n np.savez_compressed(os.path.join(out_dir, \"embedding.npz\"), **embedding)\n # flatten adapter arrays\n flat: Dict[str, np.ndarray] = {}\n for i, layer in enumerate(adapters[\"layers\"]):\n for name, tensors in layer.items():\n flat[f\"L{i}.{name}.A\"] = tensors[\"A\"]\n flat[f\"L{i}.{name}.B\"] = tensors[\"B\"]\n gate_val = float(tensors[\"gate\"][0]) if isinstance(tensors.get(\"gate\"), np.ndarray) else float(tensors.get(\"gate\", 0.0))\n flat[f\"L{i}.{name}.gate\"] = np.array(gate_val, dtype=np.float32)\n if \"bias\" in tensors and isinstance(tensors[\"bias\"], np.ndarray):\n flat[f\"L{i}.{name}.bias\"] = tensors[\"bias\"]\n np.savez_compressed(os.path.join(out_dir, \"adapters.npz\"), **flat)\n open(os.path.join(out_dir, \"manifest.json\"), \"w\", encoding=\"utf-8\").write(json.dumps(manifest, indent=2))\n\ndef load_adapters_npz(path: str) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n data = np.load(path)\n # infer indices\n layers: Dict[int, Dict[str, Dict[str, np.ndarray]]] = {}\n for key in data.files:\n # L{idx}.{name}.{A|B|gate}\n parts = key.split(\".\")\n if len(parts) != 3:\n continue\n lid = int(parts[0][1:])\n name = parts[1]\n kind = parts[2]\n layers.setdefault(lid, {}).setdefault(name, {})[kind] = data[key]\n ordered = [layers[i] for i in sorted(layers.keys())]\n return {\"layers\": ordered}","source_hash":"b3a3d83adc4e6e7e862319c4ece6e97abf2b17399e2a67e387c23a34a8c89120","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.adapter.load_adapters_npz","uri":"program://Program_Conditioned_Adapter/function/modules.adapter.load_adapters_npz#L276-L290","kind":"function","name":"load_adapters_npz","path":"modules/adapter.py","language":"python","start_line":276,"end_line":290,"context_start_line":256,"context_end_line":290,"code":"\n return {\"layers\": layers, \"rank\": rank, \"d_model\": d_model, \"targets\": targets, \"gates\": np.array(gates, dtype=np.float32)}\n\n\ndef save_npz(out_dir: str, *, embedding: Dict[str, np.ndarray], adapters: Dict[str, Any], manifest: Dict[str, Any]) -> None:\n os.makedirs(out_dir, exist_ok=True)\n 
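# On-disk layout: embedding.npz (raw embedding arrays), adapters.npz with flat\n # L{i}.{name}.{A|B|gate[|bias]} keys that load_adapters_npz re-nests, and manifest.json.\n 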
np.savez_compressed(os.path.join(out_dir, \"embedding.npz\"), **embedding)\n # flatten adapter arrays\n flat: Dict[str, np.ndarray] = {}\n for i, layer in enumerate(adapters[\"layers\"]):\n for name, tensors in layer.items():\n flat[f\"L{i}.{name}.A\"] = tensors[\"A\"]\n flat[f\"L{i}.{name}.B\"] = tensors[\"B\"]\n gate_val = float(tensors[\"gate\"][0]) if isinstance(tensors.get(\"gate\"), np.ndarray) else float(tensors.get(\"gate\", 0.0))\n flat[f\"L{i}.{name}.gate\"] = np.array(gate_val, dtype=np.float32)\n if \"bias\" in tensors and isinstance(tensors[\"bias\"], np.ndarray):\n flat[f\"L{i}.{name}.bias\"] = tensors[\"bias\"]\n np.savez_compressed(os.path.join(out_dir, \"adapters.npz\"), **flat)\n open(os.path.join(out_dir, \"manifest.json\"), \"w\", encoding=\"utf-8\").write(json.dumps(manifest, indent=2))\n\ndef load_adapters_npz(path: str) -> Dict[str, List[Dict[str, Dict[str, np.ndarray]]]]:\n data = np.load(path)\n # infer indices\n layers: Dict[int, Dict[str, Dict[str, np.ndarray]]] = {}\n for key in data.files:\n # L{idx}.{name}.{A|B|gate}\n parts = key.split(\".\")\n if len(parts) != 3:\n continue\n lid = int(parts[0][1:])\n name = parts[1]\n kind = parts[2]\n layers.setdefault(lid, {}).setdefault(name, {})[kind] = data[key]\n ordered = [layers[i] for i in sorted(layers.keys())]\n return {\"layers\": ordered}","source_hash":"b3a3d83adc4e6e7e862319c4ece6e97abf2b17399e2a67e387c23a34a8c89120","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.mixing","uri":"program://Program_Conditioned_Adapter/module/modules.mixing#L1-L170","kind":"module","name":"modules.mixing","path":"modules/mixing.py","language":"python","start_line":1,"end_line":170,"context_start_line":1,"context_end_line":170,"code":"# delta-cap guard (lift from modular.py)\nimport os\nimport re\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport torch\n\nfrom blocks.targets import targets_map\nfrom blocks.utils import getattr_nested\n\ndef register_hook_mixed_adapters(\n model: Any,\n base_layers: List[Dict[str, Dict[str, np.ndarray]]],\n sub_layers: Optional[List[Dict[str, Dict[str, np.ndarray]]]],\n *,\n alpha_star: float,\n g_sub: float,\n rank: int,\n beta: float,\n target_weights: Optional[Dict[str, float]] = None,\n backend: str = \"local\",\n layer_multipliers: Optional[List[float]] = None,\n per_target_keep: Optional[Dict[str, int]] = None,\n per_target_keep_layers: Optional[List[Dict[str, int]]] = None,\n delta_cap: Optional[float] = None,\n):\n tmap = targets_map(backend)\n target_weights = target_weights or {}\n per_target_keep = per_target_keep or {}\n per_target_keep_layers = per_target_keep_layers or []\n\n def _mat(A: np.ndarray, B: np.ndarray, *, keep: Optional[int] = None) -> torch.Tensor:\n if isinstance(keep, int) and keep > 0:\n k = int(min(keep, A.shape[1], B.shape[0]))\n if k > 0:\n At = torch.from_numpy(A[:, :k]).to(torch.float32)\n Bt = torch.from_numpy(B[:k, :]).to(torch.float32)\n return At @ Bt\n return torch.from_numpy(A).to(torch.float32) @ torch.from_numpy(B).to(torch.float32)\n\n L = max(len(base_layers or []), len(sub_layers or []))\n scale = float(alpha_star) / float(max(1, int(rank)))\n # Store CPU-offloaded records of applied deltas to avoid holding large GPU tensors\n # Each record: (param, cpu_delta, mode, extra)\n # mode == \"full\" -> subtract full delta\n # mode == \"slice\" -> subtract into row slice [start:end]\n applied: List[Tuple[torch.nn.Parameter, torch.Tensor, str, Tuple[int, int]]] = []\n # layers list\n try:\n 
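# Resolve the decoder block list: HF-style model.model.layers first, then a\n # model.blocks fallback; if neither exists, layers stays empty and no deltas apply.\n 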
layers = list(getattr(getattr(model, \"model\", model), \"layers\"))\n except Exception:\n try:\n layers = list(getattr(model, \"blocks\"))\n except Exception:\n layers = []\n\n def _cap_delta_if_needed(w: torch.nn.Parameter, d: torch.Tensor) -> torch.Tensor:\n try:\n # Prefer explicit parameter; else env fallbacks\n if delta_cap is not None:\n cap = float(delta_cap)\n else:\n cap_env = (os.environ.get(\"PCA_DELTA_CAP\") or os.environ.get(\"REPO_ADAPTER_DELTA_CAP\") or \"\").strip()\n cap = float(cap_env) if cap_env else 0.0\n except Exception:\n cap = 0.0\n if cap is None or cap <= 0:\n return d\n try:\n wn = float(w.data.norm().item())\n dn = float(d.norm().item())\n if dn > 0 and wn > 0:\n limit = cap * wn\n if dn > limit:\n s = float(limit / max(dn, 1e-12))\n return (s * d)\n except Exception:\n return d\n return d\n\n for i in range(L):\n if i >= len(layers):\n break\n layer = layers[i]\n lm = 1.0\n try:\n if layer_multipliers is not None and i < len(layer_multipliers):\n lm = float(layer_multipliers[i])\n except Exception:\n lm = 1.0\n # Build deltas for this layer only on CPU, apply, then free\n base = base_layers[i] if i < len(base_layers or []) else None\n sub = (sub_layers[i] if (sub_layers is not None and i < len(sub_layers)) else None)\n deltas_cpu: Dict[str, torch.Tensor] = {}\n for name in tmap.keys():\n acc: Optional[torch.Tensor] = None\n # layer-specific keep overrides global keep\n keep_layer = None\n try:\n if 0 <= i < len(per_target_keep_layers):\n keep_layer = int(per_target_keep_layers[i].get(name, 0))\n except Exception:\n keep_layer = None\n keep_global = int(per_target_keep.get(name, 0)) if per_target_keep else 0\n keep_eff = keep_layer if (keep_layer and keep_layer > 0) else (keep_global if keep_global > 0 else None)\n if base is not None and name in base:\n try:\n acc = _mat(base[name][\"A\"], base[name][\"B\"], keep=keep_eff).to(torch.float32)\n except Exception:\n acc = None\n if sub is not None and name in sub:\n try:\n sub_m = _mat(sub[name][\"A\"], sub[name][\"B\"], keep=keep_eff).to(torch.float32)\n acc = sub_m if acc is None else ((1.0 - float(g_sub)) * acc + float(g_sub) * sub_m)\n except Exception:\n pass\n if acc is not None:\n tw = float(target_weights.get(name, 1.0))\n deltas_cpu[name] = (lm * tw * acc).contiguous()\n for short, rel in tmap.items():\n if short not in deltas_cpu:\n continue\n try:\n mod = getattr_nested(layer, rel)\n w: torch.nn.Parameter = getattr(mod, \"weight\") # type: ignore[assignment]\n d_cpu = (scale * deltas_cpu[short])\n d = d_cpu.to(w.device, dtype=w.dtype)\n d = _cap_delta_if_needed(w, d)\n if rel.endswith(\"mlp.w_in\") and short in (\"gate_proj\", \"up_proj\"):\n half = int(w.shape[0] // 2)\n if short == \"gate_proj\":\n part = d[:half, :] if d.shape[0] == w.shape[0] else d\n part = _cap_delta_if_needed(w, part)\n w.data[:half, :].add_(part)\n applied.append((w, d_cpu[:half, :].detach().to(\"cpu\"), \"slice\", (0, half)))\n else:\n part = d[-half:, :] if d.shape[0] == w.shape[0] else d\n part = _cap_delta_if_needed(w, part)\n w.data[half:, :].add_(part)\n applied.append((w, d_cpu[-half:, :].detach().to(\"cpu\"), \"slice\", (half, w.shape[0])))\n else:\n w.data.add_(d)\n applied.append((w, d_cpu.detach().to(\"cpu\"), \"full\", (0, 0)))\n except Exception:\n continue\n del deltas_cpu\n\n class _Applied:\n def __init__(self, records: List[Tuple[torch.nn.Parameter, torch.Tensor, str, Tuple[int, int]]]):\n self._records = records\n self._removed = False\n\n def remove(self) -> None:\n if self._removed:\n return\n for w, d_cpu, 
mode, extra in self._records:\n try:\n if mode == \"full\":\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data.sub_(d_dev)\n elif mode == \"slice\":\n start, end = extra\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data[start:end, :].sub_(d_dev)\n except Exception:\n pass\n self._removed = True\n\n return [_Applied(applied)]","source_hash":"3aaab206d84ec7b5730e0188b04bb9cad54225c7fd167810018dae6d680836c5","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.mixing.register_hook_mixed_adapters","uri":"program://Program_Conditioned_Adapter/function/modules.mixing.register_hook_mixed_adapters#L12-L170","kind":"function","name":"register_hook_mixed_adapters","path":"modules/mixing.py","language":"python","start_line":12,"end_line":170,"context_start_line":1,"context_end_line":170,"code":"# delta-cap guard (lift from modular.py)\nimport os\nimport re\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport torch\n\nfrom blocks.targets import targets_map\nfrom blocks.utils import getattr_nested\n\ndef register_hook_mixed_adapters(\n model: Any,\n base_layers: List[Dict[str, Dict[str, np.ndarray]]],\n sub_layers: Optional[List[Dict[str, Dict[str, np.ndarray]]]],\n *,\n alpha_star: float,\n g_sub: float,\n rank: int,\n beta: float,\n target_weights: Optional[Dict[str, float]] = None,\n backend: str = \"local\",\n layer_multipliers: Optional[List[float]] = None,\n per_target_keep: Optional[Dict[str, int]] = None,\n per_target_keep_layers: Optional[List[Dict[str, int]]] = None,\n delta_cap: Optional[float] = None,\n):\n tmap = targets_map(backend)\n target_weights = target_weights or {}\n per_target_keep = per_target_keep or {}\n per_target_keep_layers = per_target_keep_layers or []\n\n def _mat(A: np.ndarray, B: np.ndarray, *, keep: Optional[int] = None) -> torch.Tensor:\n if isinstance(keep, int) and keep > 0:\n k = int(min(keep, A.shape[1], B.shape[0]))\n if k > 0:\n At = torch.from_numpy(A[:, :k]).to(torch.float32)\n Bt = torch.from_numpy(B[:k, :]).to(torch.float32)\n return At @ Bt\n return torch.from_numpy(A).to(torch.float32) @ torch.from_numpy(B).to(torch.float32)\n\n L = max(len(base_layers or []), len(sub_layers or []))\n scale = float(alpha_star) / float(max(1, int(rank)))\n # Store CPU-offloaded records of applied deltas to avoid holding large GPU tensors\n # Each record: (param, cpu_delta, mode, extra)\n # mode == \"full\" -> subtract full delta\n # mode == \"slice\" -> subtract into row slice [start:end]\n applied: List[Tuple[torch.nn.Parameter, torch.Tensor, str, Tuple[int, int]]] = []\n # layers list\n try:\n layers = list(getattr(getattr(model, \"model\", model), \"layers\"))\n except Exception:\n try:\n layers = list(getattr(model, \"blocks\"))\n except Exception:\n layers = []\n\n def _cap_delta_if_needed(w: torch.nn.Parameter, d: torch.Tensor) -> torch.Tensor:\n try:\n # Prefer explicit parameter; else env fallbacks\n if delta_cap is not None:\n cap = float(delta_cap)\n else:\n cap_env = (os.environ.get(\"PCA_DELTA_CAP\") or os.environ.get(\"REPO_ADAPTER_DELTA_CAP\") or \"\").strip()\n cap = float(cap_env) if cap_env else 0.0\n except Exception:\n cap = 0.0\n if cap is None or cap <= 0:\n return d\n try:\n wn = float(w.data.norm().item())\n dn = float(d.norm().item())\n if dn > 0 and wn > 0:\n limit = cap * wn\n if dn > limit:\n s = float(limit / max(dn, 1e-12))\n return (s * d)\n except Exception:\n return d\n return d\n\n for i in range(L):\n if i >= len(layers):\n break\n layer = layers[i]\n lm = 1.0\n 
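# lm is this block's per-layer multiplier; it stays 1.0 unless\n # layer_multipliers supplies an entry for index i.\n 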
try:\n if layer_multipliers is not None and i < len(layer_multipliers):\n lm = float(layer_multipliers[i])\n except Exception:\n lm = 1.0\n # Build deltas for this layer only on CPU, apply, then free\n base = base_layers[i] if i < len(base_layers or []) else None\n sub = (sub_layers[i] if (sub_layers is not None and i < len(sub_layers)) else None)\n deltas_cpu: Dict[str, torch.Tensor] = {}\n for name in tmap.keys():\n acc: Optional[torch.Tensor] = None\n # layer-specific keep overrides global keep\n keep_layer = None\n try:\n if 0 <= i < len(per_target_keep_layers):\n keep_layer = int(per_target_keep_layers[i].get(name, 0))\n except Exception:\n keep_layer = None\n keep_global = int(per_target_keep.get(name, 0)) if per_target_keep else 0\n keep_eff = keep_layer if (keep_layer and keep_layer > 0) else (keep_global if keep_global > 0 else None)\n if base is not None and name in base:\n try:\n acc = _mat(base[name][\"A\"], base[name][\"B\"], keep=keep_eff).to(torch.float32)\n except Exception:\n acc = None\n if sub is not None and name in sub:\n try:\n sub_m = _mat(sub[name][\"A\"], sub[name][\"B\"], keep=keep_eff).to(torch.float32)\n acc = sub_m if acc is None else ((1.0 - float(g_sub)) * acc + float(g_sub) * sub_m)\n except Exception:\n pass\n if acc is not None:\n tw = float(target_weights.get(name, 1.0))\n deltas_cpu[name] = (lm * tw * acc).contiguous()\n for short, rel in tmap.items():\n if short not in deltas_cpu:\n continue\n try:\n mod = getattr_nested(layer, rel)\n w: torch.nn.Parameter = getattr(mod, \"weight\") # type: ignore[assignment]\n d_cpu = (scale * deltas_cpu[short])\n d = d_cpu.to(w.device, dtype=w.dtype)\n d = _cap_delta_if_needed(w, d)\n if rel.endswith(\"mlp.w_in\") and short in (\"gate_proj\", \"up_proj\"):\n half = int(w.shape[0] // 2)\n if short == \"gate_proj\":\n part = d[:half, :] if d.shape[0] == w.shape[0] else d\n part = _cap_delta_if_needed(w, part)\n w.data[:half, :].add_(part)\n applied.append((w, d_cpu[:half, :].detach().to(\"cpu\"), \"slice\", (0, half)))\n else:\n part = d[-half:, :] if d.shape[0] == w.shape[0] else d\n part = _cap_delta_if_needed(w, part)\n w.data[half:, :].add_(part)\n applied.append((w, d_cpu[-half:, :].detach().to(\"cpu\"), \"slice\", (half, w.shape[0])))\n else:\n w.data.add_(d)\n applied.append((w, d_cpu.detach().to(\"cpu\"), \"full\", (0, 0)))\n except Exception:\n continue\n del deltas_cpu\n\n class _Applied:\n def __init__(self, records: List[Tuple[torch.nn.Parameter, torch.Tensor, str, Tuple[int, int]]]):\n self._records = records\n self._removed = False\n\n def remove(self) -> None:\n if self._removed:\n return\n for w, d_cpu, mode, extra in self._records:\n try:\n if mode == \"full\":\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data.sub_(d_dev)\n elif mode == \"slice\":\n start, end = extra\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data[start:end, :].sub_(d_dev)\n except Exception:\n pass\n self._removed = True\n\n return [_Applied(applied)]","source_hash":"3aaab206d84ec7b5730e0188b04bb9cad54225c7fd167810018dae6d680836c5","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.mixing._mat","uri":"program://Program_Conditioned_Adapter/function/modules.mixing._mat#L33-L40","kind":"function","name":"_mat","path":"modules/mixing.py","language":"python","start_line":33,"end_line":40,"context_start_line":13,"context_end_line":60,"code":" model: Any,\n base_layers: List[Dict[str, Dict[str, np.ndarray]]],\n sub_layers: Optional[List[Dict[str, Dict[str, np.ndarray]]]],\n *,\n 
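# alpha_star / rank below yields the LoRA-style scale applied to each low-rank A @ B delta\n 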
alpha_star: float,\n g_sub: float,\n rank: int,\n beta: float,\n target_weights: Optional[Dict[str, float]] = None,\n backend: str = \"local\",\n layer_multipliers: Optional[List[float]] = None,\n per_target_keep: Optional[Dict[str, int]] = None,\n per_target_keep_layers: Optional[List[Dict[str, int]]] = None,\n delta_cap: Optional[float] = None,\n):\n tmap = targets_map(backend)\n target_weights = target_weights or {}\n per_target_keep = per_target_keep or {}\n per_target_keep_layers = per_target_keep_layers or []\n\n def _mat(A: np.ndarray, B: np.ndarray, *, keep: Optional[int] = None) -> torch.Tensor:\n if isinstance(keep, int) and keep > 0:\n k = int(min(keep, A.shape[1], B.shape[0]))\n if k > 0:\n At = torch.from_numpy(A[:, :k]).to(torch.float32)\n Bt = torch.from_numpy(B[:k, :]).to(torch.float32)\n return At @ Bt\n return torch.from_numpy(A).to(torch.float32) @ torch.from_numpy(B).to(torch.float32)\n\n L = max(len(base_layers or []), len(sub_layers or []))\n scale = float(alpha_star) / float(max(1, int(rank)))\n # Store CPU-offloaded records of applied deltas to avoid holding large GPU tensors\n # Each record: (param, cpu_delta, mode, extra)\n # mode == \"full\" -> subtract full delta\n # mode == \"slice\" -> subtract into row slice [start:end]\n applied: List[Tuple[torch.nn.Parameter, torch.Tensor, str, Tuple[int, int]]] = []\n # layers list\n try:\n layers = list(getattr(getattr(model, \"model\", model), \"layers\"))\n except Exception:\n try:\n layers = list(getattr(model, \"blocks\"))\n except Exception:\n layers = []\n\n def _cap_delta_if_needed(w: torch.nn.Parameter, d: torch.Tensor) -> torch.Tensor:\n try:\n # Prefer explicit parameter; else env fallbacks","source_hash":"3aaab206d84ec7b5730e0188b04bb9cad54225c7fd167810018dae6d680836c5","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.mixing._cap_delta_if_needed","uri":"program://Program_Conditioned_Adapter/function/modules.mixing._cap_delta_if_needed#L58-L80","kind":"function","name":"_cap_delta_if_needed","path":"modules/mixing.py","language":"python","start_line":58,"end_line":80,"context_start_line":38,"context_end_line":100,"code":" Bt = torch.from_numpy(B[:k, :]).to(torch.float32)\n return At @ Bt\n return torch.from_numpy(A).to(torch.float32) @ torch.from_numpy(B).to(torch.float32)\n\n L = max(len(base_layers or []), len(sub_layers or []))\n scale = float(alpha_star) / float(max(1, int(rank)))\n # Store CPU-offloaded records of applied deltas to avoid holding large GPU tensors\n # Each record: (param, cpu_delta, mode, extra)\n # mode == \"full\" -> subtract full delta\n # mode == \"slice\" -> subtract into row slice [start:end]\n applied: List[Tuple[torch.nn.Parameter, torch.Tensor, str, Tuple[int, int]]] = []\n # layers list\n try:\n layers = list(getattr(getattr(model, \"model\", model), \"layers\"))\n except Exception:\n try:\n layers = list(getattr(model, \"blocks\"))\n except Exception:\n layers = []\n\n def _cap_delta_if_needed(w: torch.nn.Parameter, d: torch.Tensor) -> torch.Tensor:\n try:\n # Prefer explicit parameter; else env fallbacks\n if delta_cap is not None:\n cap = float(delta_cap)\n else:\n cap_env = (os.environ.get(\"PCA_DELTA_CAP\") or os.environ.get(\"REPO_ADAPTER_DELTA_CAP\") or \"\").strip()\n cap = float(cap_env) if cap_env else 0.0\n except Exception:\n cap = 0.0\n if cap is None or cap <= 0:\n return d\n try:\n wn = float(w.data.norm().item())\n dn = float(d.norm().item())\n if dn > 0 and wn > 0:\n limit = cap * wn\n if dn > limit:\n s = float(limit / max(dn, 
1e-12))\n return (s * d)\n except Exception:\n return d\n return d\n\n for i in range(L):\n if i >= len(layers):\n break\n layer = layers[i]\n lm = 1.0\n try:\n if layer_multipliers is not None and i < len(layer_multipliers):\n lm = float(layer_multipliers[i])\n except Exception:\n lm = 1.0\n # Build deltas for this layer only on CPU, apply, then free\n base = base_layers[i] if i < len(base_layers or []) else None\n sub = (sub_layers[i] if (sub_layers is not None and i < len(sub_layers)) else None)\n deltas_cpu: Dict[str, torch.Tensor] = {}\n for name in tmap.keys():\n acc: Optional[torch.Tensor] = None\n # layer-specific keep overrides global keep\n keep_layer = None\n try:","source_hash":"3aaab206d84ec7b5730e0188b04bb9cad54225c7fd167810018dae6d680836c5","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.mixing._Applied","uri":"program://Program_Conditioned_Adapter/class/modules.mixing._Applied#L149-L168","kind":"class","name":"_Applied","path":"modules/mixing.py","language":"python","start_line":149,"end_line":168,"context_start_line":129,"context_end_line":170,"code":" d = _cap_delta_if_needed(w, d)\n if rel.endswith(\"mlp.w_in\") and short in (\"gate_proj\", \"up_proj\"):\n half = int(w.shape[0] // 2)\n if short == \"gate_proj\":\n part = d[:half, :] if d.shape[0] == w.shape[0] else d\n part = _cap_delta_if_needed(w, part)\n w.data[:half, :].add_(part)\n applied.append((w, d_cpu[:half, :].detach().to(\"cpu\"), \"slice\", (0, half)))\n else:\n part = d[-half:, :] if d.shape[0] == w.shape[0] else d\n part = _cap_delta_if_needed(w, part)\n w.data[half:, :].add_(part)\n applied.append((w, d_cpu[-half:, :].detach().to(\"cpu\"), \"slice\", (half, w.shape[0])))\n else:\n w.data.add_(d)\n applied.append((w, d_cpu.detach().to(\"cpu\"), \"full\", (0, 0)))\n except Exception:\n continue\n del deltas_cpu\n\n class _Applied:\n def __init__(self, records: List[Tuple[torch.nn.Parameter, torch.Tensor, str, Tuple[int, int]]]):\n self._records = records\n self._removed = False\n\n def remove(self) -> None:\n if self._removed:\n return\n for w, d_cpu, mode, extra in self._records:\n try:\n if mode == \"full\":\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data.sub_(d_dev)\n elif mode == \"slice\":\n start, end = extra\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data[start:end, :].sub_(d_dev)\n except Exception:\n pass\n self._removed = True\n\n return [_Applied(applied)]","source_hash":"3aaab206d84ec7b5730e0188b04bb9cad54225c7fd167810018dae6d680836c5","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.mixing.__init__","uri":"program://Program_Conditioned_Adapter/function/modules.mixing.__init__#L150-L152","kind":"function","name":"__init__","path":"modules/mixing.py","language":"python","start_line":150,"end_line":152,"context_start_line":130,"context_end_line":170,"code":" if rel.endswith(\"mlp.w_in\") and short in (\"gate_proj\", \"up_proj\"):\n half = int(w.shape[0] // 2)\n if short == \"gate_proj\":\n part = d[:half, :] if d.shape[0] == w.shape[0] else d\n part = _cap_delta_if_needed(w, part)\n w.data[:half, :].add_(part)\n applied.append((w, d_cpu[:half, :].detach().to(\"cpu\"), \"slice\", (0, half)))\n else:\n part = d[-half:, :] if d.shape[0] == w.shape[0] else d\n part = _cap_delta_if_needed(w, part)\n w.data[half:, :].add_(part)\n applied.append((w, d_cpu[-half:, :].detach().to(\"cpu\"), \"slice\", (half, w.shape[0])))\n else:\n w.data.add_(d)\n applied.append((w, d_cpu.detach().to(\"cpu\"), \"full\", (0, 
0)))\n except Exception:\n continue\n del deltas_cpu\n\n class _Applied:\n def __init__(self, records: List[Tuple[torch.nn.Parameter, torch.Tensor, str, Tuple[int, int]]]):\n self._records = records\n self._removed = False\n\n def remove(self) -> None:\n if self._removed:\n return\n for w, d_cpu, mode, extra in self._records:\n try:\n if mode == \"full\":\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data.sub_(d_dev)\n elif mode == \"slice\":\n start, end = extra\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data[start:end, :].sub_(d_dev)\n except Exception:\n pass\n self._removed = True\n\n return [_Applied(applied)]","source_hash":"3aaab206d84ec7b5730e0188b04bb9cad54225c7fd167810018dae6d680836c5","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.mixing.remove","uri":"program://Program_Conditioned_Adapter/function/modules.mixing.remove#L154-L168","kind":"function","name":"remove","path":"modules/mixing.py","language":"python","start_line":154,"end_line":168,"context_start_line":134,"context_end_line":170,"code":" part = _cap_delta_if_needed(w, part)\n w.data[:half, :].add_(part)\n applied.append((w, d_cpu[:half, :].detach().to(\"cpu\"), \"slice\", (0, half)))\n else:\n part = d[-half:, :] if d.shape[0] == w.shape[0] else d\n part = _cap_delta_if_needed(w, part)\n w.data[half:, :].add_(part)\n applied.append((w, d_cpu[-half:, :].detach().to(\"cpu\"), \"slice\", (half, w.shape[0])))\n else:\n w.data.add_(d)\n applied.append((w, d_cpu.detach().to(\"cpu\"), \"full\", (0, 0)))\n except Exception:\n continue\n del deltas_cpu\n\n class _Applied:\n def __init__(self, records: List[Tuple[torch.nn.Parameter, torch.Tensor, str, Tuple[int, int]]]):\n self._records = records\n self._removed = False\n\n def remove(self) -> None:\n if self._removed:\n return\n for w, d_cpu, mode, extra in self._records:\n try:\n if mode == \"full\":\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data.sub_(d_dev)\n elif mode == \"slice\":\n start, end = extra\n d_dev = d_cpu.to(device=w.device, dtype=w.dtype)\n w.data[start:end, :].sub_(d_dev)\n except Exception:\n pass\n self._removed = True\n\n return [_Applied(applied)]","source_hash":"3aaab206d84ec7b5730e0188b04bb9cad54225c7fd167810018dae6d680836c5","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.telemetry","uri":"program://Program_Conditioned_Adapter/module/modules.telemetry#L1-L1","kind":"module","name":"modules.telemetry","path":"modules/telemetry.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":1,"code":"# telemetry meta assembly and write (from run_repo_adapter.py)","source_hash":"fd0ec51c4fcadc9d7c93e0efda60fd8e94ecf12cd65e99e5d50b96ed5decaba1","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state","uri":"program://Program_Conditioned_Adapter/module/modules.program_state#L1-L148","kind":"module","name":"modules.program_state","path":"modules/program_state.py","language":"python","start_line":1,"end_line":148,"context_start_line":1,"context_end_line":148,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, asdict\nfrom typing import Any, Dict, List, Tuple, Optional\nimport json\nimport hashlib\nimport os\nimport time\n\n\n@dataclass\nclass ProgramState:\n root: str\n candidates_modules: List[str]\n candidates_files: List[str]\n citations: List[Tuple[str, int, int]]\n vec: Optional[List[float]] = None\n vec_weight: float = 1.0\n H: float = 0.0\n 
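# None default plus __post_init__ sidesteps the dataclass ban on mutable defaults; field(default_factory=list) would be the idiomatic alternative.\n 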
behavior_log: List[Dict[str, Any]] = None # type: ignore[assignment]\n\n def __post_init__(self) -> None:\n if self.behavior_log is None:\n self.behavior_log = []\n\n def checksum(self) -> str:\n try:\n payload = {\n \"root\": self.root,\n \"mods\": sorted(list(self.candidates_modules)),\n \"files\": sorted(list(self.candidates_files)),\n \"cites\": sorted([(p, int(a), int(b)) for (p, a, b) in self.citations]),\n \"H\": float(self.H),\n \"vec_w\": float(self.vec_weight),\n \"vec_n\": int(len(self.vec) if isinstance(self.vec, list) else 0),\n }\n raw = json.dumps(payload, sort_keys=True).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(raw).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _ensure_dir(p: str) -> None:\n try:\n os.makedirs(os.path.dirname(p), exist_ok=True)\n except Exception:\n pass\n\n\ndef _load_json(path: str) -> Dict[str, Any]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n return json.loads(fh.read())\n except Exception:\n return {}\n\n\ndef _save_json(path: str, obj: Dict[str, Any]) -> None:\n _ensure_dir(path)\n try:\n with open(path, \"w\", encoding=\"utf-8\") as fh:\n fh.write(json.dumps(obj, indent=2))\n except Exception:\n pass\n\n\ndef _from_dict(d: Dict[str, Any]) -> ProgramState:\n return ProgramState(\n root=str(d.get(\"root\") or d.get(\"program_root\") or \"\"),\n candidates_modules=list(d.get(\"candidates_modules\") or []),\n candidates_files=list(d.get(\"candidates_files\") or []),\n citations=[(str(p), int(a), int(b)) for (p, a, b) in (d.get(\"citations\") or [])],\n vec=(list(d.get(\"vec\")) if isinstance(d.get(\"vec\"), list) else None),\n vec_weight=float(d.get(\"vec_weight\") or 1.0),\n H=float(d.get(\"H\") or 0.0),\n behavior_log=list(d.get(\"behavior_log\") or []),\n )\n\n\ndef _to_dict(s: ProgramState) -> Dict[str, Any]:\n obj = asdict(s)\n obj[\"schema_version\"] = 1\n obj[\"updated_at\"] = time.time()\n return obj\n\n\ndef load_program_state(root: str, *, path: str | None = None) -> ProgramState:\n if not path or not os.path.exists(path):\n return ProgramState(root=str(root), candidates_modules=[], candidates_files=[], citations=[], vec=None, vec_weight=1.0, H=0.0, behavior_log=[])\n return _from_dict(_load_json(path))\n\n\ndef save_program_state(state: ProgramState, *, path: str | None = None) -> None:\n if not path:\n return\n _save_json(path, _to_dict(state))\n\n\ndef join_program_states(a: ProgramState, b: ProgramState) -> ProgramState:\n mods = sorted(list(set(a.candidates_modules) | set(b.candidates_modules)))\n files = sorted(list(set(a.candidates_files) | set(b.candidates_files)))\n cites = sorted(list(set(a.citations) | set(b.citations)))\n # Prefer newer vec if provided; blend weight heuristically\n vec = b.vec if (b.vec and len(b.vec) > 0) else a.vec\n vw = float(max(0.0, (a.vec_weight if a.vec else 0.0) + (b.vec_weight if b.vec else 0.0)))\n H = float(max(0.0, a.H + b.H))\n beh = list(a.behavior_log or []) + list(b.behavior_log or [])\n return ProgramState(\n root=(b.root or a.root),\n candidates_modules=mods,\n candidates_files=files,\n citations=cites,\n vec=vec,\n vec_weight=(vw if vw > 0 else 1.0),\n H=H,\n behavior_log=beh,\n )\n\n\ndef new_state_from_run(\n root: str,\n *,\n modules: List[str],\n files: List[str],\n citations: List[Tuple[str, int, int]],\n z_vec: Any = None,\n beh_event: Dict[str, Any] | None = None,\n H_increment: float = 0.0,\n) -> ProgramState:\n return ProgramState(\n root=str(root),\n candidates_modules=list(modules or []),\n candidates_files=list(files or []),\n 
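# citations are (path, start_line, end_line) triples, matching ProgramState.citations\n 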
citations=list(citations or []),\n vec=(list(z_vec) if isinstance(z_vec, list) else (z_vec.tolist() if hasattr(z_vec, \"tolist\") else None)),\n vec_weight=1.0,\n H=float(max(0.0, H_increment or 0.0)),\n behavior_log=[(beh_event or {})],\n )\n\n\ndef changed_bits(a: ProgramState, b: ProgramState) -> bool:\n try:\n return a.checksum() != b.checksum()\n except Exception:\n return True\n\n","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state.ProgramState","uri":"program://Program_Conditioned_Adapter/class/modules.program_state.ProgramState#L12-L40","kind":"class","name":"ProgramState","path":"modules/program_state.py","language":"python","start_line":12,"end_line":40,"context_start_line":1,"context_end_line":60,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, asdict\nfrom typing import Any, Dict, List, Tuple, Optional\nimport json\nimport hashlib\nimport os\nimport time\n\n\n@dataclass\nclass ProgramState:\n root: str\n candidates_modules: List[str]\n candidates_files: List[str]\n citations: List[Tuple[str, int, int]]\n vec: Optional[List[float]] = None\n vec_weight: float = 1.0\n H: float = 0.0\n behavior_log: List[Dict[str, Any]] = None # type: ignore[assignment]\n\n def __post_init__(self) -> None:\n if self.behavior_log is None:\n self.behavior_log = []\n\n def checksum(self) -> str:\n try:\n payload = {\n \"root\": self.root,\n \"mods\": sorted(list(self.candidates_modules)),\n \"files\": sorted(list(self.candidates_files)),\n \"cites\": sorted([(p, int(a), int(b)) for (p, a, b) in self.citations]),\n \"H\": float(self.H),\n \"vec_w\": float(self.vec_weight),\n \"vec_n\": int(len(self.vec) if isinstance(self.vec, list) else 0),\n }\n raw = json.dumps(payload, sort_keys=True).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(raw).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _ensure_dir(p: str) -> None:\n try:\n os.makedirs(os.path.dirname(p), exist_ok=True)\n except Exception:\n pass\n\n\ndef _load_json(path: str) -> Dict[str, Any]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n return json.loads(fh.read())\n except Exception:\n return {}\n\n\ndef _save_json(path: str, obj: Dict[str, Any]) -> None:\n _ensure_dir(path)\n try:","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state._ensure_dir","uri":"program://Program_Conditioned_Adapter/function/modules.program_state._ensure_dir#L43-L47","kind":"function","name":"_ensure_dir","path":"modules/program_state.py","language":"python","start_line":43,"end_line":47,"context_start_line":23,"context_end_line":67,"code":" if self.behavior_log is None:\n self.behavior_log = []\n\n def checksum(self) -> str:\n try:\n payload = {\n \"root\": self.root,\n \"mods\": sorted(list(self.candidates_modules)),\n \"files\": sorted(list(self.candidates_files)),\n \"cites\": sorted([(p, int(a), int(b)) for (p, a, b) in self.citations]),\n \"H\": float(self.H),\n \"vec_w\": float(self.vec_weight),\n \"vec_n\": int(len(self.vec) if isinstance(self.vec, list) else 0),\n }\n raw = json.dumps(payload, sort_keys=True).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(raw).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _ensure_dir(p: str) -> None:\n try:\n os.makedirs(os.path.dirname(p), exist_ok=True)\n except Exception:\n pass\n\n\ndef 
_load_json(path: str) -> Dict[str, Any]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n return json.loads(fh.read())\n except Exception:\n return {}\n\n\ndef _save_json(path: str, obj: Dict[str, Any]) -> None:\n _ensure_dir(path)\n try:\n with open(path, \"w\", encoding=\"utf-8\") as fh:\n fh.write(json.dumps(obj, indent=2))\n except Exception:\n pass\n\n\ndef _from_dict(d: Dict[str, Any]) -> ProgramState:","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state._load_json","uri":"program://Program_Conditioned_Adapter/function/modules.program_state._load_json#L50-L55","kind":"function","name":"_load_json","path":"modules/program_state.py","language":"python","start_line":50,"end_line":55,"context_start_line":30,"context_end_line":75,"code":" \"mods\": sorted(list(self.candidates_modules)),\n \"files\": sorted(list(self.candidates_files)),\n \"cites\": sorted([(p, int(a), int(b)) for (p, a, b) in self.citations]),\n \"H\": float(self.H),\n \"vec_w\": float(self.vec_weight),\n \"vec_n\": int(len(self.vec) if isinstance(self.vec, list) else 0),\n }\n raw = json.dumps(payload, sort_keys=True).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(raw).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _ensure_dir(p: str) -> None:\n try:\n os.makedirs(os.path.dirname(p), exist_ok=True)\n except Exception:\n pass\n\n\ndef _load_json(path: str) -> Dict[str, Any]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n return json.loads(fh.read())\n except Exception:\n return {}\n\n\ndef _save_json(path: str, obj: Dict[str, Any]) -> None:\n _ensure_dir(path)\n try:\n with open(path, \"w\", encoding=\"utf-8\") as fh:\n fh.write(json.dumps(obj, indent=2))\n except Exception:\n pass\n\n\ndef _from_dict(d: Dict[str, Any]) -> ProgramState:\n return ProgramState(\n root=str(d.get(\"root\") or d.get(\"program_root\") or \"\"),\n candidates_modules=list(d.get(\"candidates_modules\") or []),\n candidates_files=list(d.get(\"candidates_files\") or []),\n citations=[(str(p), int(a), int(b)) for (p, a, b) in (d.get(\"citations\") or [])],\n vec=(list(d.get(\"vec\")) if isinstance(d.get(\"vec\"), list) else None),\n vec_weight=float(d.get(\"vec_weight\") or 1.0),\n H=float(d.get(\"H\") or 0.0),","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state._save_json","uri":"program://Program_Conditioned_Adapter/function/modules.program_state._save_json#L58-L64","kind":"function","name":"_save_json","path":"modules/program_state.py","language":"python","start_line":58,"end_line":64,"context_start_line":38,"context_end_line":84,"code":" return hashlib.sha256(raw).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _ensure_dir(p: str) -> None:\n try:\n os.makedirs(os.path.dirname(p), exist_ok=True)\n except Exception:\n pass\n\n\ndef _load_json(path: str) -> Dict[str, Any]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n return json.loads(fh.read())\n except Exception:\n return {}\n\n\ndef _save_json(path: str, obj: Dict[str, Any]) -> None:\n _ensure_dir(path)\n try:\n with open(path, \"w\", encoding=\"utf-8\") as fh:\n fh.write(json.dumps(obj, indent=2))\n except Exception:\n pass\n\n\ndef _from_dict(d: Dict[str, Any]) -> ProgramState:\n return ProgramState(\n root=str(d.get(\"root\") or d.get(\"program_root\") or \"\"),\n 
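# \"program_root\" is accepted as an alternate key when \"root\" is absent\n 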
candidates_modules=list(d.get(\"candidates_modules\") or []),\n candidates_files=list(d.get(\"candidates_files\") or []),\n citations=[(str(p), int(a), int(b)) for (p, a, b) in (d.get(\"citations\") or [])],\n vec=(list(d.get(\"vec\")) if isinstance(d.get(\"vec\"), list) else None),\n vec_weight=float(d.get(\"vec_weight\") or 1.0),\n H=float(d.get(\"H\") or 0.0),\n behavior_log=list(d.get(\"behavior_log\") or []),\n )\n\n\ndef _to_dict(s: ProgramState) -> Dict[str, Any]:\n obj = asdict(s)\n obj[\"schema_version\"] = 1\n obj[\"updated_at\"] = time.time()\n return obj","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state._from_dict","uri":"program://Program_Conditioned_Adapter/function/modules.program_state._from_dict#L67-L77","kind":"function","name":"_from_dict","path":"modules/program_state.py","language":"python","start_line":67,"end_line":77,"context_start_line":47,"context_end_line":97,"code":" pass\n\n\ndef _load_json(path: str) -> Dict[str, Any]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n return json.loads(fh.read())\n except Exception:\n return {}\n\n\ndef _save_json(path: str, obj: Dict[str, Any]) -> None:\n _ensure_dir(path)\n try:\n with open(path, \"w\", encoding=\"utf-8\") as fh:\n fh.write(json.dumps(obj, indent=2))\n except Exception:\n pass\n\n\ndef _from_dict(d: Dict[str, Any]) -> ProgramState:\n return ProgramState(\n root=str(d.get(\"root\") or d.get(\"program_root\") or \"\"),\n candidates_modules=list(d.get(\"candidates_modules\") or []),\n candidates_files=list(d.get(\"candidates_files\") or []),\n citations=[(str(p), int(a), int(b)) for (p, a, b) in (d.get(\"citations\") or [])],\n vec=(list(d.get(\"vec\")) if isinstance(d.get(\"vec\"), list) else None),\n vec_weight=float(d.get(\"vec_weight\") or 1.0),\n H=float(d.get(\"H\") or 0.0),\n behavior_log=list(d.get(\"behavior_log\") or []),\n )\n\n\ndef _to_dict(s: ProgramState) -> Dict[str, Any]:\n obj = asdict(s)\n obj[\"schema_version\"] = 1\n obj[\"updated_at\"] = time.time()\n return obj\n\n\ndef load_program_state(root: str, *, path: str | None = None) -> ProgramState:\n if not path or not os.path.exists(path):\n return ProgramState(root=str(root), candidates_modules=[], candidates_files=[], citations=[], vec=None, vec_weight=1.0, H=0.0, behavior_log=[])\n return _from_dict(_load_json(path))\n\n\ndef save_program_state(state: ProgramState, *, path: str | None = None) -> None:\n if not path:\n return\n _save_json(path, _to_dict(state))\n","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state._to_dict","uri":"program://Program_Conditioned_Adapter/function/modules.program_state._to_dict#L80-L84","kind":"function","name":"_to_dict","path":"modules/program_state.py","language":"python","start_line":80,"end_line":84,"context_start_line":60,"context_end_line":104,"code":" try:\n with open(path, \"w\", encoding=\"utf-8\") as fh:\n fh.write(json.dumps(obj, indent=2))\n except Exception:\n pass\n\n\ndef _from_dict(d: Dict[str, Any]) -> ProgramState:\n return ProgramState(\n root=str(d.get(\"root\") or d.get(\"program_root\") or \"\"),\n candidates_modules=list(d.get(\"candidates_modules\") or []),\n candidates_files=list(d.get(\"candidates_files\") or []),\n citations=[(str(p), int(a), int(b)) for (p, a, b) in (d.get(\"citations\") or [])],\n 
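# vec is kept only when stored as a JSON list; any other shape collapses to None\n 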
vec=(list(d.get(\"vec\")) if isinstance(d.get(\"vec\"), list) else None),\n vec_weight=float(d.get(\"vec_weight\") or 1.0),\n H=float(d.get(\"H\") or 0.0),\n behavior_log=list(d.get(\"behavior_log\") or []),\n )\n\n\ndef _to_dict(s: ProgramState) -> Dict[str, Any]:\n obj = asdict(s)\n obj[\"schema_version\"] = 1\n obj[\"updated_at\"] = time.time()\n return obj\n\n\ndef load_program_state(root: str, *, path: str | None = None) -> ProgramState:\n if not path or not os.path.exists(path):\n return ProgramState(root=str(root), candidates_modules=[], candidates_files=[], citations=[], vec=None, vec_weight=1.0, H=0.0, behavior_log=[])\n return _from_dict(_load_json(path))\n\n\ndef save_program_state(state: ProgramState, *, path: str | None = None) -> None:\n if not path:\n return\n _save_json(path, _to_dict(state))\n\n\ndef join_program_states(a: ProgramState, b: ProgramState) -> ProgramState:\n mods = sorted(list(set(a.candidates_modules) | set(b.candidates_modules)))\n files = sorted(list(set(a.candidates_files) | set(b.candidates_files)))\n cites = sorted(list(set(a.citations) | set(b.citations)))\n # Prefer newer vec if provided; blend weight heuristically\n vec = b.vec if (b.vec and len(b.vec) > 0) else a.vec","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state.load_program_state","uri":"program://Program_Conditioned_Adapter/function/modules.program_state.load_program_state#L87-L90","kind":"function","name":"load_program_state","path":"modules/program_state.py","language":"python","start_line":87,"end_line":90,"context_start_line":67,"context_end_line":110,"code":"def _from_dict(d: Dict[str, Any]) -> ProgramState:\n return ProgramState(\n root=str(d.get(\"root\") or d.get(\"program_root\") or \"\"),\n candidates_modules=list(d.get(\"candidates_modules\") or []),\n candidates_files=list(d.get(\"candidates_files\") or []),\n citations=[(str(p), int(a), int(b)) for (p, a, b) in (d.get(\"citations\") or [])],\n vec=(list(d.get(\"vec\")) if isinstance(d.get(\"vec\"), list) else None),\n vec_weight=float(d.get(\"vec_weight\") or 1.0),\n H=float(d.get(\"H\") or 0.0),\n behavior_log=list(d.get(\"behavior_log\") or []),\n )\n\n\ndef _to_dict(s: ProgramState) -> Dict[str, Any]:\n obj = asdict(s)\n obj[\"schema_version\"] = 1\n obj[\"updated_at\"] = time.time()\n return obj\n\n\ndef load_program_state(root: str, *, path: str | None = None) -> ProgramState:\n if not path or not os.path.exists(path):\n return ProgramState(root=str(root), candidates_modules=[], candidates_files=[], citations=[], vec=None, vec_weight=1.0, H=0.0, behavior_log=[])\n return _from_dict(_load_json(path))\n\n\ndef save_program_state(state: ProgramState, *, path: str | None = None) -> None:\n if not path:\n return\n _save_json(path, _to_dict(state))\n\n\ndef join_program_states(a: ProgramState, b: ProgramState) -> ProgramState:\n mods = sorted(list(set(a.candidates_modules) | set(b.candidates_modules)))\n files = sorted(list(set(a.candidates_files) | set(b.candidates_files)))\n cites = sorted(list(set(a.citations) | set(b.citations)))\n # Prefer newer vec if provided; blend weight heuristically\n vec = b.vec if (b.vec and len(b.vec) > 0) else a.vec\n vw = float(max(0.0, (a.vec_weight if a.vec else 0.0) + (b.vec_weight if b.vec else 0.0)))\n H = float(max(0.0, a.H + b.H))\n beh = list(a.behavior_log or []) + list(b.behavior_log or [])\n return ProgramState(\n root=(b.root or a.root),\n 
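# the newer state b wins the root when set; module, file, and citation sets were unioned above\n 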
candidates_modules=mods,","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state.save_program_state","uri":"program://Program_Conditioned_Adapter/function/modules.program_state.save_program_state#L93-L96","kind":"function","name":"save_program_state","path":"modules/program_state.py","language":"python","start_line":93,"end_line":96,"context_start_line":73,"context_end_line":116,"code":" vec=(list(d.get(\"vec\")) if isinstance(d.get(\"vec\"), list) else None),\n vec_weight=float(d.get(\"vec_weight\") or 1.0),\n H=float(d.get(\"H\") or 0.0),\n behavior_log=list(d.get(\"behavior_log\") or []),\n )\n\n\ndef _to_dict(s: ProgramState) -> Dict[str, Any]:\n obj = asdict(s)\n obj[\"schema_version\"] = 1\n obj[\"updated_at\"] = time.time()\n return obj\n\n\ndef load_program_state(root: str, *, path: str | None = None) -> ProgramState:\n if not path or not os.path.exists(path):\n return ProgramState(root=str(root), candidates_modules=[], candidates_files=[], citations=[], vec=None, vec_weight=1.0, H=0.0, behavior_log=[])\n return _from_dict(_load_json(path))\n\n\ndef save_program_state(state: ProgramState, *, path: str | None = None) -> None:\n if not path:\n return\n _save_json(path, _to_dict(state))\n\n\ndef join_program_states(a: ProgramState, b: ProgramState) -> ProgramState:\n mods = sorted(list(set(a.candidates_modules) | set(b.candidates_modules)))\n files = sorted(list(set(a.candidates_files) | set(b.candidates_files)))\n cites = sorted(list(set(a.citations) | set(b.citations)))\n # Prefer newer vec if provided; blend weight heuristically\n vec = b.vec if (b.vec and len(b.vec) > 0) else a.vec\n vw = float(max(0.0, (a.vec_weight if a.vec else 0.0) + (b.vec_weight if b.vec else 0.0)))\n H = float(max(0.0, a.H + b.H))\n beh = list(a.behavior_log or []) + list(b.behavior_log or [])\n return ProgramState(\n root=(b.root or a.root),\n candidates_modules=mods,\n candidates_files=files,\n citations=cites,\n vec=vec,\n vec_weight=(vw if vw > 0 else 1.0),\n H=H,\n behavior_log=beh,","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state.join_program_states","uri":"program://Program_Conditioned_Adapter/function/modules.program_state.join_program_states#L99-L117","kind":"function","name":"join_program_states","path":"modules/program_state.py","language":"python","start_line":99,"end_line":117,"context_start_line":79,"context_end_line":137,"code":"\ndef _to_dict(s: ProgramState) -> Dict[str, Any]:\n obj = asdict(s)\n obj[\"schema_version\"] = 1\n obj[\"updated_at\"] = time.time()\n return obj\n\n\ndef load_program_state(root: str, *, path: str | None = None) -> ProgramState:\n if not path or not os.path.exists(path):\n return ProgramState(root=str(root), candidates_modules=[], candidates_files=[], citations=[], vec=None, vec_weight=1.0, H=0.0, behavior_log=[])\n return _from_dict(_load_json(path))\n\n\ndef save_program_state(state: ProgramState, *, path: str | None = None) -> None:\n if not path:\n return\n _save_json(path, _to_dict(state))\n\n\ndef join_program_states(a: ProgramState, b: ProgramState) -> ProgramState:\n mods = sorted(list(set(a.candidates_modules) | set(b.candidates_modules)))\n files = sorted(list(set(a.candidates_files) | set(b.candidates_files)))\n cites = sorted(list(set(a.citations) | set(b.citations)))\n # Prefer newer vec if provided; 
blend weight heuristically\n vec = b.vec if (b.vec and len(b.vec) > 0) else a.vec\n vw = float(max(0.0, (a.vec_weight if a.vec else 0.0) + (b.vec_weight if b.vec else 0.0)))\n H = float(max(0.0, a.H + b.H))\n beh = list(a.behavior_log or []) + list(b.behavior_log or [])\n return ProgramState(\n root=(b.root or a.root),\n candidates_modules=mods,\n candidates_files=files,\n citations=cites,\n vec=vec,\n vec_weight=(vw if vw > 0 else 1.0),\n H=H,\n behavior_log=beh,\n )\n\n\ndef new_state_from_run(\n root: str,\n *,\n modules: List[str],\n files: List[str],\n citations: List[Tuple[str, int, int]],\n z_vec: Any = None,\n beh_event: Dict[str, Any] | None = None,\n H_increment: float = 0.0,\n) -> ProgramState:\n return ProgramState(\n root=str(root),\n candidates_modules=list(modules or []),\n candidates_files=list(files or []),\n citations=list(citations or []),\n vec=(list(z_vec) if isinstance(z_vec, list) else (z_vec.tolist() if hasattr(z_vec, \"tolist\") else None)),\n vec_weight=1.0,\n H=float(max(0.0, H_increment or 0.0)),","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state.new_state_from_run","uri":"program://Program_Conditioned_Adapter/function/modules.program_state.new_state_from_run#L120-L139","kind":"function","name":"new_state_from_run","path":"modules/program_state.py","language":"python","start_line":120,"end_line":139,"context_start_line":100,"context_end_line":148,"code":" mods = sorted(list(set(a.candidates_modules) | set(b.candidates_modules)))\n files = sorted(list(set(a.candidates_files) | set(b.candidates_files)))\n cites = sorted(list(set(a.citations) | set(b.citations)))\n # Prefer newer vec if provided; blend weight heuristically\n vec = b.vec if (b.vec and len(b.vec) > 0) else a.vec\n vw = float(max(0.0, (a.vec_weight if a.vec else 0.0) + (b.vec_weight if b.vec else 0.0)))\n H = float(max(0.0, a.H + b.H))\n beh = list(a.behavior_log or []) + list(b.behavior_log or [])\n return ProgramState(\n root=(b.root or a.root),\n candidates_modules=mods,\n candidates_files=files,\n citations=cites,\n vec=vec,\n vec_weight=(vw if vw > 0 else 1.0),\n H=H,\n behavior_log=beh,\n )\n\n\ndef new_state_from_run(\n root: str,\n *,\n modules: List[str],\n files: List[str],\n citations: List[Tuple[str, int, int]],\n z_vec: Any = None,\n beh_event: Dict[str, Any] | None = None,\n H_increment: float = 0.0,\n) -> ProgramState:\n return ProgramState(\n root=str(root),\n candidates_modules=list(modules or []),\n candidates_files=list(files or []),\n citations=list(citations or []),\n vec=(list(z_vec) if isinstance(z_vec, list) else (z_vec.tolist() if hasattr(z_vec, \"tolist\") else None)),\n vec_weight=1.0,\n H=float(max(0.0, H_increment or 0.0)),\n behavior_log=[(beh_event or {})],\n )\n\n\ndef changed_bits(a: ProgramState, b: ProgramState) -> bool:\n try:\n return a.checksum() != b.checksum()\n except Exception:\n return True\n\n","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state.changed_bits","uri":"program://Program_Conditioned_Adapter/function/modules.program_state.changed_bits#L142-L146","kind":"function","name":"changed_bits","path":"modules/program_state.py","language":"python","start_line":142,"end_line":146,"context_start_line":122,"context_end_line":148,"code":" *,\n modules: List[str],\n files: List[str],\n citations: 
List[Tuple[str, int, int]],\n z_vec: Any = None,\n beh_event: Dict[str, Any] | None = None,\n H_increment: float = 0.0,\n) -> ProgramState:\n return ProgramState(\n root=str(root),\n candidates_modules=list(modules or []),\n candidates_files=list(files or []),\n citations=list(citations or []),\n vec=(list(z_vec) if isinstance(z_vec, list) else (z_vec.tolist() if hasattr(z_vec, \"tolist\") else None)),\n vec_weight=1.0,\n H=float(max(0.0, H_increment or 0.0)),\n behavior_log=[(beh_event or {})],\n )\n\n\ndef changed_bits(a: ProgramState, b: ProgramState) -> bool:\n try:\n return a.checksum() != b.checksum()\n except Exception:\n return True\n\n","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state.__post_init__","uri":"program://Program_Conditioned_Adapter/function/modules.program_state.__post_init__#L22-L24","kind":"function","name":"__post_init__","path":"modules/program_state.py","language":"python","start_line":22,"end_line":24,"context_start_line":2,"context_end_line":44,"code":"\nfrom dataclasses import dataclass, asdict\nfrom typing import Any, Dict, List, Tuple, Optional\nimport json\nimport hashlib\nimport os\nimport time\n\n\n@dataclass\nclass ProgramState:\n root: str\n candidates_modules: List[str]\n candidates_files: List[str]\n citations: List[Tuple[str, int, int]]\n vec: Optional[List[float]] = None\n vec_weight: float = 1.0\n H: float = 0.0\n behavior_log: List[Dict[str, Any]] = None # type: ignore[assignment]\n\n def __post_init__(self) -> None:\n if self.behavior_log is None:\n self.behavior_log = []\n\n def checksum(self) -> str:\n try:\n payload = {\n \"root\": self.root,\n \"mods\": sorted(list(self.candidates_modules)),\n \"files\": sorted(list(self.candidates_files)),\n \"cites\": sorted([(p, int(a), int(b)) for (p, a, b) in self.citations]),\n \"H\": float(self.H),\n \"vec_w\": float(self.vec_weight),\n \"vec_n\": int(len(self.vec) if isinstance(self.vec, list) else 0),\n }\n raw = json.dumps(payload, sort_keys=True).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(raw).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _ensure_dir(p: str) -> None:\n try:","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.program_state.checksum","uri":"program://Program_Conditioned_Adapter/function/modules.program_state.checksum#L26-L40","kind":"function","name":"checksum","path":"modules/program_state.py","language":"python","start_line":26,"end_line":40,"context_start_line":6,"context_end_line":60,"code":"import hashlib\nimport os\nimport time\n\n\n@dataclass\nclass ProgramState:\n root: str\n candidates_modules: List[str]\n candidates_files: List[str]\n citations: List[Tuple[str, int, int]]\n vec: Optional[List[float]] = None\n vec_weight: float = 1.0\n H: float = 0.0\n behavior_log: List[Dict[str, Any]] = None # type: ignore[assignment]\n\n def __post_init__(self) -> None:\n if self.behavior_log is None:\n self.behavior_log = []\n\n def checksum(self) -> str:\n try:\n payload = {\n \"root\": self.root,\n \"mods\": sorted(list(self.candidates_modules)),\n \"files\": sorted(list(self.candidates_files)),\n \"cites\": sorted([(p, int(a), int(b)) for (p, a, b) in self.citations]),\n \"H\": float(self.H),\n \"vec_w\": float(self.vec_weight),\n \"vec_n\": int(len(self.vec) if isinstance(self.vec, list) else 0),\n }\n raw = 
json.dumps(payload, sort_keys=True).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(raw).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _ensure_dir(p: str) -> None:\n try:\n os.makedirs(os.path.dirname(p), exist_ok=True)\n except Exception:\n pass\n\n\ndef _load_json(path: str) -> Dict[str, Any]:\n try:\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n return json.loads(fh.read())\n except Exception:\n return {}\n\n\ndef _save_json(path: str, obj: Dict[str, Any]) -> None:\n _ensure_dir(path)\n try:","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner_core","uri":"program://Program_Conditioned_Adapter/module/modules.runner_core#L1-L37","kind":"module","name":"modules.runner_core","path":"modules/runner_core.py","language":"python","start_line":1,"end_line":37,"context_start_line":1,"context_end_line":37,"code":"from __future__ import annotations\n\nfrom typing import Dict, List, Any, Tuple\n\nfrom .program_graph import ProgramGraph, Entity\nfrom .retrieval_policy import RetrievalPolicy\nfrom .citations import CitationManager, CitationPolicy\n\n\ndef select_region(query: str, pg: ProgramGraph, policy: RetrievalPolicy, top_k: int = 16) -> List[str]:\n scores = policy.score_entities(query, pg)\n ids = sorted(scores.keys(), key=lambda k: scores.get(k, 0.0), reverse=True)[:max(1, int(top_k))]\n return ids\n\n\ndef prepare_citations(units: List[Dict[str, Any]], region_entity_ids: List[str], pg: ProgramGraph, citations_policy: Dict[str, Any], manifest: Dict[str, Any]) -> List[Dict[str, Any]]:\n cm = CitationManager(\n policy=CitationPolicy(\n enforce=bool(citations_policy.get(\"enforce\", True)),\n per_paragraph=bool(citations_policy.get(\"per_paragraph\", False)),\n repair=bool(citations_policy.get(\"repair\", True)),\n ),\n pg=pg,\n manifest=manifest or {},\n )\n # Collect baseline evidence for the region and stamp\n baseline = cm.collect(region_entity_ids, contexts=[])\n out: List[Dict[str, Any]] = []\n for u in units:\n unit = dict(u)\n unit.setdefault(\"evidence\", baseline[:4])\n unit = cm.enforce([unit])[0]\n unit = cm.stamp_provenance(unit)\n out.append(unit)\n return out\n\n","source_hash":"b602b689f9597f4953f5e8fb2d98e89141b2eeeb7e2ac5b723a8c919e3ffeac2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner_core.select_region","uri":"program://Program_Conditioned_Adapter/function/modules.runner_core.select_region#L10-L13","kind":"function","name":"select_region","path":"modules/runner_core.py","language":"python","start_line":10,"end_line":13,"context_start_line":1,"context_end_line":33,"code":"from __future__ import annotations\n\nfrom typing import Dict, List, Any, Tuple\n\nfrom .program_graph import ProgramGraph, Entity\nfrom .retrieval_policy import RetrievalPolicy\nfrom .citations import CitationManager, CitationPolicy\n\n\ndef select_region(query: str, pg: ProgramGraph, policy: RetrievalPolicy, top_k: int = 16) -> List[str]:\n scores = policy.score_entities(query, pg)\n ids = sorted(scores.keys(), key=lambda k: scores.get(k, 0.0), reverse=True)[:max(1, int(top_k))]\n return ids\n\n\ndef prepare_citations(units: List[Dict[str, Any]], region_entity_ids: List[str], pg: ProgramGraph, citations_policy: Dict[str, Any], manifest: Dict[str, Any]) -> List[Dict[str, Any]]:\n cm = CitationManager(\n policy=CitationPolicy(\n enforce=bool(citations_policy.get(\"enforce\", True)),\n 
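# defaults when the policy dict omits keys: enforce=True, repair=True, per_paragraph=False\n 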
per_paragraph=bool(citations_policy.get(\"per_paragraph\", False)),\n repair=bool(citations_policy.get(\"repair\", True)),\n ),\n pg=pg,\n manifest=manifest or {},\n )\n # Collect baseline evidence for the region and stamp\n baseline = cm.collect(region_entity_ids, contexts=[])\n out: List[Dict[str, Any]] = []\n for u in units:\n unit = dict(u)\n unit.setdefault(\"evidence\", baseline[:4])\n unit = cm.enforce([unit])[0]\n unit = cm.stamp_provenance(unit)","source_hash":"b602b689f9597f4953f5e8fb2d98e89141b2eeeb7e2ac5b723a8c919e3ffeac2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner_core.prepare_citations","uri":"program://Program_Conditioned_Adapter/function/modules.runner_core.prepare_citations#L16-L35","kind":"function","name":"prepare_citations","path":"modules/runner_core.py","language":"python","start_line":16,"end_line":35,"context_start_line":1,"context_end_line":37,"code":"from __future__ import annotations\n\nfrom typing import Dict, List, Any, Tuple\n\nfrom .program_graph import ProgramGraph, Entity\nfrom .retrieval_policy import RetrievalPolicy\nfrom .citations import CitationManager, CitationPolicy\n\n\ndef select_region(query: str, pg: ProgramGraph, policy: RetrievalPolicy, top_k: int = 16) -> List[str]:\n scores = policy.score_entities(query, pg)\n ids = sorted(scores.keys(), key=lambda k: scores.get(k, 0.0), reverse=True)[:max(1, int(top_k))]\n return ids\n\n\ndef prepare_citations(units: List[Dict[str, Any]], region_entity_ids: List[str], pg: ProgramGraph, citations_policy: Dict[str, Any], manifest: Dict[str, Any]) -> List[Dict[str, Any]]:\n cm = CitationManager(\n policy=CitationPolicy(\n enforce=bool(citations_policy.get(\"enforce\", True)),\n per_paragraph=bool(citations_policy.get(\"per_paragraph\", False)),\n repair=bool(citations_policy.get(\"repair\", True)),\n ),\n pg=pg,\n manifest=manifest or {},\n )\n # Collect baseline evidence for the region and stamp\n baseline = cm.collect(region_entity_ids, contexts=[])\n out: List[Dict[str, Any]] = []\n for u in units:\n unit = dict(u)\n unit.setdefault(\"evidence\", baseline[:4])\n unit = cm.enforce([unit])[0]\n unit = cm.stamp_provenance(unit)\n out.append(unit)\n return out\n\n","source_hash":"b602b689f9597f4953f5e8fb2d98e89141b2eeeb7e2ac5b723a8c919e3ffeac2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.interpret","uri":"program://Program_Conditioned_Adapter/module/modules.interpret#L1-L79","kind":"module","name":"modules.interpret","path":"modules/interpret.py","language":"python","start_line":1,"end_line":79,"context_start_line":1,"context_end_line":79,"code":"# ActivationTracer hooks, capture pipeline, write JSON (lift from run_repo_adapter.py)\n\nfrom typing import Dict, Tuple\nimport torch\n\n\n\ndef is_block(name: str, _m: torch.nn.Module) -> bool:\n if not name.startswith(\"model.layers.\"):\n return False\n rest = name[len(\"model.layers.\"):]\n return rest.isdigit() and \".\" not in rest\n\n\ndef block_out_hook(_key: str, _m: torch.nn.Module, _inputs: Tuple[torch.Tensor, ...], output: torch.Tensor) -> torch.Tensor | None:\n try:\n if isinstance(output, torch.Tensor):\n return output\n if isinstance(output, (tuple, list)) and output:\n v = output[0]\n return v if isinstance(v, torch.Tensor) else None\n v = getattr(output, \"hidden_states\", None) or getattr(output, \"last_hidden_state\", None)\n return v if isinstance(v, torch.Tensor) else None\n except Exception:\n return None\n\n\ndef truncate_batch(xx: Dict[str, torch.Tensor], 
max_tokens: int = 512) -> Dict[str, torch.Tensor]:\n max_t = max(8, int(max_tokens))\n out: Dict[str, torch.Tensor] = {}\n for k, v in xx.items():\n if isinstance(v, torch.Tensor) and v.dim() >= 2 and v.size(1) > max_t:\n out[k] = v[:, -max_t:]\n else:\n out[k] = v\n return out\n\n \ndef get_W(m: torch.nn.Module) -> torch.Tensor:\n if hasattr(m, \"lm_head\") and hasattr(m.lm_head, \"weight\"):\n return m.lm_head.weight\n if hasattr(m, \"get_output_embeddings\"):\n we = m.get_output_embeddings()\n if we is not None and hasattr(we, \"weight\"):\n return we.weight\n raise AttributeError(\"No output projection found (lm_head or embeddings)\")\n\n\"\"\"\ndef _is_block(name: str, _m: torch.nn.Module) -> bool:\n if not name.startswith(\"model.layers.\"):\n return False\n rest = name[len(\"model.layers.\"):]\n return rest.isdigit() and \".\" not in rest\n\n\ndef _block_out_hook(_key, _m, _inputs, output):\n try:\n if isinstance(output, torch.Tensor):\n return output\n if isinstance(output, (tuple, list)) and output:\n v = output[0]\n return v if isinstance(v, torch.Tensor) else None\n v = getattr(output, \"hidden_states\", None) or getattr(output, \"last_hidden_state\", None)\n return v if isinstance(v, torch.Tensor) else None\n except Exception:\n return None\n\n\ndef _truncate_batch(xx):\n max_t = max(8, int(args.interpret_tokens))\n out = {}\n for k, v in xx.items():\n if isinstance(v, torch.Tensor) and v.dim() >= 2 and v.size(1) > max_t:\n out[k] = v[:, -max_t:]\n else:\n out[k] = v\n return out\n\n\"\"\"","source_hash":"c5a667f6e9c66d400206c5946e5f83a9140f312b84b3ce2df8486feec5d279a9","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.interpret.is_block","uri":"program://Program_Conditioned_Adapter/function/modules.interpret.is_block#L8-L12","kind":"function","name":"is_block","path":"modules/interpret.py","language":"python","start_line":8,"end_line":12,"context_start_line":1,"context_end_line":32,"code":"# ActivationTracer hooks, capture pipeline, write JSON (lift from run_repo_adapter.py)\n\nfrom typing import Dict, Tuple\nimport torch\n\n\n\ndef is_block(name: str, _m: torch.nn.Module) -> bool:\n if not name.startswith(\"model.layers.\"):\n return False\n rest = name[len(\"model.layers.\"):]\n return rest.isdigit() and \".\" not in rest\n\n\ndef block_out_hook(_key: str, _m: torch.nn.Module, _inputs: Tuple[torch.Tensor, ...], output: torch.Tensor) -> torch.Tensor | None:\n try:\n if isinstance(output, torch.Tensor):\n return output\n if isinstance(output, (tuple, list)) and output:\n v = output[0]\n return v if isinstance(v, torch.Tensor) else None\n v = getattr(output, \"hidden_states\", None) or getattr(output, \"last_hidden_state\", None)\n return v if isinstance(v, torch.Tensor) else None\n except Exception:\n return None\n\n\ndef truncate_batch(xx: Dict[str, torch.Tensor], max_tokens: int = 512) -> Dict[str, torch.Tensor]:\n max_t = max(8, int(max_tokens))\n out: Dict[str, torch.Tensor] = {}\n for k, v in xx.items():\n if isinstance(v, torch.Tensor) and v.dim() >= 2 and v.size(1) > max_t:","source_hash":"c5a667f6e9c66d400206c5946e5f83a9140f312b84b3ce2df8486feec5d279a9","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.interpret.block_out_hook","uri":"program://Program_Conditioned_Adapter/function/modules.interpret.block_out_hook#L15-L25","kind":"function","name":"block_out_hook","path":"modules/interpret.py","language":"python","start_line":15,"end_line":25,"context_start_line":1,"context_end_line":45,"code":"# 
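interpret helpers: transformer-block detection, hidden-state unwrapping, and batch truncation.\n# 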
ActivationTracer hooks, capture pipeline, write JSON (lift from run_repo_adapter.py)\n\nfrom typing import Dict, Tuple\nimport torch\n\n\n\ndef is_block(name: str, _m: torch.nn.Module) -> bool:\n if not name.startswith(\"model.layers.\"):\n return False\n rest = name[len(\"model.layers.\"):]\n return rest.isdigit() and \".\" not in rest\n\n\ndef block_out_hook(_key: str, _m: torch.nn.Module, _inputs: Tuple[torch.Tensor, ...], output: torch.Tensor) -> torch.Tensor | None:\n try:\n if isinstance(output, torch.Tensor):\n return output\n if isinstance(output, (tuple, list)) and output:\n v = output[0]\n return v if isinstance(v, torch.Tensor) else None\n v = getattr(output, \"hidden_states\", None) or getattr(output, \"last_hidden_state\", None)\n return v if isinstance(v, torch.Tensor) else None\n except Exception:\n return None\n\n\ndef truncate_batch(xx: Dict[str, torch.Tensor], max_tokens: int = 512) -> Dict[str, torch.Tensor]:\n max_t = max(8, int(max_tokens))\n out: Dict[str, torch.Tensor] = {}\n for k, v in xx.items():\n if isinstance(v, torch.Tensor) and v.dim() >= 2 and v.size(1) > max_t:\n out[k] = v[:, -max_t:]\n else:\n out[k] = v\n return out\n\n \ndef get_W(m: torch.nn.Module) -> torch.Tensor:\n if hasattr(m, \"lm_head\") and hasattr(m.lm_head, \"weight\"):\n return m.lm_head.weight\n if hasattr(m, \"get_output_embeddings\"):\n we = m.get_output_embeddings()\n if we is not None and hasattr(we, \"weight\"):\n return we.weight","source_hash":"c5a667f6e9c66d400206c5946e5f83a9140f312b84b3ce2df8486feec5d279a9","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.interpret.truncate_batch","uri":"program://Program_Conditioned_Adapter/function/modules.interpret.truncate_batch#L28-L36","kind":"function","name":"truncate_batch","path":"modules/interpret.py","language":"python","start_line":28,"end_line":36,"context_start_line":8,"context_end_line":56,"code":"def is_block(name: str, _m: torch.nn.Module) -> bool:\n if not name.startswith(\"model.layers.\"):\n return False\n rest = name[len(\"model.layers.\"):]\n return rest.isdigit() and \".\" not in rest\n\n\ndef block_out_hook(_key: str, _m: torch.nn.Module, _inputs: Tuple[torch.Tensor, ...], output: torch.Tensor) -> torch.Tensor | None:\n try:\n if isinstance(output, torch.Tensor):\n return output\n if isinstance(output, (tuple, list)) and output:\n v = output[0]\n return v if isinstance(v, torch.Tensor) else None\n v = getattr(output, \"hidden_states\", None) or getattr(output, \"last_hidden_state\", None)\n return v if isinstance(v, torch.Tensor) else None\n except Exception:\n return None\n\n\ndef truncate_batch(xx: Dict[str, torch.Tensor], max_tokens: int = 512) -> Dict[str, torch.Tensor]:\n max_t = max(8, int(max_tokens))\n out: Dict[str, torch.Tensor] = {}\n for k, v in xx.items():\n if isinstance(v, torch.Tensor) and v.dim() >= 2 and v.size(1) > max_t:\n out[k] = v[:, -max_t:]\n else:\n out[k] = v\n return out\n\n \ndef get_W(m: torch.nn.Module) -> torch.Tensor:\n if hasattr(m, \"lm_head\") and hasattr(m.lm_head, \"weight\"):\n return m.lm_head.weight\n if hasattr(m, \"get_output_embeddings\"):\n we = m.get_output_embeddings()\n if we is not None and hasattr(we, \"weight\"):\n return we.weight\n raise AttributeError(\"No output projection found (lm_head or embeddings)\")\n\n\"\"\"\ndef _is_block(name: str, _m: torch.nn.Module) -> bool:\n if not name.startswith(\"model.layers.\"):\n return False\n rest = name[len(\"model.layers.\"):]\n return rest.isdigit() and \".\" not in rest\n\n\ndef 
_block_out_hook(_key, _m, _inputs, output):","source_hash":"c5a667f6e9c66d400206c5946e5f83a9140f312b84b3ce2df8486feec5d279a9","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.interpret.get_W","uri":"program://Program_Conditioned_Adapter/function/modules.interpret.get_W#L39-L46","kind":"function","name":"get_W","path":"modules/interpret.py","language":"python","start_line":39,"end_line":46,"context_start_line":19,"context_end_line":66,"code":" if isinstance(output, (tuple, list)) and output:\n v = output[0]\n return v if isinstance(v, torch.Tensor) else None\n v = getattr(output, \"hidden_states\", None) or getattr(output, \"last_hidden_state\", None)\n return v if isinstance(v, torch.Tensor) else None\n except Exception:\n return None\n\n\ndef truncate_batch(xx: Dict[str, torch.Tensor], max_tokens: int = 512) -> Dict[str, torch.Tensor]:\n max_t = max(8, int(max_tokens))\n out: Dict[str, torch.Tensor] = {}\n for k, v in xx.items():\n if isinstance(v, torch.Tensor) and v.dim() >= 2 and v.size(1) > max_t:\n out[k] = v[:, -max_t:]\n else:\n out[k] = v\n return out\n\n \ndef get_W(m: torch.nn.Module) -> torch.Tensor:\n if hasattr(m, \"lm_head\") and hasattr(m.lm_head, \"weight\"):\n return m.lm_head.weight\n if hasattr(m, \"get_output_embeddings\"):\n we = m.get_output_embeddings()\n if we is not None and hasattr(we, \"weight\"):\n return we.weight\n raise AttributeError(\"No output projection found (lm_head or embeddings)\")\n\n\"\"\"\ndef _is_block(name: str, _m: torch.nn.Module) -> bool:\n if not name.startswith(\"model.layers.\"):\n return False\n rest = name[len(\"model.layers.\"):]\n return rest.isdigit() and \".\" not in rest\n\n\ndef _block_out_hook(_key, _m, _inputs, output):\n try:\n if isinstance(output, torch.Tensor):\n return output\n if isinstance(output, (tuple, list)) and output:\n v = output[0]\n return v if isinstance(v, torch.Tensor) else None\n v = getattr(output, \"hidden_states\", None) or getattr(output, \"last_hidden_state\", None)\n return v if isinstance(v, torch.Tensor) else None\n except Exception:\n return None","source_hash":"c5a667f6e9c66d400206c5946e5f83a9140f312b84b3ce2df8486feec5d279a9","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.peft","uri":"program://Program_Conditioned_Adapter/module/modules.peft#L1-L65","kind":"module","name":"modules.peft","path":"modules/peft.py","language":"python","start_line":1,"end_line":65,"context_start_line":1,"context_end_line":65,"code":"from typing import Dict, Any, List, Optional\nimport os\nimport re\nfrom model.inspect import detect_target_names_from_model_full\n\ndef infer_target_names(model_id: str) -> Dict[str, str]:\n names = detect_target_names_from_model_full(model_id, target_regex=None) or {}\n # names maps short -> path within the first layer subtree\n # Example: {\"q_proj\": \"self_attn.q_proj\", \"o_proj\": \"self_attn.o_proj\", ...}\n return names\n\ndef save_peft_like(out_dir: str, adapters: Dict[str, Any], *, r: int, alpha: int, target_modules: List[str], bias: str = \"none\", int8: bool = False, target_paths: Optional[Dict[str, str]] = None) -> None:\n \"\"\"Write a minimal PEFT LoRA config + tensors for quick benchmarking.\n\n Note: This is a best-effort exporter; users may still need to map names depending on the model arch.\n \"\"\"\n try:\n import json as _json\n cfg = {\n \"peft_type\": \"LORA\",\n \"r\": int(r),\n \"lora_alpha\": int(alpha),\n \"target_modules\": target_modules,\n \"lora_dropout\": 0.0,\n \"bias\": str(bias),\n 
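# minimal LoraConfig-style fields; lora_dropout is pinned to 0.0 by this exporter\n 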
\"task_type\": \"CAUSAL_LM\",\n }\n open(os.path.join(out_dir, \"adapter_config.json\"), \"w\", encoding=\"utf-8\").write(_json.dumps(cfg, indent=2))\n # Save tensors in a stable torch format if available\n try:\n import torch as _torch # type: ignore\n\n state: Dict[str, Any] = {}\n def _map_path(i: int, name: str) -> str:\n # Map target name to likely module path; best-effort for LLaMA-like arch\n if target_paths and name in target_paths:\n return f\"base_model.model.{target_paths[name]}\"\n if name in (\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"):\n return f\"base_model.model.model.layers.{i}.self_attn.{name}\"\n elif name in (\"up_proj\", \"down_proj\", \"gate_proj\"):\n return f\"base_model.model.model.layers.{i}.mlp.{name}\"\n else:\n return f\"base_model.model.model.layers.{i}.{name}\"\n for i, layer in enumerate(adapters[\"layers\"]):\n for name, tensors in layer.items():\n base = _map_path(i, name)\n A = _torch.from_numpy(tensors[\"A\"]).contiguous()\n B = _torch.from_numpy(tensors[\"B\"]).contiguous()\n if int8:\n try:\n # Per-tensor affine quantization\n scale_A = float(A.abs().max().item() / 127.0) if A.numel() > 0 else 1.0\n A = _torch.quantize_per_tensor(A, scale=max(scale_A, 1e-8), zero_point=0, dtype=_torch.qint8)\n scale_B = float(B.abs().max().item() / 127.0) if B.numel() > 0 else 1.0\n B = _torch.quantize_per_tensor(B, scale=max(scale_B, 1e-8), zero_point=0, dtype=_torch.qint8)\n except Exception:\n pass\n state[f\"{base}.lora_A.weight\"] = A\n state[f\"{base}.lora_B.weight\"] = B\n _torch.save(state, os.path.join(out_dir, \"adapter_model.bin\"))\n except Exception:\n pass\n except Exception:\n pass\n","source_hash":"72e27cf7c5a21b205beb636007c461e0551496d8682fc27ece0a53abf0c30e4a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.peft.infer_target_names","uri":"program://Program_Conditioned_Adapter/function/modules.peft.infer_target_names#L6-L10","kind":"function","name":"infer_target_names","path":"modules/peft.py","language":"python","start_line":6,"end_line":10,"context_start_line":1,"context_end_line":30,"code":"from typing import Dict, Any, List, Optional\nimport os\nimport re\nfrom model.inspect import detect_target_names_from_model_full\n\ndef infer_target_names(model_id: str) -> Dict[str, str]:\n names = detect_target_names_from_model_full(model_id, target_regex=None) or {}\n # names maps short -> path within the first layer subtree\n # Example: {\"q_proj\": \"self_attn.q_proj\", \"o_proj\": \"self_attn.o_proj\", ...}\n return names\n\ndef save_peft_like(out_dir: str, adapters: Dict[str, Any], *, r: int, alpha: int, target_modules: List[str], bias: str = \"none\", int8: bool = False, target_paths: Optional[Dict[str, str]] = None) -> None:\n \"\"\"Write a minimal PEFT LoRA config + tensors for quick benchmarking.\n\n Note: This is a best-effort exporter; users may still need to map names depending on the model arch.\n \"\"\"\n try:\n import json as _json\n cfg = {\n \"peft_type\": \"LORA\",\n \"r\": int(r),\n \"lora_alpha\": int(alpha),\n \"target_modules\": target_modules,\n \"lora_dropout\": 0.0,\n \"bias\": str(bias),\n \"task_type\": \"CAUSAL_LM\",\n }\n open(os.path.join(out_dir, \"adapter_config.json\"), \"w\", encoding=\"utf-8\").write(_json.dumps(cfg, indent=2))\n # Save tensors in a stable torch format if available\n try:","source_hash":"72e27cf7c5a21b205beb636007c461e0551496d8682fc27ece0a53abf0c30e4a","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.peft.save_peft_like","uri":"program://Program_Conditioned_Adapter/function/modules.peft.save_peft_like#L12-L64","kind":"function","name":"save_peft_like","path":"modules/peft.py","language":"python","start_line":12,"end_line":64,"context_start_line":1,"context_end_line":65,"code":"from typing import Dict, Any, List, Optional\nimport os\nimport re\nfrom model.inspect import detect_target_names_from_model_full\n\ndef infer_target_names(model_id: str) -> Dict[str, str]:\n names = detect_target_names_from_model_full(model_id, target_regex=None) or {}\n # names maps short -> path within the first layer subtree\n # Example: {\"q_proj\": \"self_attn.q_proj\", \"o_proj\": \"self_attn.o_proj\", ...}\n return names\n\ndef save_peft_like(out_dir: str, adapters: Dict[str, Any], *, r: int, alpha: int, target_modules: List[str], bias: str = \"none\", int8: bool = False, target_paths: Optional[Dict[str, str]] = None) -> None:\n \"\"\"Write a minimal PEFT LoRA config + tensors for quick benchmarking.\n\n Note: This is a best-effort exporter; users may still need to map names depending on the model arch.\n \"\"\"\n try:\n import json as _json\n cfg = {\n \"peft_type\": \"LORA\",\n \"r\": int(r),\n \"lora_alpha\": int(alpha),\n \"target_modules\": target_modules,\n \"lora_dropout\": 0.0,\n \"bias\": str(bias),\n \"task_type\": \"CAUSAL_LM\",\n }\n open(os.path.join(out_dir, \"adapter_config.json\"), \"w\", encoding=\"utf-8\").write(_json.dumps(cfg, indent=2))\n # Save tensors in a stable torch format if available\n try:\n import torch as _torch # type: ignore\n\n state: Dict[str, Any] = {}\n def _map_path(i: int, name: str) -> str:\n # Map target name to likely module path; best-effort for LLaMA-like arch\n if target_paths and name in target_paths:\n return f\"base_model.model.{target_paths[name]}\"\n if name in (\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"):\n return f\"base_model.model.model.layers.{i}.self_attn.{name}\"\n elif name in (\"up_proj\", \"down_proj\", \"gate_proj\"):\n return f\"base_model.model.model.layers.{i}.mlp.{name}\"\n else:\n return f\"base_model.model.model.layers.{i}.{name}\"\n for i, layer in enumerate(adapters[\"layers\"]):\n for name, tensors in layer.items():\n base = _map_path(i, name)\n A = _torch.from_numpy(tensors[\"A\"]).contiguous()\n B = _torch.from_numpy(tensors[\"B\"]).contiguous()\n if int8:\n try:\n # Per-tensor affine quantization\n scale_A = float(A.abs().max().item() / 127.0) if A.numel() > 0 else 1.0\n A = _torch.quantize_per_tensor(A, scale=max(scale_A, 1e-8), zero_point=0, dtype=_torch.qint8)\n scale_B = float(B.abs().max().item() / 127.0) if B.numel() > 0 else 1.0\n B = _torch.quantize_per_tensor(B, scale=max(scale_B, 1e-8), zero_point=0, dtype=_torch.qint8)\n except Exception:\n pass\n state[f\"{base}.lora_A.weight\"] = A\n state[f\"{base}.lora_B.weight\"] = B\n _torch.save(state, os.path.join(out_dir, \"adapter_model.bin\"))\n except Exception:\n pass\n except Exception:\n pass\n","source_hash":"72e27cf7c5a21b205beb636007c461e0551496d8682fc27ece0a53abf0c30e4a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.peft._map_path","uri":"program://Program_Conditioned_Adapter/function/modules.peft._map_path#L34-L43","kind":"function","name":"_map_path","path":"modules/peft.py","language":"python","start_line":34,"end_line":43,"context_start_line":14,"context_end_line":63,"code":"\n Note: This is a best-effort exporter; users may still need to map names depending on 
the model arch.\n \"\"\"\n try:\n import json as _json\n cfg = {\n \"peft_type\": \"LORA\",\n \"r\": int(r),\n \"lora_alpha\": int(alpha),\n \"target_modules\": target_modules,\n \"lora_dropout\": 0.0,\n \"bias\": str(bias),\n \"task_type\": \"CAUSAL_LM\",\n }\n open(os.path.join(out_dir, \"adapter_config.json\"), \"w\", encoding=\"utf-8\").write(_json.dumps(cfg, indent=2))\n # Save tensors in a stable torch format if available\n try:\n import torch as _torch # type: ignore\n\n state: Dict[str, Any] = {}\n def _map_path(i: int, name: str) -> str:\n # Map target name to likely module path; best-effort for LLaMA-like arch\n if target_paths and name in target_paths:\n return f\"base_model.model.{target_paths[name]}\"\n if name in (\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"):\n return f\"base_model.model.model.layers.{i}.self_attn.{name}\"\n elif name in (\"up_proj\", \"down_proj\", \"gate_proj\"):\n return f\"base_model.model.model.layers.{i}.mlp.{name}\"\n else:\n return f\"base_model.model.model.layers.{i}.{name}\"\n for i, layer in enumerate(adapters[\"layers\"]):\n for name, tensors in layer.items():\n base = _map_path(i, name)\n A = _torch.from_numpy(tensors[\"A\"]).contiguous()\n B = _torch.from_numpy(tensors[\"B\"]).contiguous()\n if int8:\n try:\n # Per-tensor affine quantization\n scale_A = float(A.abs().max().item() / 127.0) if A.numel() > 0 else 1.0\n A = _torch.quantize_per_tensor(A, scale=max(scale_A, 1e-8), zero_point=0, dtype=_torch.qint8)\n scale_B = float(B.abs().max().item() / 127.0) if B.numel() > 0 else 1.0\n B = _torch.quantize_per_tensor(B, scale=max(scale_B, 1e-8), zero_point=0, dtype=_torch.qint8)\n except Exception:\n pass\n state[f\"{base}.lora_A.weight\"] = A\n state[f\"{base}.lora_B.weight\"] = B\n _torch.save(state, os.path.join(out_dir, \"adapter_model.bin\"))\n except Exception:\n pass\n except Exception:","source_hash":"72e27cf7c5a21b205beb636007c461e0551496d8682fc27ece0a53abf0c30e4a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner","uri":"program://Program_Conditioned_Adapter/module/modules.runner#L1-L1512","kind":"module","name":"modules.runner","path":"modules/runner.py","language":"python","start_line":1,"end_line":1512,"context_start_line":1,"context_end_line":1512,"code":"from __future__ import annotations\n\nimport os\nimport json\nimport time\nfrom typing import Optional, Tuple, List, Dict, Any, Set\n\nimport numpy as np\nimport torch\nfrom data.tokenizer import LocalLlamaTokenizer\n\nfrom modules.adapter import (\n load_adapters_npz,\n generate_lora_from_embedding,\n)\nfrom model.llama_bootstrap import build_local_llama_from_snapshot\nfrom model.hf_snapshot import ensure_snapshot\nfrom blocks.targets import targets_map\nfrom blocks.inspect import infer_target_shapes\nfrom model.inspect import detect_target_names_from_model_full\nfrom modules.mixing import (\n register_hook_mixed_adapters,\n)\nfrom modules.caches import load_manifest, load_windows_index, pick_files_from_windows\nfrom modules.citations import CitationManager, CitationPolicy\nfrom modules.interpret import (\n is_block,\n block_out_hook,\n truncate_batch,\n get_W,\n)\nfrom modules.program_state import (\n load_program_state,\n save_program_state,\n join_program_states,\n new_state_from_run,\n changed_bits,\n)\n\ndef _normalize_citations_regex():\n try:\n import re\n return re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\\\\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n except Exception:\n return None\n\ndef normalize_citations(text: str) -> List[tuple[str, 
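The int8 branch above uses symmetric per-tensor affine quantization: scale = max|x|/127, zero_point = 0. A self-contained sketch of the same scheme and its reconstruction error:

import torch

x = torch.randn(8, 8)
scale = float(x.abs().max().item() / 127.0) if x.numel() > 0 else 1.0
q = torch.quantize_per_tensor(x, scale=max(scale, 1e-8), zero_point=0, dtype=torch.qint8)
err = (q.dequantize() - x).abs().max().item()
print(f"scale={scale:.5f} max-abs-error={err:.5f}")  # error is bounded by ~scale/2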
int, int]]:\n rx = _normalize_citations_regex()\n if rx is None:\n return []\n out: List[tuple[str, int, int]] = []\n try:\n import os as _os\n for m in rx.finditer(text or \"\"):\n p = m.group(1)\n try:\n a = int(m.group(2) or \"0\")\n except Exception:\n a = 0\n try:\n b = int(m.group(3) or a)\n except Exception:\n b = a\n out.append((p.replace(\"\\\\\\\\\", \"/\"), min(a, b), max(a, b)))\n return out\n except Exception:\n return []\n\ndef has_citations(s: str, per_para: bool) -> bool:\n try:\n import re as _re\n rx = _re.compile(r\"(?:path:\\s*)?[A-Za-z0-9_./\\\\-]+?\\.\\w+:\\d+(?:-\\d+)?\")\n if not rx.search(s or \"\"):\n return False\n if per_para:\n paras = [p.strip() for p in (s or \"\").split(\"\\n\\n\") if p.strip()]\n return all(rx.search(p) for p in paras)\n return True\n except Exception:\n return False\n\ndef generate_answer(\n model_id: str,\n adapters_npz: str,\n prompt: str,\n *,\n program_root: Optional[str] = None,\n delta_cap: float = 0.05,\n cache_dir: Optional[str] = None,\n device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n device_map: str = \"none\",\n gpu_ids: Optional[str] = None,\n # selection\n of_sources: str = \"question\",\n zoom_symbol: Optional[str] = None,\n zoom_radius: int = 0,\n ignore: Optional[List[str]] = None,\n # context\n pack_context: bool = False,\n pack_mode: str = \"heads\",\n context_tokens: int = 2000,\n function_first: bool = False,\n ff_max_candidates: int = 24,\n ff_window_lines: int = 80,\n ff_threshold: float = 0.55,\n ff_noise_penalty: float = 0.30,\n # capacity/mixing\n alpha: float = 16.0,\n rank: int = 12,\n gsub: float = 0.75,\n beta: float = 0.1,\n entropy_aware: bool = False,\n rank_min: int = 8,\n rank_max: int = 32,\n gsub_min: float = 0.6,\n gsub_max: float = 0.9,\n entropy_weights: str = \"program=0.4,subgraph=0.4,question=0.2\",\n target_weights: Optional[str] = None,\n layer_schedule: bool = False,\n q_aware_weights: bool = False,\n mixture_m: int = 0,\n adapters_bank: Optional[str] = None,\n per_target_rank_schedule: bool = False,\n rank_budget: int = 0,\n ablate_attn: bool = False,\n ablate_mlp: bool = False,\n alpha_warmup: bool = False,\n adapter_aware_decoding: bool = False,\n layer_rank_tiers: bool = False,\n # cones/rounding\n # cones/rounding\n cone_rank: int = 2,\n cone_weight: float = 0.5,\n round_lora: bool = False,\n round_threshold: float = 0.5,\n # citations\n require_citations: bool = False,\n citations_per_paragraph: bool = False,\n # retrieval/rerank\n rerank: bool = True,\n self_queries_path: Optional[str] = None,\n # generation\n do_sample: bool = False,\n temperature: float = 0.7,\n top_p: float = 0.9,\n repetition_penalty: float = 1.1,\n min_new_tokens: int = 64,\n max_new_tokens: int = 256,\n kv_window: int = 0,\n head_device: str = \"same\", # \"cpu\" | \"auto\" | \"same\"\n # misc\n seed: int = 0,\n verbose: bool = False,\n # provenance footer\n commit_footer: bool = False,\n # monotone selection for non-structured path\n monotone_selection: bool = False,\n) -> str:\n # Program root (path or URI) for reading artifacts/windows\n root = str(program_root) if (program_root is not None and str(program_root).strip()) else \"\"\n if gpu_ids and str(gpu_ids).strip():\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_ids).strip()\n os.environ.setdefault(\"PYTORCH_CUDA_ALLOC_CONF\", \"expandable_segments:True\")\n\n # Seed\n try:\n import random\n random.seed(int(seed))\n np.random.seed(int(seed))\n torch.manual_seed(int(seed))\n except Exception:\n pass\n\n # Prefer explicit cache_dir; else 
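A quick demonstration of the citation pattern compiled by _normalize_citations_regex above: an optional "path:" prefix, a path ending in an extension, then START with an optional -END.

import re

rx = re.compile(r"(?:path:\s*)?([A-Za-z0-9_./\\\-]+?\.\w+):(\d+)(?:-(\d+))?")
text = "See modules/runner.py:82-120 and path: run.py:44 for details."
for m in rx.finditer(text):
    p, a = m.group(1), int(m.group(2))
    b = int(m.group(3) or m.group(2))  # missing END collapses to START
    print(p, min(a, b), max(a, b))
# modules/runner.py 82 120
# run.py 44 44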
env; else project root (/..../checkpoints)\n if not cache_dir:\n env_cache = os.environ.get(\"TRANSFORMER_CACHE_DIR\") or os.environ.get(\"HF_HOME\")\n if env_cache:\n cache_dir = env_cache\n else:\n mod_dir = os.path.dirname(__file__)\n proj_root = os.path.abspath(os.path.join(mod_dir, \"..\", \"..\", \"..\"))\n cache_dir = os.path.join(proj_root, \"checkpoints\")\n try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n\n # Local tokenizer (no transformers)\n snap_dir = ensure_snapshot(model_id, cache_dir)\n tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def __init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, return_tensors: str | None = None):\n ids = self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n def _tok_encode(text: str) -> List[int]:\n return tok_local.encode(text)\n def _tok_len(text: str) -> int:\n try:\n return len(tok_local.encode(text))\n except Exception:\n return 0\n def _tok_decode(ids: torch.Tensor | List[int]) -> str:\n if isinstance(ids, torch.Tensor):\n try:\n ids_l = ids.to(\"cpu\").tolist()\n except Exception:\n ids_l = []\n else:\n ids_l = list(ids)\n try:\n return tok_local.decode(ids_l, skip_special_tokens=True)\n except Exception:\n return tok_local.decode(ids_l)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n # Load base adapters\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n\n # Selection via caches (program-agnostic): prefer windows with prompt overlap\n files: List[str] = []\n try:\n adapters_dir = os.path.dirname(adapters_npz)\n windows = load_windows_index(adapters_dir)\n files = pick_files_from_windows(root, windows, prompt, k=max(8, int(ff_max_candidates)))\n except Exception:\n files = []\n modules: List[str] = []\n\n # Subgraph embedding\n # Program-agnostic: skip subgraph embedding if no backend embedder; use base only\n sub = {\"layers\": []}\n\n # Shapes and targets\n tmap = targets_map(\"local\")\n t_shapes = infer_target_shapes(model)\n num_layers = len(getattr(model, \"blocks\", []))\n d_model_local = int(t_shapes.get(\"q_proj\", (0, 0))[0]) or int(getattr(getattr(model, \"cfg\", None), \"d_model\", 0) or 0)\n\n # Entropy-aware capacity\n scaled_rank = int(rank)\n scaled_gsub = float(gsub)\n entropy_diag: Optional[Dict[str, float]] = None\n\n # No subgraph z available -> keep sub empty (base-only mixing)\n sub = {\"layers\": []}\n\n # Function-first cones: build and merge\n cr = max(0, int(cone_rank))\n # Skip function-first cones in program-agnostic core (requires backend embedder)\n\n # Optional mixture bank: mix top-m module adapters from bank by concatenation (Σ π_i Δθ_i)\n if adapters_bank and int(mixture_m) > 0:\n try:\n import glob\n bank_root = os.path.abspath(os.path.expanduser(os.path.expandvars(adapters_bank)))\n sel_mods = [m for m in (modules or []) if m]\n picked = 0\n for mod in sel_mods:\n if picked >= int(mixture_m):\n break\n mod_dir1 = os.path.join(bank_root, \"sub_adapters\", mod.replace(\"/\", \"_\"))\n mod_dir2 = os.path.join(bank_root, mod.replace(\"/\", \"_\"))\n cand = None\n for d in (mod_dir1, mod_dir2):\n fp = os.path.join(d, \"adapters.npz\")\n if 
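The _TokAdapter above is a small shim so HF-style call sites (tok(text, return_tensors="pt")) keep working against a local tokenizer. A toy restatement; LocalTok is a hypothetical stand-in for LocalLlamaTokenizer, not the repo's class:

import torch

class LocalTok:
    def encode(self, text: str):
        return [ord(c) % 256 for c in text]  # toy byte-ish encoding

class TokAdapter:
    class _Ret:
        def __init__(self, ids):
            self.input_ids = ids
    def __init__(self, base):
        self._b = base
    def __call__(self, text, add_special_tokens=False, return_tensors=None):
        ids = self._b.encode(text)
        if return_tensors == "pt":
            return {"input_ids": torch.tensor([ids], dtype=torch.long)}
        return TokAdapter._Ret(ids)

tok = TokAdapter(LocalTok())
print(len(tok("hello").input_ids))                           # 5
print(tok("hello", return_tensors="pt")["input_ids"].shape)  # torch.Size([1, 5])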
os.path.isfile(fp):\n cand = fp\n break\n if not cand:\n continue\n try:\n bank_ad = load_adapters_npz(cand)\n except Exception:\n continue\n # uniform π for now\n w = 1.0 / float(min(len(sel_mods), int(mixture_m)))\n sw = float(max(0.0, min(1.0, w))) ** 0.5\n # per-layer concat\n merged_layers = []\n for i in range(num_layers):\n baseL = sub[\"layers\"][i]\n bL = bank_ad[\"layers\"][i] if i < len(bank_ad.get(\"layers\", [])) else {}\n dst: Dict[str, Dict[str, np.ndarray]] = {}\n for name in tmap.keys():\n if (name in baseL) and (name in bL):\n A1 = baseL[name][\"A\"]; B1 = baseL[name][\"B\"]\n A2 = (sw * bL[name][\"A\"]).astype(np.float32); B2 = (sw * bL[name][\"B\"]).astype(np.float32)\n A = np.concatenate([A1, A2], axis=1)\n B = np.concatenate([B1, B2], axis=0)\n dst[name] = {\"A\": A, \"B\": B, \"gate\": baseL[name].get(\"gate\", np.array([0.0], dtype=np.float32))}\n elif name in baseL:\n dst[name] = baseL[name]\n elif name in bL:\n A = (sw * bL[name][\"A\"]).astype(np.float32)\n dst[name] = {\"A\": A, \"B\": bL[name][\"B\"], \"gate\": bL[name].get(\"gate\", np.array([0.0], dtype=np.float32))}\n merged_layers.append(dst)\n sub = {\"layers\": merged_layers}\n # try infer rank increment from a present target\n try:\n for i in range(num_layers):\n any_name = next((n for n in tmap.keys() if n in bank_ad[\"layers\"][i]), None)\n if any_name:\n inc = int(bank_ad[\"layers\"][i][any_name][\"B\"].shape[0])\n scaled_rank = int(scaled_rank + inc)\n break\n except Exception:\n pass\n picked += 1\n except Exception:\n pass\n\n # Optional rounding of LoRA factors\n if bool(round_lora):\n try:\n thr = float(max(0.0, round_threshold))\n for i in range(len(sub.get(\"layers\", []))):\n for name, tensors in sub[\"layers\"][i].items():\n for key in (\"A\", \"B\"):\n arr = tensors.get(key)\n if not isinstance(arr, np.ndarray):\n continue\n q = float(np.median(np.abs(arr))) if arr.size > 0 else 0.0\n if q <= 0:\n continue\n out = np.where(np.abs(arr) < (thr * q), 0.0, np.sign(arr) * q).astype(np.float32)\n tensors[key] = out\n except Exception:\n pass\n\n # Final prompt with optional context\n final_prompt = prompt\n if bool(pack_context) and files:\n adapters_dir = os.path.dirname(adapters_npz)\n mf = load_manifest(adapters_dir)\n # Simple windows-based packing: use windows_index ranges, read from program root\n try:\n windows = load_windows_index(adapters_dir)\n except Exception:\n windows = []\n lines_out: List[str] = [\"Program windows:\"]\n used = 0\n budget = int(context_tokens)\n # Choose up to one top window per selected file\n for rel in files:\n rel_n = rel.replace(\"\\\\\", \"/\")\n cand = [w for w in windows if (w.get(\"path\") or \"\").replace(\"\\\\\", \"/\") == rel_n]\n if not cand:\n continue\n w = cand[0]\n a = int(w.get(\"start_line\", 1))\n b = int(w.get(\"end_line\", max(a, a + 60)))\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n a = max(1, min(a, len(src_lines)))\n b = max(a, min(b, len(src_lines)))\n block = [f\"[ctx] path: {rel}:{a}-{b}\"] + src_lines[a - 1 : b] + [\"\"]\n text_block = \"\\n\".join(block) + \"\\n\"\n t = _tok_len(text_block)\n if used + t > budget:\n continue\n lines_out.extend(block)\n used += t\n if used >= budget:\n break\n packed = \"\\n\".join(lines_out) if len(lines_out) > 1 else \"\"\n if packed:\n final_prompt = packed + \"\\n\\n\" + final_prompt\n if require_citations:\n # Tailor the example 
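Why the bank merge above concatenates factors rather than adding deltas directly: with A of shape [d, r] and B of shape [r, d], stacking ranks gives [A1 A2] @ [[B1],[B2]] = A1@B1 + A2@B2, and scaling both factors by sqrt(w) scales the delta by w. A numeric check:

import numpy as np

d, r1, r2 = 6, 3, 2
A1, B1 = np.random.randn(d, r1), np.random.randn(r1, d)
A2, B2 = np.random.randn(d, r2), np.random.randn(r2, d)
A = np.concatenate([A1, A2], axis=1)  # ranks stack along columns of A
B = np.concatenate([B1, B2], axis=0)  # ...and along rows of B
assert np.allclose(A @ B, A1 @ B1 + A2 @ B2)

sw = 0.25 ** 0.5                      # the sw = w**0.5 weighting above
assert np.allclose((sw * A2) @ (sw * B2), 0.25 * (A2 @ B2))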
path to the first selected file for better compliance\n try:\n example_rel = files[0] if files else None\n example_path = example_rel if example_rel else \"file.py\"\n except Exception:\n example_path = \"file.py\"\n final_prompt = (\n final_prompt\n + f\"\\n\\nInstruction: For EVERY claim, append a citation of the form {example_path}:START-END.\\n\"\n \"Use only files shown in [ctx] above. Provide at least 3 citations overall.\\n\"\n )\n\n # Adapter-aware decoding: pointer-first nudge when citations are required\n if bool(adapter_aware_decoding) and bool(require_citations):\n try:\n example_rel = files[0] if files else None\n example_path = example_rel if example_rel else \"file.py\"\n except Exception:\n example_path = \"file.py\"\n final_prompt = (\n f\"[pointer-first] Start with a citation like [{example_path}:A-B], then explain.\\n\\n\"\n + final_prompt\n )\n ids = torch.tensor([_tok_encode(final_prompt)], dtype=torch.long, device=device)\n x = {\"input_ids\": ids}\n\n # Apply mixed adapters\n hooks = []\n def _parse_target_weights(spec: Optional[str]) -> Optional[Dict[str, float]]:\n if not spec:\n return None\n out: Dict[str, float] = {}\n try:\n for part in str(spec).split(\",\"):\n part = part.strip()\n if not part:\n continue\n if \"=\" in part:\n k, v = part.split(\"=\", 1)\n k = k.strip()\n try:\n out[k] = float(v)\n except Exception:\n continue\n else:\n out[part] = 1.0\n return out or None\n except Exception:\n return None\n tw = _parse_target_weights(target_weights) or {}\n # Optional question-aware reweighting\n if bool(q_aware_weights):\n try:\n ql = str(prompt).lower()\n mul: Dict[str, float] = {}\n if any(k in ql for k in [\"signature\", \"param\", \"argument\", \"type\", \"prototype\"]):\n mul.update({\"o_proj\": 1.10, \"v_proj\": 1.08})\n if any(k in ql for k in [\"why\", \"fail\", \"error\", \"behavior\", \"incorrect\", \"bug\"]):\n mul.update({\"up_proj\": 1.06, \"down_proj\": 1.05, \"gate_proj\": 1.04})\n if any(k in ql for k in [\"where\", \"defined\", \"definition\", \"locate\", \"find\"]):\n mul.update({\"q_proj\": 1.03})\n for k, m in mul.items():\n tw[k] = float(tw.get(k, 1.0)) * float(m)\n except Exception:\n pass\n # Optional ablations\n if bool(ablate_attn):\n for k in (\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"):\n tw[k] = 0.0\n if bool(ablate_mlp):\n for k in (\"up_proj\", \"down_proj\", \"gate_proj\"):\n tw[k] = 0.0\n # Optional per-layer schedule (gentle rise toward top third)\n layer_multipliers: Optional[List[float]] = None\n if bool(layer_schedule):\n try:\n L_layers = len(base_layers or [])\n if L_layers > 0:\n layer_multipliers = []\n for i in range(L_layers):\n frac = float(i) / float(max(1, L_layers - 1))\n if frac < (1.0 / 3.0):\n lm = 0.95 + 0.15 * (frac / (1.0 / 3.0))\n elif frac < (2.0 / 3.0):\n lm = 1.05 + 0.05 * ((frac - (1.0 / 3.0)) / (1.0 / 3.0))\n else:\n lm = 1.10 + 0.05 * ((frac - (2.0 / 3.0)) / (1.0 / 3.0))\n layer_multipliers.append(float(lm))\n except Exception:\n layer_multipliers = None\n # Optional per-target rank trimming (global per target; optional budget)\n per_target_keep: Optional[Dict[str, int]] = None\n if bool(per_target_rank_schedule):\n try:\n # Heuristic fractions by target group\n base_frac: Dict[str, float] = {\n \"o_proj\": 1.00, \"up_proj\": 1.00, \"down_proj\": 0.90, \"gate_proj\": 0.80,\n \"q_proj\": 0.70, \"k_proj\": 0.65, \"v_proj\": 0.60,\n }\n per_target_keep = {}\n for t, frac in base_frac.items():\n keep = int(max(1, min(int(scaled_rank), round(int(scaled_rank) * float(frac)))))\n per_target_keep[t] = 
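The packing loop above admits a compact restatement: one top window per selected file, skipping any block that would overflow the token budget. tok_len below is a word-count stand-in for the tokenizer-based length used in generate_answer.

def pack(windows, budget, tok_len=lambda s: len(s.split())):
    lines, used = ["Program windows:"], 0
    for rel, a, b, src in windows:      # (path, start_line, end_line, file lines)
        block = [f"[ctx] path: {rel}:{a}-{b}"] + src[a - 1 : b] + [""]
        t = tok_len("\n".join(block) + "\n")
        if used + t > budget:
            continue                    # skip this file, keep trying smaller windows
        lines.extend(block)
        used += t
        if used >= budget:
            break
    return "\n".join(lines) if len(lines) > 1 else ""

src = [f"line {i}" for i in range(1, 101)]
print(pack([("demo.py", 1, 5, src), ("big.py", 1, 90, src)], budget=40))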
keep\n # Apply global per-layer rank budget if requested\n try:\n budget = int(max(0, int(rank_budget)))\n except Exception:\n budget = 0\n if budget > 0 and per_target_keep:\n total = int(sum(int(v) for v in per_target_keep.values()))\n if total > budget:\n scale = float(budget) / float(max(1, total))\n for t in list(per_target_keep.keys()):\n per_target_keep[t] = int(max(1, round(int(per_target_keep[t]) * scale)))\n except Exception:\n per_target_keep = None\n alpha_used = float(alpha * (0.5 if bool(alpha_warmup) else 1.0))\n # Optional layer-tiered per-target keeps\n per_target_keep_layers: Optional[List[Dict[str, int]]] = None\n if bool(layer_rank_tiers):\n try:\n L_layers = len(base_layers or [])\n if L_layers > 0:\n per_target_keep_layers = []\n def tier_for(frac: float) -> str:\n if frac < (1.0/3.0): return \"low\"\n if frac < (2.0/3.0): return \"mid\"\n return \"top\"\n for i in range(L_layers):\n frac = float(i) / float(max(1, L_layers - 1))\n tier = tier_for(frac)\n # desired keeps by group\n if tier == \"low\":\n# ... truncated ...","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner._normalize_citations_regex","uri":"program://Program_Conditioned_Adapter/function/modules.runner._normalize_citations_regex#L40-L45","kind":"function","name":"_normalize_citations_regex","path":"modules/runner.py","language":"python","start_line":40,"end_line":45,"context_start_line":20,"context_end_line":65,"code":"from model.inspect import detect_target_names_from_model_full\nfrom modules.mixing import (\n register_hook_mixed_adapters,\n)\nfrom modules.caches import load_manifest, load_windows_index, pick_files_from_windows\nfrom modules.citations import CitationManager, CitationPolicy\nfrom modules.interpret import (\n is_block,\n block_out_hook,\n truncate_batch,\n get_W,\n)\nfrom modules.program_state import (\n load_program_state,\n save_program_state,\n join_program_states,\n new_state_from_run,\n changed_bits,\n)\n\ndef _normalize_citations_regex():\n try:\n import re\n return re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\\\\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n except Exception:\n return None\n\ndef normalize_citations(text: str) -> List[tuple[str, int, int]]:\n rx = _normalize_citations_regex()\n if rx is None:\n return []\n out: List[tuple[str, int, int]] = []\n try:\n import os as _os\n for m in rx.finditer(text or \"\"):\n p = m.group(1)\n try:\n a = int(m.group(2) or \"0\")\n except Exception:\n a = 0\n try:\n b = int(m.group(3) or a)\n except Exception:\n b = a\n out.append((p.replace(\"\\\\\\\\\", \"/\"), min(a, b), max(a, b)))\n return out","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner.normalize_citations","uri":"program://Program_Conditioned_Adapter/function/modules.runner.normalize_citations#L47-L67","kind":"function","name":"normalize_citations","path":"modules/runner.py","language":"python","start_line":47,"end_line":67,"context_start_line":27,"context_end_line":87,"code":" is_block,\n block_out_hook,\n truncate_batch,\n get_W,\n)\nfrom modules.program_state import (\n load_program_state,\n save_program_state,\n join_program_states,\n new_state_from_run,\n changed_bits,\n)\n\ndef _normalize_citations_regex():\n try:\n import re\n return re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\\\\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n except Exception:\n 
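The budget rescale above in isolation: per-target keeps shrink proportionally (never below 1) until their sum fits the per-layer budget. The example values are the heuristic fractions at rank 12, rounded.

def apply_budget(keep, budget):
    total = sum(keep.values())
    if budget > 0 and total > budget:
        scale = budget / max(1, total)
        keep = {t: max(1, round(v * scale)) for t, v in keep.items()}
    return keep

keep = {"o_proj": 12, "up_proj": 12, "down_proj": 11, "gate_proj": 10,
        "q_proj": 8, "k_proj": 8, "v_proj": 7}  # sums to 68
print(apply_budget(dict(keep), budget=48))       # rescaled; sums to 48 here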
return None\n\ndef normalize_citations(text: str) -> List[tuple[str, int, int]]:\n rx = _normalize_citations_regex()\n if rx is None:\n return []\n out: List[tuple[str, int, int]] = []\n try:\n import os as _os\n for m in rx.finditer(text or \"\"):\n p = m.group(1)\n try:\n a = int(m.group(2) or \"0\")\n except Exception:\n a = 0\n try:\n b = int(m.group(3) or a)\n except Exception:\n b = a\n out.append((p.replace(\"\\\\\\\\\", \"/\"), min(a, b), max(a, b)))\n return out\n except Exception:\n return []\n\ndef has_citations(s: str, per_para: bool) -> bool:\n try:\n import re as _re\n rx = _re.compile(r\"(?:path:\\s*)?[A-Za-z0-9_./\\\\-]+?\\.\\w+:\\d+(?:-\\d+)?\")\n if not rx.search(s or \"\"):\n return False\n if per_para:\n paras = [p.strip() for p in (s or \"\").split(\"\\n\\n\") if p.strip()]\n return all(rx.search(p) for p in paras)\n return True\n except Exception:\n return False\n\ndef generate_answer(\n model_id: str,\n adapters_npz: str,\n prompt: str,\n *,\n program_root: Optional[str] = None,","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner.has_citations","uri":"program://Program_Conditioned_Adapter/function/modules.runner.has_citations#L69-L80","kind":"function","name":"has_citations","path":"modules/runner.py","language":"python","start_line":69,"end_line":80,"context_start_line":49,"context_end_line":100,"code":" if rx is None:\n return []\n out: List[tuple[str, int, int]] = []\n try:\n import os as _os\n for m in rx.finditer(text or \"\"):\n p = m.group(1)\n try:\n a = int(m.group(2) or \"0\")\n except Exception:\n a = 0\n try:\n b = int(m.group(3) or a)\n except Exception:\n b = a\n out.append((p.replace(\"\\\\\\\\\", \"/\"), min(a, b), max(a, b)))\n return out\n except Exception:\n return []\n\ndef has_citations(s: str, per_para: bool) -> bool:\n try:\n import re as _re\n rx = _re.compile(r\"(?:path:\\s*)?[A-Za-z0-9_./\\\\-]+?\\.\\w+:\\d+(?:-\\d+)?\")\n if not rx.search(s or \"\"):\n return False\n if per_para:\n paras = [p.strip() for p in (s or \"\").split(\"\\n\\n\") if p.strip()]\n return all(rx.search(p) for p in paras)\n return True\n except Exception:\n return False\n\ndef generate_answer(\n model_id: str,\n adapters_npz: str,\n prompt: str,\n *,\n program_root: Optional[str] = None,\n delta_cap: float = 0.05,\n cache_dir: Optional[str] = None,\n device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n device_map: str = \"none\",\n gpu_ids: Optional[str] = None,\n # selection\n of_sources: str = \"question\",\n zoom_symbol: Optional[str] = None,\n zoom_radius: int = 0,\n ignore: Optional[List[str]] = None,\n # context\n pack_context: bool = False,\n pack_mode: str = \"heads\",","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner.generate_answer","uri":"program://Program_Conditioned_Adapter/function/modules.runner.generate_answer#L82-L743","kind":"function","name":"generate_answer","path":"modules/runner.py","language":"python","start_line":82,"end_line":743,"context_start_line":62,"context_end_line":763,"code":" except Exception:\n b = a\n out.append((p.replace(\"\\\\\\\\\", \"/\"), min(a, b), max(a, b)))\n return out\n except Exception:\n return []\n\ndef has_citations(s: str, per_para: bool) -> bool:\n try:\n import re as _re\n rx = _re.compile(r\"(?:path:\\s*)?[A-Za-z0-9_./\\\\-]+?\\.\\w+:\\d+(?:-\\d+)?\")\n if not 
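has_citations above layers a per-paragraph mode on the same pattern: every blank-line-separated paragraph must contain at least one citation. A small check:

import re

rx = re.compile(r"(?:path:\s*)?[A-Za-z0-9_./\\-]+?\.\w+:\d+(?:-\d+)?")

def per_para_ok(s: str) -> bool:
    paras = [p.strip() for p in s.split("\n\n") if p.strip()]
    return bool(paras) and all(rx.search(p) for p in paras)

ok = "Claim one. run.py:10-12\n\nClaim two. modules/peft.py:34"
bad = "Claim one. run.py:10-12\n\nClaim two, uncited."
print(per_para_ok(ok), per_para_ok(bad))  # True False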
rx.search(s or \"\"):\n return False\n if per_para:\n paras = [p.strip() for p in (s or \"\").split(\"\\n\\n\") if p.strip()]\n return all(rx.search(p) for p in paras)\n return True\n except Exception:\n return False\n\ndef generate_answer(\n model_id: str,\n adapters_npz: str,\n prompt: str,\n *,\n program_root: Optional[str] = None,\n delta_cap: float = 0.05,\n cache_dir: Optional[str] = None,\n device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n device_map: str = \"none\",\n gpu_ids: Optional[str] = None,\n # selection\n of_sources: str = \"question\",\n zoom_symbol: Optional[str] = None,\n zoom_radius: int = 0,\n ignore: Optional[List[str]] = None,\n # context\n pack_context: bool = False,\n pack_mode: str = \"heads\",\n context_tokens: int = 2000,\n function_first: bool = False,\n ff_max_candidates: int = 24,\n ff_window_lines: int = 80,\n ff_threshold: float = 0.55,\n ff_noise_penalty: float = 0.30,\n # capacity/mixing\n alpha: float = 16.0,\n rank: int = 12,\n gsub: float = 0.75,\n beta: float = 0.1,\n entropy_aware: bool = False,\n rank_min: int = 8,\n rank_max: int = 32,\n gsub_min: float = 0.6,\n gsub_max: float = 0.9,\n entropy_weights: str = \"program=0.4,subgraph=0.4,question=0.2\",\n target_weights: Optional[str] = None,\n layer_schedule: bool = False,\n q_aware_weights: bool = False,\n mixture_m: int = 0,\n adapters_bank: Optional[str] = None,\n per_target_rank_schedule: bool = False,\n rank_budget: int = 0,\n ablate_attn: bool = False,\n ablate_mlp: bool = False,\n alpha_warmup: bool = False,\n adapter_aware_decoding: bool = False,\n layer_rank_tiers: bool = False,\n # cones/rounding\n # cones/rounding\n cone_rank: int = 2,\n cone_weight: float = 0.5,\n round_lora: bool = False,\n round_threshold: float = 0.5,\n # citations\n require_citations: bool = False,\n citations_per_paragraph: bool = False,\n # retrieval/rerank\n rerank: bool = True,\n self_queries_path: Optional[str] = None,\n # generation\n do_sample: bool = False,\n temperature: float = 0.7,\n top_p: float = 0.9,\n repetition_penalty: float = 1.1,\n min_new_tokens: int = 64,\n max_new_tokens: int = 256,\n kv_window: int = 0,\n head_device: str = \"same\", # \"cpu\" | \"auto\" | \"same\"\n # misc\n seed: int = 0,\n verbose: bool = False,\n # provenance footer\n commit_footer: bool = False,\n # monotone selection for non-structured path\n monotone_selection: bool = False,\n) -> str:\n # Program root (path or URI) for reading artifacts/windows\n root = str(program_root) if (program_root is not None and str(program_root).strip()) else \"\"\n if gpu_ids and str(gpu_ids).strip():\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_ids).strip()\n os.environ.setdefault(\"PYTORCH_CUDA_ALLOC_CONF\", \"expandable_segments:True\")\n\n # Seed\n try:\n import random\n random.seed(int(seed))\n np.random.seed(int(seed))\n torch.manual_seed(int(seed))\n except Exception:\n pass\n\n # Prefer explicit cache_dir; else env; else project root (/..../checkpoints)\n if not cache_dir:\n env_cache = os.environ.get(\"TRANSFORMER_CACHE_DIR\") or os.environ.get(\"HF_HOME\")\n if env_cache:\n cache_dir = env_cache\n else:\n mod_dir = os.path.dirname(__file__)\n proj_root = os.path.abspath(os.path.join(mod_dir, \"..\", \"..\", \"..\"))\n cache_dir = os.path.join(proj_root, \"checkpoints\")\n try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n\n # Local tokenizer (no transformers)\n snap_dir = ensure_snapshot(model_id, cache_dir)\n tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def 
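The seeding block above seeds all three RNG sources; isolated here, with a check that torch draws become repeatable:

import random
import numpy as np
import torch

def seed_all(seed: int) -> None:
    random.seed(seed)        # Python's RNG (shuffles, sampling)
    np.random.seed(seed)     # NumPy (adapter init, selection noise)
    torch.manual_seed(seed)  # torch CPU/GPU generators

seed_all(0)
a = torch.randn(3)
seed_all(0)
assert torch.equal(a, torch.randn(3))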
__init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, return_tensors: str | None = None):\n ids = self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n def _tok_encode(text: str) -> List[int]:\n return tok_local.encode(text)\n def _tok_len(text: str) -> int:\n try:\n return len(tok_local.encode(text))\n except Exception:\n return 0\n def _tok_decode(ids: torch.Tensor | List[int]) -> str:\n if isinstance(ids, torch.Tensor):\n try:\n ids_l = ids.to(\"cpu\").tolist()\n except Exception:\n ids_l = []\n else:\n ids_l = list(ids)\n try:\n return tok_local.decode(ids_l, skip_special_tokens=True)\n except Exception:\n return tok_local.decode(ids_l)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n # Load base adapters\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n\n # Selection via caches (program-agnostic): prefer windows with prompt overlap\n files: List[str] = []\n try:\n adapters_dir = os.path.dirname(adapters_npz)\n windows = load_windows_index(adapters_dir)\n files = pick_files_from_windows(root, windows, prompt, k=max(8, int(ff_max_candidates)))\n except Exception:\n files = []\n modules: List[str] = []\n\n # Subgraph embedding\n # Program-agnostic: skip subgraph embedding if no backend embedder; use base only\n sub = {\"layers\": []}\n\n # Shapes and targets\n tmap = targets_map(\"local\")\n t_shapes = infer_target_shapes(model)\n num_layers = len(getattr(model, \"blocks\", []))\n d_model_local = int(t_shapes.get(\"q_proj\", (0, 0))[0]) or int(getattr(getattr(model, \"cfg\", None), \"d_model\", 0) or 0)\n\n # Entropy-aware capacity\n scaled_rank = int(rank)\n scaled_gsub = float(gsub)\n entropy_diag: Optional[Dict[str, float]] = None\n\n # No subgraph z available -> keep sub empty (base-only mixing)\n sub = {\"layers\": []}\n\n # Function-first cones: build and merge\n cr = max(0, int(cone_rank))\n # Skip function-first cones in program-agnostic core (requires backend embedder)\n\n # Optional mixture bank: mix top-m module adapters from bank by concatenation (Σ π_i Δθ_i)\n if adapters_bank and int(mixture_m) > 0:\n try:\n import glob\n bank_root = os.path.abspath(os.path.expanduser(os.path.expandvars(adapters_bank)))\n sel_mods = [m for m in (modules or []) if m]\n picked = 0\n for mod in sel_mods:\n if picked >= int(mixture_m):\n break\n mod_dir1 = os.path.join(bank_root, \"sub_adapters\", mod.replace(\"/\", \"_\"))\n mod_dir2 = os.path.join(bank_root, mod.replace(\"/\", \"_\"))\n cand = None\n for d in (mod_dir1, mod_dir2):\n fp = os.path.join(d, \"adapters.npz\")\n if os.path.isfile(fp):\n cand = fp\n break\n if not cand:\n continue\n try:\n bank_ad = load_adapters_npz(cand)\n except Exception:\n continue\n # uniform π for now\n w = 1.0 / float(min(len(sel_mods), int(mixture_m)))\n sw = float(max(0.0, min(1.0, w))) ** 0.5\n # per-layer concat\n merged_layers = []\n for i in range(num_layers):\n baseL = sub[\"layers\"][i]\n bL = bank_ad[\"layers\"][i] if i < len(bank_ad.get(\"layers\", [])) else {}\n dst: Dict[str, Dict[str, np.ndarray]] = {}\n for name in tmap.keys():\n if (name in baseL) and (name in bL):\n A1 = baseL[name][\"A\"]; B1 = baseL[name][\"B\"]\n A2 = (sw * 
bL[name][\"A\"]).astype(np.float32); B2 = (sw * bL[name][\"B\"]).astype(np.float32)\n A = np.concatenate([A1, A2], axis=1)\n B = np.concatenate([B1, B2], axis=0)\n dst[name] = {\"A\": A, \"B\": B, \"gate\": baseL[name].get(\"gate\", np.array([0.0], dtype=np.float32))}\n elif name in baseL:\n dst[name] = baseL[name]\n elif name in bL:\n A = (sw * bL[name][\"A\"]).astype(np.float32)\n dst[name] = {\"A\": A, \"B\": bL[name][\"B\"], \"gate\": bL[name].get(\"gate\", np.array([0.0], dtype=np.float32))}\n merged_layers.append(dst)\n sub = {\"layers\": merged_layers}\n # try infer rank increment from a present target\n try:\n for i in range(num_layers):\n any_name = next((n for n in tmap.keys() if n in bank_ad[\"layers\"][i]), None)\n if any_name:\n inc = int(bank_ad[\"layers\"][i][any_name][\"B\"].shape[0])\n scaled_rank = int(scaled_rank + inc)\n break\n except Exception:\n pass\n picked += 1\n except Exception:\n pass\n\n # Optional rounding of LoRA factors\n if bool(round_lora):\n try:\n thr = float(max(0.0, round_threshold))\n for i in range(len(sub.get(\"layers\", []))):\n for name, tensors in sub[\"layers\"][i].items():\n for key in (\"A\", \"B\"):\n arr = tensors.get(key)\n if not isinstance(arr, np.ndarray):\n continue\n q = float(np.median(np.abs(arr))) if arr.size > 0 else 0.0\n if q <= 0:\n continue\n out = np.where(np.abs(arr) < (thr * q), 0.0, np.sign(arr) * q).astype(np.float32)\n tensors[key] = out\n except Exception:\n pass\n\n # Final prompt with optional context\n final_prompt = prompt\n if bool(pack_context) and files:\n adapters_dir = os.path.dirname(adapters_npz)\n mf = load_manifest(adapters_dir)\n # Simple windows-based packing: use windows_index ranges, read from program root\n try:\n windows = load_windows_index(adapters_dir)\n except Exception:\n windows = []\n lines_out: List[str] = [\"Program windows:\"]\n used = 0\n budget = int(context_tokens)\n # Choose up to one top window per selected file\n for rel in files:\n rel_n = rel.replace(\"\\\\\", \"/\")\n cand = [w for w in windows if (w.get(\"path\") or \"\").replace(\"\\\\\", \"/\") == rel_n]\n if not cand:\n continue\n w = cand[0]\n a = int(w.get(\"start_line\", 1))\n b = int(w.get(\"end_line\", max(a, a + 60)))\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n a = max(1, min(a, len(src_lines)))\n b = max(a, min(b, len(src_lines)))\n block = [f\"[ctx] path: {rel}:{a}-{b}\"] + src_lines[a - 1 : b] + [\"\"]\n text_block = \"\\n\".join(block) + \"\\n\"\n t = _tok_len(text_block)\n if used + t > budget:\n continue\n lines_out.extend(block)\n used += t\n if used >= budget:\n break\n packed = \"\\n\".join(lines_out) if len(lines_out) > 1 else \"\"\n if packed:\n final_prompt = packed + \"\\n\\n\" + final_prompt\n if require_citations:\n # Tailor the example path to the first selected file for better compliance\n try:\n example_rel = files[0] if files else None\n example_path = example_rel if example_rel else \"file.py\"\n except Exception:\n example_path = \"file.py\"\n final_prompt = (\n final_prompt\n + f\"\\n\\nInstruction: For EVERY claim, append a citation of the form {example_path}:START-END.\\n\"\n \"Use only files shown in [ctx] above. 
Provide at least 3 citations overall.\\n\"\n )\n\n # Adapter-aware decoding: pointer-first nudge when citations are required\n if bool(adapter_aware_decoding) and bool(require_citations):\n try:\n example_rel = files[0] if files else None\n example_path = example_rel if example_rel else \"file.py\"\n except Exception:\n example_path = \"file.py\"\n final_prompt = (\n f\"[pointer-first] Start with a citation like [{example_path}:A-B], then explain.\\n\\n\"\n + final_prompt\n )\n ids = torch.tensor([_tok_encode(final_prompt)], dtype=torch.long, device=device)\n x = {\"input_ids\": ids}\n\n # Apply mixed adapters\n hooks = []\n def _parse_target_weights(spec: Optional[str]) -> Optional[Dict[str, float]]:\n if not spec:\n return None\n out: Dict[str, float] = {}\n try:\n for part in str(spec).split(\",\"):\n part = part.strip()\n if not part:\n continue\n if \"=\" in part:\n k, v = part.split(\"=\", 1)\n k = k.strip()\n try:\n out[k] = float(v)\n except Exception:\n continue\n else:\n out[part] = 1.0\n return out or None\n except Exception:\n return None\n tw = _parse_target_weights(target_weights) or {}\n # Optional question-aware reweighting\n if bool(q_aware_weights):\n try:\n ql = str(prompt).lower()\n mul: Dict[str, float] = {}\n if any(k in ql for k in [\"signature\", \"param\", \"argument\", \"type\", \"prototype\"]):\n mul.update({\"o_proj\": 1.10, \"v_proj\": 1.08})\n if any(k in ql for k in [\"why\", \"fail\", \"error\", \"behavior\", \"incorrect\", \"bug\"]):\n mul.update({\"up_proj\": 1.06, \"down_proj\": 1.05, \"gate_proj\": 1.04})\n if any(k in ql for k in [\"where\", \"defined\", \"definition\", \"locate\", \"find\"]):\n mul.update({\"q_proj\": 1.03})\n for k, m in mul.items():\n tw[k] = float(tw.get(k, 1.0)) * float(m)\n except Exception:\n pass\n # Optional ablations\n if bool(ablate_attn):\n for k in (\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"):\n tw[k] = 0.0\n if bool(ablate_mlp):\n for k in (\"up_proj\", \"down_proj\", \"gate_proj\"):\n tw[k] = 0.0\n # Optional per-layer schedule (gentle rise toward top third)\n layer_multipliers: Optional[List[float]] = None\n if bool(layer_schedule):\n try:\n L_layers = len(base_layers or [])\n if L_layers > 0:\n layer_multipliers = []\n for i in range(L_layers):\n frac = float(i) / float(max(1, L_layers - 1))\n if frac < (1.0 / 3.0):\n lm = 0.95 + 0.15 * (frac / (1.0 / 3.0))\n elif frac < (2.0 / 3.0):\n lm = 1.05 + 0.05 * ((frac - (1.0 / 3.0)) / (1.0 / 3.0))\n else:\n lm = 1.10 + 0.05 * ((frac - (2.0 / 3.0)) / (1.0 / 3.0))\n layer_multipliers.append(float(lm))\n except Exception:\n layer_multipliers = None\n # Optional per-target rank trimming (global per target; optional budget)\n per_target_keep: Optional[Dict[str, int]] = None\n if bool(per_target_rank_schedule):\n try:\n # Heuristic fractions by target group\n base_frac: Dict[str, float] = {\n \"o_proj\": 1.00, \"up_proj\": 1.00, \"down_proj\": 0.90, \"gate_proj\": 0.80,\n \"q_proj\": 0.70, \"k_proj\": 0.65, \"v_proj\": 0.60,\n }\n per_target_keep = {}\n for t, frac in base_frac.items():\n keep = int(max(1, min(int(scaled_rank), round(int(scaled_rank) * float(frac)))))\n per_target_keep[t] = keep\n # Apply global per-layer rank budget if requested\n try:\n budget = int(max(0, int(rank_budget)))\n except Exception:\n budget = 0\n if budget > 0 and per_target_keep:\n total = int(sum(int(v) for v in per_target_keep.values()))\n if total > budget:\n scale = float(budget) / float(max(1, total))\n for t in list(per_target_keep.keys()):\n per_target_keep[t] = int(max(1, 
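The _parse_target_weights helper above accepts the CSV form used by --target-weights in run.py (name=weight, with a bare name defaulting to 1.0). Restated without the broad exception handling:

def parse_target_weights(spec: str) -> dict:
    out = {}
    for part in spec.split(","):
        part = part.strip()
        if not part:
            continue
        if "=" in part:
            k, v = part.split("=", 1)
            out[k.strip()] = float(v)
        else:
            out[part] = 1.0  # bare names default to weight 1.0
    return out

print(parse_target_weights("q_proj=1,o_proj=1.1,up_proj"))
# {'q_proj': 1.0, 'o_proj': 1.1, 'up_proj': 1.0}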
round(int(per_target_keep[t]) * scale)))\n except Exception:\n per_target_keep = None\n alpha_used = float(alpha * (0.5 if bool(alpha_warmup) else 1.0))\n # Optional layer-tiered per-target keeps\n per_target_keep_layers: Optional[List[Dict[str, int]]] = None\n if bool(layer_rank_tiers):\n try:\n L_layers = len(base_layers or [])\n if L_layers > 0:\n per_target_keep_layers = []\n def tier_for(frac: float) -> str:\n if frac < (1.0/3.0): return \"low\"\n if frac < (2.0/3.0): return \"mid\"\n return \"top\"\n for i in range(L_layers):\n frac = float(i) / float(max(1, L_layers - 1))\n tier = tier_for(frac)\n # desired keeps by group\n if tier == \"low\":\n vals = {\"q_proj\": 2, \"k_proj\": 2, \"v_proj\": 8, \"o_proj\": 8, \"up_proj\": 12, \"down_proj\": 12}\n vals[\"gate_proj\"] = min(8, int(0.5 * vals[\"up_proj\"])) # 6\n elif tier == \"mid\":\n vals = {\"q_proj\": 3, \"k_proj\": 3, \"v_proj\": 12, \"o_proj\": 12, \"up_proj\": 16, \"down_proj\": 16}\n vals[\"gate_proj\"] = min(8, int(0.5 * vals[\"up_proj\"])) # 8\n else:\n vals = {\"q_proj\": 4, \"k_proj\": 4, \"v_proj\": 16, \"o_proj\": 16, \"up_proj\": 24, \"down_proj\": 24}\n vals[\"gate_proj\"] = min(8, int(0.5 * vals[\"up_proj\"])) # 8\n # cap by scaled_rank and >=1\n for k in list(vals.keys()):\n vals[k] = int(max(1, min(int(scaled_rank), int(vals[k]))))\n per_target_keep_layers.append(vals)\n except Exception:\n per_target_keep_layers = None\n hooks = register_hook_mixed_adapters(\n model,\n base_layers,\n sub.get(\"layers\"),\n alpha_star=float(alpha_used),\n g_sub=float(scaled_gsub),\n rank=int(scaled_rank),\n beta=float(beta),\n target_weights=tw,\n backend=\"local\",\n layer_multipliers=layer_multipliers,\n per_target_keep=per_target_keep,\n per_target_keep_layers=per_target_keep_layers,\n delta_cap=float(max(0.0, delta_cap)),\n )\n\n # Generation (always use model.generate for stability)\n if verbose:\n print(\"[debug] generating...\")\n try:\n gen_kwargs = {\n \"max_new_tokens\": int(max_new_tokens),\n \"do_sample\n# ... 
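The layer_rank_tiers branch above assigns per-target keeps by depth tier, with gate_proj pinned to min(8, up/2). Tabulated as a function:

def tier_keeps(frac: float, scaled_rank: int = 32) -> dict:
    if frac < 1 / 3:    # low tier
        vals = {"q_proj": 2, "k_proj": 2, "v_proj": 8, "o_proj": 8,
                "up_proj": 12, "down_proj": 12}
    elif frac < 2 / 3:  # mid tier
        vals = {"q_proj": 3, "k_proj": 3, "v_proj": 12, "o_proj": 12,
                "up_proj": 16, "down_proj": 16}
    else:               # top tier
        vals = {"q_proj": 4, "k_proj": 4, "v_proj": 16, "o_proj": 16,
                "up_proj": 24, "down_proj": 24}
    vals["gate_proj"] = min(8, vals["up_proj"] // 2)  # 6 / 8 / 8
    return {k: max(1, min(scaled_rank, v)) for k, v in vals.items()}

print(tier_keeps(0.0)["gate_proj"], tier_keeps(0.9)["up_proj"])  # 6 24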
truncated ...","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner.generate_answer_structured","uri":"program://Program_Conditioned_Adapter/function/modules.runner.generate_answer_structured#L747-L1509","kind":"function","name":"generate_answer_structured","path":"modules/runner.py","language":"python","start_line":747,"end_line":1509,"context_start_line":727,"context_end_line":1512,"code":" mn = os.popen(f\"git -C {root} rev-parse HEAD\").read().strip()\n except Exception:\n mn = None\n if mn:\n short = mn[:12]\n text = text.rstrip() + f\"\\n\\nAnswer valid for commit {short}.\"\n except Exception:\n pass\n\n # Clean up hooks\n for h in hooks:\n try:\n h.remove()\n except Exception:\n pass\n\n return text\n\n\n\ndef generate_answer_structured(\n model_id: str,\n adapters_npz: str,\n prompt: str,\n *,\n program_root: Optional[str] = None,\n delta_cap: float = 0.05,\n # Same knobs as generate_answer with a few extras\n cache_dir: Optional[str] = None,\n device: str = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n device_map: str = \"none\",\n gpu_ids: Optional[str] = None,\n of_sources: str = \"question\",\n zoom_symbol: Optional[str] = None,\n zoom_radius: int = 0,\n ignore: Optional[List[str]] = None,\n pack_context: bool = True,\n pack_mode: str = \"windows\",\n context_tokens: int = 3000,\n function_first: bool = True,\n ff_max_candidates: int = 24,\n ff_window_lines: int = 80,\n ff_threshold: float = 0.55,\n ff_noise_penalty: float = 0.30,\n alpha: float = 16.0,\n rank: int = 12,\n gsub: float = 0.75,\n beta: float = 0.1,\n entropy_aware: bool = False,\n rank_min: int = 8,\n rank_max: int = 32,\n gsub_min: float = 0.6,\n gsub_max: float = 0.9,\n entropy_weights: str = \"program=0.4,subgraph=0.4,question=0.2\",\n target_weights: Optional[str] = None,\n layer_schedule: bool = False,\n q_aware_weights: bool = False,\n mixture_m: int = 0,\n adapters_bank: Optional[str] = None,\n # Extra mixing/decoding knobs (opt-in)\n per_target_rank_schedule: bool = False,\n rank_budget: int = 0,\n ablate_attn: bool = False,\n ablate_mlp: bool = False,\n alpha_warmup: bool = False,\n adapter_aware_decoding: bool = False,\n layer_rank_tiers: bool = False,\n cone_rank: int = 2,\n cone_weight: float = 0.5,\n round_lora: bool = False,\n round_threshold: float = 0.5,\n require_citations: bool = True,\n citations_per_paragraph: bool = False,\n rerank: bool = True,\n self_queries_path: Optional[str] = None,\n do_sample: bool = False,\n temperature: float = 0.7,\n top_p: float = 0.9,\n repetition_penalty: float = 1.1,\n min_new_tokens: int = 64,\n max_new_tokens: int = 256,\n kv_window: int = 0,\n head_device: str = \"same\",\n seed: int = 0,\n verbose: bool = False,\n commit_footer: bool = False,\n # New dcpo/lfp controls\n lfp_iters: int = 1,\n budget_H: float = 0.0,\n monotone_selection: bool = True,\n program_state_path: Optional[str] = None,\n # Powerdomain strength\n samples: int = 1,\n # Delta join semantics\n cone_join: str = \"concat\", # \"concat\" | \"weighted\"\n # Telemetry\n telemetry_out: Optional[str] = None,\n telemetry_verify_tests: bool = False,\n) -> Dict[str, Any]:\n \"\"\"Generate a structured answer with citations and program state updates.\n\n Returns a dict: {text, citations, must, may, selection, lfp_passes, converged, provenance, confidence}.\n \"\"\"\n # Program root (path or URI) for reading artifacts/windows/state\n root = str(program_root) if (program_root 
is not None and str(program_root).strip()) else \"\"\n t_start = time.time()\n if gpu_ids and str(gpu_ids).strip():\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_ids).strip()\n os.environ.setdefault(\"PYTORCH_CUDA_ALLOC_CONF\", \"expandable_segments:True\")\n\n # Seed\n try:\n import random\n random.seed(int(seed))\n np.random.seed(int(seed))\n torch.manual_seed(int(seed))\n except Exception:\n pass\n\n # Cache\n if not cache_dir:\n env_cache = os.environ.get(\"TRANSFORMER_CACHE_DIR\") or os.environ.get(\"HF_HOME\")\n cache_dir = env_cache or os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\")), \"checkpoints\")\n try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n\n # Local tokenizer (no transformers) and adapter for context/scoring utilities\n snap_dir = ensure_snapshot(model_id, cache_dir)\n tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def __init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, return_tensors: str | None = None):\n ids = self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n # Program-agnostic structured path: skip graph embedder; retain main flow\n _ps_path = program_state_path if (program_state_path is not None and str(program_state_path).strip()) else None\n state_prev = load_program_state(root, path=_ps_path)\n\n must_set: Set[Tuple[str, int, int]] = set()\n union_set: Set[Tuple[str, int, int]] = set()\n selected_mods_acc: Set[str] = set(state_prev.candidates_modules) if monotone_selection else set()\n selected_files_acc: Set[str] = set(state_prev.candidates_files) if monotone_selection else set()\n lfp_passes = 0\n converged = False\n\n def _one_pass(mods_hint: List[str], files_hint: List[str]) -> Tuple[str, List[str], List[str], Dict[str, Any], Set[Tuple[str,int,int]], Set[Tuple[str,int,int]], Dict[str, Any]]:\n nonlocal model\n # Initial selection (question or zoom), union with hints for monotonicity\n ignore_list = [s for s in (ignore or []) if s]\n adapters_dir = os.path.dirname(adapters_npz)\n windows = load_windows_index(adapters_dir)\n files = pick_files_from_windows(root, windows, prompt, k=max(8, int(ff_max_candidates)))\n # initialize modules list for return and monotone selection merging\n modules: List[str] = []\n if monotone_selection:\n modules = sorted(list(set(mods_hint) | set(modules)))\n files = sorted(list(set(files_hint) | set(files)))\n # Subgraph emb\n # No subgraph embedding in program-agnostic structured path\n # Shapes/targets\n tmap = targets_map(\"local\")\n t_shapes = infer_target_shapes(model)\n num_layers = len(getattr(model, \"blocks\", []))\n d_model_local = int(t_shapes.get(\"q_proj\", (0, 0))[0]) or int(getattr(getattr(model, \"cfg\", None), \"d_model\", 0) or 0)\n # Entropy-aware\n scaled_rank = int(rank)\n scaled_gsub = float(gsub)\n # Build sub adapters\n sub = {\"layers\": []}\n # Optional cones\n cr = max(0, int(cone_rank))\n if bool(function_first) and cr > 0:\n try:\n pass\n except Exception:\n pass\n # Optional mixture bank: mix top-m module 
adapters from bank by concatenation (Σ π_i Δθ_i)\n if adapters_bank and int(mixture_m) > 0:\n try:\n import glob\n bank_root = os.path.abspath(os.path.expanduser(os.path.expandvars(adapters_bank)))\n sel_mods = [m for m in (modules or []) if m]\n picked = 0\n for mod in sel_mods:\n if picked >= int(mixture_m):\n break\n mod_dir1 = os.path.join(bank_root, \"sub_adapters\", mod.replace(\"/\", \"_\"))\n mod_dir2 = os.path.join(bank_root, mod.replace(\"/\", \"_\"))\n cand = None\n for d in (mod_dir1, mod_dir2):\n fp = os.path.join(d, \"adapters.npz\")\n if os.path.isfile(fp):\n cand = fp\n break\n if not cand:\n continue\n try:\n bank_ad = load_adapters_npz(cand)\n except Exception:\n continue\n # uniform π for now\n w = 1.0 / float(min(len(sel_mods), int(mixture_m)))\n sw = float(max(0.0, min(1.0, w))) ** 0.5\n # per-layer concat\n merged_layers = []\n for i in range(num_layers):\n baseL = sub[\"layers\"][i]\n bL = bank_ad[\"layers\"][i] if i < len(bank_ad.get(\"layers\", [])) else {}\n dst: Dict[str, Dict[str, np.ndarray]] = {}\n for name in tmap.keys():\n if (name in baseL) and (name in bL):\n A1 = baseL[name][\"A\"]; B1 = baseL[name][\"B\"]\n A2 = (sw * bL[name][\"A\"]).astype(np.float32); B2 = (sw * bL[name][\"B\"]).astype(np.float32)\n A = np.concatenate([A1, A2], axis=1)\n B = np.concatenate([B1, B2], axis=0)\n dst[name] = {\"A\": A, \"B\": B, \"gate\": baseL[name].get(\"gate\", np.array([0.0], dtype=np.float32))}\n elif name in baseL:\n dst[name] = baseL[name]\n elif name in bL:\n A = (sw * bL[name][\"A\"]).astype(np.float32)\n dst[name] = {\"A\": A, \"B\": bL[name][\"B\"], \"gate\": bL[name].get(\"gate\", np.array([0.0], dtype=np.float32))}\n merged_layers.append(dst)\n sub = {\"layers\": merged_layers}\n # try infer rank increment from a present target\n try:\n for i in range(num_layers):\n any_name = next((n for n in tmap.keys() if n in bank_ad[\"layers\"][i]), None)\n if any_name:\n inc = int(bank_ad[\"layers\"][i][any_name][\"B\"].shape[0])\n scaled_rank = int(scaled_rank + inc)\n break\n except Exception:\n pass\n picked += 1\n except Exception:\n pass\n # Rounding\n if bool(round_lora):\n try:\n thr = float(max(0.0, round_threshold))\n for i in range(len(sub.get(\"layers\", []))):\n for name, tensors in sub[\"layers\"][i].items():\n for key in (\"A\", \"B\"):\n arr = tensors.get(key)\n if not isinstance(arr, np.ndarray):\n continue\n q = float(np.median(np.abs(arr))) if arr.size > 0 else 0.0\n if q <= 0:\n continue\n out = np.where(np.abs(arr) < (thr * q), 0.0, np.sign(arr) * q).astype(np.float32)\n tensors[key] = out\n except Exception:\n pass\n\n # Delta norm diagnostics (approximate AB norms per target averaged across layers)\n delta_norms: Dict[str, float] = {}\n try:\n sums: Dict[str, float] = {}\n counts: Dict[str, int] = {}\n for i in range(len(sub.get(\"layers\", []))):\n for name, tensors in sub[\"layers\"][i].items():\n try:\n A = tensors.get(\"A\"); B = tensors.get(\"B\")\n if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):\n AB = (A @ B)\n n = float(np.linalg.norm(AB)) if AB.size > 0 else 0.0\n sums[name] = float(sums.get(name, 0.0) + n)\n counts[name] = int(counts.get(name, 0) + 1)\n except Exception:\n continue\n for k, v in sums.items():\n c = max(1, int(counts.get(k, 1)))\n delta_norms[k] = float(v / float(c))\n except Exception:\n delta_norms = {}\n\n # Prompt + context\n final_prompt = prompt\n files_for_ctx = list(files)\n if bool(pack_context) and files_for_ctx:\n packed = \"\"\n ident_header = \"\"\n if pack_mode == \"windows\":\n if 
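The delta-norm diagnostic above averages the Frobenius norm of each target's A@B across layers. A minimal restatement with a known answer (a 4x4 all-twos matrix has Frobenius norm 8):

import numpy as np

layers = [{"q_proj": {"A": np.ones((4, 2), np.float32),
                      "B": np.ones((2, 4), np.float32)}}]
sums, counts = {}, {}
for layer in layers:
    for name, t in layer.items():
        n = float(np.linalg.norm(t["A"] @ t["B"]))  # ||A@B||_F
        sums[name] = sums.get(name, 0.0) + n
        counts[name] = counts.get(name, 0) + 1
print({k: v / counts[k] for k, v in sums.items()})  # {'q_proj': 8.0}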
bool(function_first):\n # Simplified: pack top window per file like non-structured path\n adapters_dir = os.path.dirname(adapters_npz)\n try:\n windows = load_windows_index(adapters_dir)\n except Exception:\n windows = []\n out_lines: List[str] = [\"Program windows (function-first):\"]\n used = 0\n budget = int(context_tokens)\n for rel in files_for_ctx:\n cand = [w for w in windows if (w.get(\"path\") or \"\").replace(\"\\\\\", \"/\") == rel.replace(\"\\\\\", \"/\")]\n if not cand:\n continue\n w = cand[0]\n a = int(w.get(\"start_line\", 1)); b = int(w.get(\"end_line\", a + 60))\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n a = max(1, min(a, len(src_lines))); b = max(a, min(b, len(src_lines)))\n block = [f\"[ctx] path: {rel}:{a}-{b}\"] + src_lines[a - 1 : b] + [\"\"]\n text_block = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text_block).input_ids)\n if used + t > budget:\n continue\n out_lines.extend(block)\n used += t\n if used >= budget:\n break\n packed = \"\\n\".join(out_lines) if len(out_lines) > 1 else \"\"\n else:\n # Fallback: identical to function-first simplified path\n adapters_dir = os.path.dirname(adapters_npz)\n try:\n windows = load_windows_index(adapters_dir)\n except Exception:\n windows = []\n out_lines: List[str] = [\"Program windows:\"]\n used = 0\n budget = int(context_tokens)\n for rel in files_for_ctx:\n cand = [w for w in windows if (w.get(\"path\") or \"\").replace(\"\\\\\", \"/\") == rel.replace(\"\\\\\", \"/\")]\n if not cand:\n continue\n w = cand[0]\n a = int(w.get(\"start_line\", 1)); b = int(w.get(\"end_line\", a + 60))\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n a = max(1, min(a, len(src_lines))); b = max(a, min(b, len(src_lines)))\n block = [f\"[ctx] path: {rel}:{a}-{b}\"] + src_lines[a - 1 : b] + [\"\"]\n text_block = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text_block).input_ids)\n if used + t > budget:\n continue\n out_lines.extend(block)\n used += t\n if used >= budget:\n break\n packed = \"\\n\".join(out_lines) if len(out_lines) > 1 else \"\"\n else:\n packed = \"\" # heads mode omitted in program-agnostic core\n if packed:\n final_prompt = (ident_header + packed + \"\\n\\n\" + final_prompt) if ident_header else (packed + \"\\n\\n\" + final_prompt)\n if require_citations:\n try:\n example_rel = files_for_ctx[0] if files_for_ctx else None\n example_path = example_rel if example_rel else \"file.py\"\n except Exception:\n example_path = \"file.py\"\n final_prompt = (\n final_prompt\n + f\"\\n\\nInstruction: For EVERY claim, append a citation of the form {example_path}:START-END.\\n\"\n \"Use only files shown in [ctx] above. 
Provide at least 3 citations overall.\\n\"\n )\n # Adapter-aware decoding prompt nudge (pointer-first) for consistency\n if bool(adapter_aware_decoding):\n try:\n example_rel = files_for_ctx[0] if files_for_ctx else None\n example_path = example_rel if example_rel else \"file.py\"\n except Exception:\n example_path = \"file.py\"\n final_prompt = (\n f\"[pointer-first] Start with a citation like [{example_path}:A-B], then explain.\\n\\n\"\n + final_prompt\n )\n\n # Apply mixed adapters\n x_ids = torch.tensor([[i for i in tok_local.encode(final_prompt)]], dtype=torch.long, device=device)\n x = {\"input_ids\": x_ids}\n def _parse_target_weights(spec: Optional[str]) -> Optional[Dict[str, float]]:\n if not spec:\n return None\n out: Dict[str, float] = {}\n try:\n for part in str(spec).split(\",\"):\n part = part.strip()\n if not part:\n continue\n if \"=\" in part:\n k, v = part.split(\"=\", 1)\n k = k.strip(); out[k] = float(v)\n else:\n out[part] = 1.0\n return out or None\n except Exception:\n return None\n tw = _parse_target_weights(target_weights) or {}\n # Optional question-aware reweighting\n if bool(q_aware_weights):\n try:\n ql = str(prompt).lower()\n mul: Dict[str, float] = {}\n if any(k in ql for k in [\"signature\", \"param\", \"argument\", \"type\", \"prototype\"]):\n mul.update({\"o_proj\": 1.10, \"v_proj\": 1.08})\n if any(k in ql for k in [\"why\", \"fail\", \"error\", \"behavior\", \"incorrect\", \"bug\"]):\n mul.update({\"up_proj\": 1.06, \"down_proj\": 1.05, \"gate_proj\": 1.04})\n if any(k in ql for k in [\"where\", \"defined\", \"definition\", \"locate\", \"find\"]):\n mul.update({\"q_proj\": 1.03})\n for k, m in mul.items():\n tw[k] = float(tw.get(k, 1.0)) * float(m)\n except Exception:\n pass\n # Optional per-layer schedule\n layer_multipliers: Optional[List[float]] = None\n if bool(layer_schedule):\n try:\n L_layers = len(base_layers or [])\n if L_layers > 0:\n layer_multipliers = []\n for i in range(L_layers):\n frac = float(i) / float(max(1, L_layers - 1))\n if frac < (1.0 / 3.0):\n lm = 0.95 + 0.15 * (frac / (1.0 / 3.0))\n elif frac < (2.0 / 3.0):\n lm = 1.05 + 0.05 * ((frac - (1.0 / 3.0))\n# ... 
truncated ...","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner._TokAdapter","uri":"program://Program_Conditioned_Adapter/class/modules.runner._TokAdapter#L858-L868","kind":"class","name":"_TokAdapter","path":"modules/runner.py","language":"python","start_line":858,"end_line":868,"context_start_line":838,"context_end_line":888,"code":" try:\n import random\n random.seed(int(seed))\n np.random.seed(int(seed))\n torch.manual_seed(int(seed))\n except Exception:\n pass\n\n # Cache\n if not cache_dir:\n env_cache = os.environ.get(\"TRANSFORMER_CACHE_DIR\") or os.environ.get(\"HF_HOME\")\n cache_dir = env_cache or os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\")), \"checkpoints\")\n try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n\n # Local tokenizer (no transformers) and adapter for context/scoring utilities\n snap_dir = ensure_snapshot(model_id, cache_dir)\n tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def __init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, return_tensors: str | None = None):\n ids = self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n # Program-agnostic structured path: skip graph embedder; retain main flow\n _ps_path = program_state_path if (program_state_path is not None and str(program_state_path).strip()) else None\n state_prev = load_program_state(root, path=_ps_path)\n\n must_set: Set[Tuple[str, int, int]] = set()\n union_set: Set[Tuple[str, int, int]] = set()\n selected_mods_acc: Set[str] = set(state_prev.candidates_modules) if monotone_selection else set()\n selected_files_acc: Set[str] = set(state_prev.candidates_files) if monotone_selection else set()\n lfp_passes = 0\n converged = False\n\n def _one_pass(mods_hint: List[str], files_hint: List[str]) -> Tuple[str, List[str], List[str], Dict[str, Any], Set[Tuple[str,int,int]], Set[Tuple[str,int,int]], Dict[str, Any]]:\n nonlocal model\n # Initial selection (question or zoom), union with hints for monotonicity\n ignore_list = [s for s in (ignore or []) if s]","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner._tok_encode","uri":"program://Program_Conditioned_Adapter/function/modules.runner._tok_encode#L203-L204","kind":"function","name":"_tok_encode","path":"modules/runner.py","language":"python","start_line":203,"end_line":204,"context_start_line":183,"context_end_line":224,"code":" try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n\n # Local tokenizer (no transformers)\n snap_dir = ensure_snapshot(model_id, cache_dir)\n tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def __init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, 
return_tensors: str | None = None):\n ids = self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n def _tok_encode(text: str) -> List[int]:\n return tok_local.encode(text)\n def _tok_len(text: str) -> int:\n try:\n return len(tok_local.encode(text))\n except Exception:\n return 0\n def _tok_decode(ids: torch.Tensor | List[int]) -> str:\n if isinstance(ids, torch.Tensor):\n try:\n ids_l = ids.to(\"cpu\").tolist()\n except Exception:\n ids_l = []\n else:\n ids_l = list(ids)\n try:\n return tok_local.decode(ids_l, skip_special_tokens=True)\n except Exception:\n return tok_local.decode(ids_l)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner._tok_len","uri":"program://Program_Conditioned_Adapter/function/modules.runner._tok_len#L205-L209","kind":"function","name":"_tok_len","path":"modules/runner.py","language":"python","start_line":205,"end_line":209,"context_start_line":185,"context_end_line":229,"code":" except Exception:\n pass\n\n # Local tokenizer (no transformers)\n snap_dir = ensure_snapshot(model_id, cache_dir)\n tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def __init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, return_tensors: str | None = None):\n ids = self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n def _tok_encode(text: str) -> List[int]:\n return tok_local.encode(text)\n def _tok_len(text: str) -> int:\n try:\n return len(tok_local.encode(text))\n except Exception:\n return 0\n def _tok_decode(ids: torch.Tensor | List[int]) -> str:\n if isinstance(ids, torch.Tensor):\n try:\n ids_l = ids.to(\"cpu\").tolist()\n except Exception:\n ids_l = []\n else:\n ids_l = list(ids)\n try:\n return tok_local.decode(ids_l, skip_special_tokens=True)\n except Exception:\n return tok_local.decode(ids_l)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n # Load base adapters\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n\n # Selection via caches (program-agnostic): prefer windows with prompt overlap","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner._tok_decode","uri":"program://Program_Conditioned_Adapter/function/modules.runner._tok_decode#L210-L221","kind":"function","name":"_tok_decode","path":"modules/runner.py","language":"python","start_line":210,"end_line":221,"context_start_line":190,"context_end_line":241,"code":" tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def __init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, return_tensors: str | None = None):\n ids 
= self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n def _tok_encode(text: str) -> List[int]:\n return tok_local.encode(text)\n def _tok_len(text: str) -> int:\n try:\n return len(tok_local.encode(text))\n except Exception:\n return 0\n def _tok_decode(ids: torch.Tensor | List[int]) -> str:\n if isinstance(ids, torch.Tensor):\n try:\n ids_l = ids.to(\"cpu\").tolist()\n except Exception:\n ids_l = []\n else:\n ids_l = list(ids)\n try:\n return tok_local.decode(ids_l, skip_special_tokens=True)\n except Exception:\n return tok_local.decode(ids_l)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n # Load base adapters\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n\n # Selection via caches (program-agnostic): prefer windows with prompt overlap\n files: List[str] = []\n try:\n adapters_dir = os.path.dirname(adapters_npz)\n windows = load_windows_index(adapters_dir)\n files = pick_files_from_windows(root, windows, prompt, k=max(8, int(ff_max_candidates)))\n except Exception:\n files = []\n modules: List[str] = []\n\n # Subgraph embedding\n # Program-agnostic: skip subgraph embedding if no backend embedder; use base only\n sub = {\"layers\": []}","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner._parse_target_weights","uri":"program://Program_Conditioned_Adapter/function/modules.runner._parse_target_weights#L1117-L1133","kind":"function","name":"_parse_target_weights","path":"modules/runner.py","language":"python","start_line":1117,"end_line":1133,"context_start_line":1097,"context_end_line":1153,"code":" final_prompt = (\n final_prompt\n + f\"\\n\\nInstruction: For EVERY claim, append a citation of the form {example_path}:START-END.\\n\"\n \"Use only files shown in [ctx] above. 
Provide at least 3 citations overall.\\n\"\n )\n # Adapter-aware decoding prompt nudge (pointer-first) for consistency\n if bool(adapter_aware_decoding):\n try:\n example_rel = files_for_ctx[0] if files_for_ctx else None\n example_path = example_rel if example_rel else \"file.py\"\n except Exception:\n example_path = \"file.py\"\n final_prompt = (\n f\"[pointer-first] Start with a citation like [{example_path}:A-B], then explain.\\n\\n\"\n + final_prompt\n )\n\n # Apply mixed adapters\n x_ids = torch.tensor([[i for i in tok_local.encode(final_prompt)]], dtype=torch.long, device=device)\n x = {\"input_ids\": x_ids}\n def _parse_target_weights(spec: Optional[str]) -> Optional[Dict[str, float]]:\n if not spec:\n return None\n out: Dict[str, float] = {}\n try:\n for part in str(spec).split(\",\"):\n part = part.strip()\n if not part:\n continue\n if \"=\" in part:\n k, v = part.split(\"=\", 1)\n k = k.strip(); out[k] = float(v)\n else:\n out[part] = 1.0\n return out or None\n except Exception:\n return None\n tw = _parse_target_weights(target_weights) or {}\n # Optional question-aware reweighting\n if bool(q_aware_weights):\n try:\n ql = str(prompt).lower()\n mul: Dict[str, float] = {}\n if any(k in ql for k in [\"signature\", \"param\", \"argument\", \"type\", \"prototype\"]):\n mul.update({\"o_proj\": 1.10, \"v_proj\": 1.08})\n if any(k in ql for k in [\"why\", \"fail\", \"error\", \"behavior\", \"incorrect\", \"bug\"]):\n mul.update({\"up_proj\": 1.06, \"down_proj\": 1.05, \"gate_proj\": 1.04})\n if any(k in ql for k in [\"where\", \"defined\", \"definition\", \"locate\", \"find\"]):\n mul.update({\"q_proj\": 1.03})\n for k, m in mul.items():\n tw[k] = float(tw.get(k, 1.0)) * float(m)\n except Exception:\n pass\n # Optional per-layer schedule\n layer_multipliers: Optional[List[float]] = None\n if bool(layer_schedule):\n try:","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner._one_pass","uri":"program://Program_Conditioned_Adapter/function/modules.runner._one_pass#L885-L1361","kind":"function","name":"_one_pass","path":"modules/runner.py","language":"python","start_line":885,"end_line":1361,"context_start_line":865,"context_end_line":1381,"code":" ids = self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n # Program-agnostic structured path: skip graph embedder; retain main flow\n _ps_path = program_state_path if (program_state_path is not None and str(program_state_path).strip()) else None\n state_prev = load_program_state(root, path=_ps_path)\n\n must_set: Set[Tuple[str, int, int]] = set()\n union_set: Set[Tuple[str, int, int]] = set()\n selected_mods_acc: Set[str] = set(state_prev.candidates_modules) if monotone_selection else set()\n selected_files_acc: Set[str] = set(state_prev.candidates_files) if monotone_selection else set()\n lfp_passes = 0\n converged = False\n\n def _one_pass(mods_hint: List[str], files_hint: List[str]) -> Tuple[str, List[str], List[str], Dict[str, Any], Set[Tuple[str,int,int]], Set[Tuple[str,int,int]], Dict[str, Any]]:\n nonlocal model\n # Initial selection 
(question or zoom), union with hints for monotonicity\n ignore_list = [s for s in (ignore or []) if s]\n adapters_dir = os.path.dirname(adapters_npz)\n windows = load_windows_index(adapters_dir)\n files = pick_files_from_windows(root, windows, prompt, k=max(8, int(ff_max_candidates)))\n # initialize modules list for return and monotone selection merging\n modules: List[str] = []\n if monotone_selection:\n modules = sorted(list(set(mods_hint) | set(modules)))\n files = sorted(list(set(files_hint) | set(files)))\n # Subgraph emb\n # No subgraph embedding in program-agnostic structured path\n # Shapes/targets\n tmap = targets_map(\"local\")\n t_shapes = infer_target_shapes(model)\n num_layers = len(getattr(model, \"blocks\", []))\n d_model_local = int(t_shapes.get(\"q_proj\", (0, 0))[0]) or int(getattr(getattr(model, \"cfg\", None), \"d_model\", 0) or 0)\n # Entropy-aware\n scaled_rank = int(rank)\n scaled_gsub = float(gsub)\n # Build sub adapters\n sub = {\"layers\": []}\n # Optional cones\n cr = max(0, int(cone_rank))\n if bool(function_first) and cr > 0:\n try:\n pass\n except Exception:\n pass\n # Optional mixture bank: mix top-m module adapters from bank by concatenation (Σ π_i Δθ_i)\n if adapters_bank and int(mixture_m) > 0:\n try:\n import glob\n bank_root = os.path.abspath(os.path.expanduser(os.path.expandvars(adapters_bank)))\n sel_mods = [m for m in (modules or []) if m]\n picked = 0\n for mod in sel_mods:\n if picked >= int(mixture_m):\n break\n mod_dir1 = os.path.join(bank_root, \"sub_adapters\", mod.replace(\"/\", \"_\"))\n mod_dir2 = os.path.join(bank_root, mod.replace(\"/\", \"_\"))\n cand = None\n for d in (mod_dir1, mod_dir2):\n fp = os.path.join(d, \"adapters.npz\")\n if os.path.isfile(fp):\n cand = fp\n break\n if not cand:\n continue\n try:\n bank_ad = load_adapters_npz(cand)\n except Exception:\n continue\n # uniform π for now\n w = 1.0 / float(min(len(sel_mods), int(mixture_m)))\n sw = float(max(0.0, min(1.0, w))) ** 0.5\n # per-layer concat\n merged_layers = []\n for i in range(num_layers):\n baseL = sub[\"layers\"][i]\n bL = bank_ad[\"layers\"][i] if i < len(bank_ad.get(\"layers\", [])) else {}\n dst: Dict[str, Dict[str, np.ndarray]] = {}\n for name in tmap.keys():\n if (name in baseL) and (name in bL):\n A1 = baseL[name][\"A\"]; B1 = baseL[name][\"B\"]\n A2 = (sw * bL[name][\"A\"]).astype(np.float32); B2 = (sw * bL[name][\"B\"]).astype(np.float32)\n A = np.concatenate([A1, A2], axis=1)\n B = np.concatenate([B1, B2], axis=0)\n dst[name] = {\"A\": A, \"B\": B, \"gate\": baseL[name].get(\"gate\", np.array([0.0], dtype=np.float32))}\n elif name in baseL:\n dst[name] = baseL[name]\n elif name in bL:\n A = (sw * bL[name][\"A\"]).astype(np.float32)\n dst[name] = {\"A\": A, \"B\": bL[name][\"B\"], \"gate\": bL[name].get(\"gate\", np.array([0.0], dtype=np.float32))}\n merged_layers.append(dst)\n sub = {\"layers\": merged_layers}\n # try infer rank increment from a present target\n try:\n for i in range(num_layers):\n any_name = next((n for n in tmap.keys() if n in bank_ad[\"layers\"][i]), None)\n if any_name:\n inc = int(bank_ad[\"layers\"][i][any_name][\"B\"].shape[0])\n scaled_rank = int(scaled_rank + inc)\n break\n except Exception:\n pass\n picked += 1\n except Exception:\n pass\n # Rounding\n if bool(round_lora):\n try:\n thr = float(max(0.0, round_threshold))\n for i in range(len(sub.get(\"layers\", []))):\n for name, tensors in sub[\"layers\"][i].items():\n for key in (\"A\", \"B\"):\n arr = tensors.get(key)\n if not isinstance(arr, np.ndarray):\n continue\n q = 
float(np.median(np.abs(arr))) if arr.size > 0 else 0.0\n if q <= 0:\n continue\n out = np.where(np.abs(arr) < (thr * q), 0.0, np.sign(arr) * q).astype(np.float32)\n tensors[key] = out\n except Exception:\n pass\n\n # Delta norm diagnostics (approximate AB norms per target averaged across layers)\n delta_norms: Dict[str, float] = {}\n try:\n sums: Dict[str, float] = {}\n counts: Dict[str, int] = {}\n for i in range(len(sub.get(\"layers\", []))):\n for name, tensors in sub[\"layers\"][i].items():\n try:\n A = tensors.get(\"A\"); B = tensors.get(\"B\")\n if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):\n AB = (A @ B)\n n = float(np.linalg.norm(AB)) if AB.size > 0 else 0.0\n sums[name] = float(sums.get(name, 0.0) + n)\n counts[name] = int(counts.get(name, 0) + 1)\n except Exception:\n continue\n for k, v in sums.items():\n c = max(1, int(counts.get(k, 1)))\n delta_norms[k] = float(v / float(c))\n except Exception:\n delta_norms = {}\n\n # Prompt + context\n final_prompt = prompt\n files_for_ctx = list(files)\n if bool(pack_context) and files_for_ctx:\n packed = \"\"\n ident_header = \"\"\n if pack_mode == \"windows\":\n if bool(function_first):\n # Simplified: pack top window per file like non-structured path\n adapters_dir = os.path.dirname(adapters_npz)\n try:\n windows = load_windows_index(adapters_dir)\n except Exception:\n windows = []\n out_lines: List[str] = [\"Program windows (function-first):\"]\n used = 0\n budget = int(context_tokens)\n for rel in files_for_ctx:\n cand = [w for w in windows if (w.get(\"path\") or \"\").replace(\"\\\\\", \"/\") == rel.replace(\"\\\\\", \"/\")]\n if not cand:\n continue\n w = cand[0]\n a = int(w.get(\"start_line\", 1)); b = int(w.get(\"end_line\", a + 60))\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n a = max(1, min(a, len(src_lines))); b = max(a, min(b, len(src_lines)))\n block = [f\"[ctx] path: {rel}:{a}-{b}\"] + src_lines[a - 1 : b] + [\"\"]\n text_block = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text_block).input_ids)\n if used + t > budget:\n continue\n out_lines.extend(block)\n used += t\n if used >= budget:\n break\n packed = \"\\n\".join(out_lines) if len(out_lines) > 1 else \"\"\n else:\n # Fallback: identical to function-first simplified path\n adapters_dir = os.path.dirname(adapters_npz)\n try:\n windows = load_windows_index(adapters_dir)\n except Exception:\n windows = []\n out_lines: List[str] = [\"Program windows:\"]\n used = 0\n budget = int(context_tokens)\n for rel in files_for_ctx:\n cand = [w for w in windows if (w.get(\"path\") or \"\").replace(\"\\\\\", \"/\") == rel.replace(\"\\\\\", \"/\")]\n if not cand:\n continue\n w = cand[0]\n a = int(w.get(\"start_line\", 1)); b = int(w.get(\"end_line\", a + 60))\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n a = max(1, min(a, len(src_lines))); b = max(a, min(b, len(src_lines)))\n block = [f\"[ctx] path: {rel}:{a}-{b}\"] + src_lines[a - 1 : b] + [\"\"]\n text_block = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text_block).input_ids)\n if used + t > budget:\n continue\n out_lines.extend(block)\n used += t\n if used >= budget:\n break\n packed = \"\\n\".join(out_lines) if len(out_lines) > 1 else \"\"\n else:\n packed = \"\" # heads mode omitted 
in program-agnostic core\n if packed:\n final_prompt = (ident_header + packed + \"\\n\\n\" + final_prompt) if ident_header else (packed + \"\\n\\n\" + final_prompt)\n if require_citations:\n try:\n example_rel = files_for_ctx[0] if files_for_ctx else None\n example_path = example_rel if example_rel else \"file.py\"\n except Exception:\n example_path = \"file.py\"\n final_prompt = (\n final_prompt\n + f\"\\n\\nInstruction: For EVERY claim, append a citation of the form {example_path}:START-END.\\n\"\n \"Use only files shown in [ctx] above. Provide at least 3 citations overall.\\n\"\n )\n # Adapter-aware decoding prompt nudge (pointer-first) for consistency\n if bool(adapter_aware_decoding):\n try:\n example_rel = files_for_ctx[0] if files_for_ctx else None\n example_path = example_rel if example_rel else \"file.py\"\n except Exception:\n example_path = \"file.py\"\n final_prompt = (\n f\"[pointer-first] Start with a citation like [{example_path}:A-B], then explain.\\n\\n\"\n + final_prompt\n )\n\n # Apply mixed adapters\n x_ids = torch.tensor([[i for i in tok_local.encode(final_prompt)]], dtype=torch.long, device=device)\n x = {\"input_ids\": x_ids}\n def _parse_target_weights(spec: Optional[str]) -> Optional[Dict[str, float]]:\n if not spec:\n return None\n out: Dict[str, float] = {}\n try:\n for part in str(spec).split(\",\"):\n part = part.strip()\n if not part:\n continue\n if \"=\" in part:\n k, v = part.split(\"=\", 1)\n k = k.strip(); out[k] = float(v)\n else:\n out[part] = 1.0\n return out or None\n except Exception:\n return None\n tw = _parse_target_weights(target_weights) or {}\n # Optional question-aware reweighting\n if bool(q_aware_weights):\n try:\n ql = str(prompt).lower()\n mul: Dict[str, float] = {}\n if any(k in ql for k in [\"signature\", \"param\", \"argument\", \"type\", \"prototype\"]):\n mul.update({\"o_proj\": 1.10, \"v_proj\": 1.08})\n if any(k in ql for k in [\"why\", \"fail\", \"error\", \"behavior\", \"incorrect\", \"bug\"]):\n mul.update({\"up_proj\": 1.06, \"down_proj\": 1.05, \"gate_proj\": 1.04})\n if any(k in ql for k in [\"where\", \"defined\", \"definition\", \"locate\", \"find\"]):\n mul.update({\"q_proj\": 1.03})\n for k, m in mul.items():\n tw[k] = float(tw.get(k, 1.0)) * float(m)\n except Exception:\n pass\n # Optional per-layer schedule\n layer_multipliers: Optional[List[float]] = None\n if bool(layer_schedule):\n try:\n L_layers = len(base_layers or [])\n if L_layers > 0:\n layer_multipliers = []\n for i in range(L_layers):\n frac = float(i) / float(max(1, L_layers - 1))\n if frac < (1.0 / 3.0):\n lm = 0.95 + 0.15 * (frac / (1.0 / 3.0))\n elif frac < (2.0 / 3.0):\n lm = 1.05 + 0.05 * ((frac - (1.0 / 3.0)) / (1.0 / 3.0))\n else:\n lm = 1.10 + 0.05 * ((frac - (2.0 / 3.0)) / (1.0 / 3.0))\n layer_multipliers.append(float(lm))\n except Exception:\n layer_multipliers = None\n # Apply ablations\n if bool(ablate_attn):\n for k in (\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"):\n tw[k] = 0.0\n if bool(ablate_mlp):\n for k in (\"up_proj\", \"down_proj\", \"gate_proj\"):\n tw[k] = 0.0\n # Optional per-target rank trimming\n per_target_keep: Optional[Dict[str, int]] = None\n if bool(per_target_rank_schedule):\n try:\n base_frac: Dict[str, float] = {\n \"o_proj\": 1.00, \"up_proj\": 1.00, \"down_proj\": 0.90, \"gate_proj\": 0.80,\n \"q_proj\": 0.70, \"k_proj\": 0.65, \"v_proj\": 0.60,\n }\n per_target_keep = {}\n for t, frac in base_frac.items():\n keep = int(max(1, min(int(scaled_rank), round(int(scaled_rank) * float(frac)))))\n per_target_keep[t] = keep\n 
budget = int(max(0, int(rank_budget)))\n if budget > 0 and per_target_keep:\n total = int(sum(int(v) for v in per_target_keep.values()))\n if total > budget:\n scale = float(budget) / float(max(1, total))\n for t in list(per_target_keep.keys()):\n per_target_keep[t] = int(max(1, round(int(per_target_keep[t]) * scale)))\n except Exception:\n per_target_keep = None\n # Optional per-layer rank tiers\n per_target_keep_layers: Optional[List[Dict[str, int]]] = None\n if bool(layer_rank_tiers):\n try:\n L_layers = len(base_layers or [])\n if L_layers > 0:\n per_target_keep_layers = []\n def tier_for(frac: float) -> str:\n if frac < (1.0/3.0): return \"low\"\n if frac < (2.0/3.0): return \"mid\"\n return \"top\"\n for i in range(L_layers):\n frac = float(i) / float(max(1, L_layers - 1))\n tier = tier_for(frac)\n if tier == \"low\":\n vals = {\"q_proj\": 2, \"k_proj\": 2, \"v_proj\": 8, \"o_proj\": 8, \"up_proj\": 12, \"down_proj\": 12}\n vals[\"gate_proj\"] = min(8, int(0.5 * vals[\"up_proj\"]))\n elif tier == \"mid\":\n vals = {\"q_proj\": 3, \"k_proj\": 3, \"v_proj\": 12, \"o_proj\": 12, \"up_proj\": 16, \"down_proj\": 16}\n vals[\"gate_proj\"] = min(8, int(0.5 * vals[\"up_proj\"]))\n else:\n vals = {\"q_proj\": 4, \"k_proj\": 4, \"v_proj\": 16, \"o_proj\": 16, \"up_proj\": 24, \"down_proj\": 24}\n vals[\"gate_proj\"] = min(8, int(0.5 * vals[\"up_proj\"]))\n for k in list(vals.keys()):\n vals[k] = int(max(1, min(int(scaled_rank), int(vals[k]))))\n per_target_keep_layers.append(vals)\n except Exception:\n per_target_keep_layers = None\n # Alpha warmup on first structured pass\n alpha_used = float(alpha * (0.5 if (bool(alpha_warmup) and int(lfp_passes) == 0) else 1.0))\n hooks = register_hook_mixed_adapters(\n model,\n base_layers,\n sub.get(\"layers\"),\n alpha_star=float(alpha_used),\n g_sub=float(scaled_gsub),\n rank=int(scaled_rank),\n beta=float(beta),\n target_weights=tw,\n backend=\"local\",\n layer_multipliers=layer_multipliers,\n per_target_keep=per_target_keep,\n per_target_keep_layers=per_target_keep_layers,\n delta_cap=float(max(0.0, delta_cap)),\n )\n\n # Generate (possibly multiple samples)\n union_cites: Set[Tuple[str,int,int]] = set()\n inter_cites: Optional[Set[Tuple[str,int,int]]] = None\n text_first = \"\"\n gen_err = None\n oom_retry = False\n for si in range(max(1, int(samples))):\n try:\n gen_kwargs = {\"max_new_tokens\": int(max_new_tokens), \"do_sample\": bool(do_sample)}\n if do_sample:\n gen_kwargs.update({\n \"temperature\": float(temperature),\n# ... 
truncated ...","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner.__init__","uri":"program://Program_Conditioned_Adapter/function/modules.runner.__init__#L862-L863","kind":"function","name":"__init__","path":"modules/runner.py","language":"python","start_line":862,"end_line":863,"context_start_line":842,"context_end_line":883,"code":" torch.manual_seed(int(seed))\n except Exception:\n pass\n\n # Cache\n if not cache_dir:\n env_cache = os.environ.get(\"TRANSFORMER_CACHE_DIR\") or os.environ.get(\"HF_HOME\")\n cache_dir = env_cache or os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\")), \"checkpoints\")\n try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n\n # Local tokenizer (no transformers) and adapter for context/scoring utilities\n snap_dir = ensure_snapshot(model_id, cache_dir)\n tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def __init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, return_tensors: str | None = None):\n ids = self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n # Program-agnostic structured path: skip graph embedder; retain main flow\n _ps_path = program_state_path if (program_state_path is not None and str(program_state_path).strip()) else None\n state_prev = load_program_state(root, path=_ps_path)\n\n must_set: Set[Tuple[str, int, int]] = set()\n union_set: Set[Tuple[str, int, int]] = set()\n selected_mods_acc: Set[str] = set(state_prev.candidates_modules) if monotone_selection else set()\n selected_files_acc: Set[str] = set(state_prev.candidates_files) if monotone_selection else set()\n lfp_passes = 0\n converged = False","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner._Ret","uri":"program://Program_Conditioned_Adapter/class/modules.runner._Ret#L861-L863","kind":"class","name":"_Ret","path":"modules/runner.py","language":"python","start_line":861,"end_line":863,"context_start_line":841,"context_end_line":883,"code":" np.random.seed(int(seed))\n torch.manual_seed(int(seed))\n except Exception:\n pass\n\n # Cache\n if not cache_dir:\n env_cache = os.environ.get(\"TRANSFORMER_CACHE_DIR\") or os.environ.get(\"HF_HOME\")\n cache_dir = env_cache or os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\")), \"checkpoints\")\n try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n\n # Local tokenizer (no transformers) and adapter for context/scoring utilities\n snap_dir = ensure_snapshot(model_id, cache_dir)\n tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def __init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, return_tensors: str | None = None):\n ids = 
self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n # Program-agnostic structured path: skip graph embedder; retain main flow\n _ps_path = program_state_path if (program_state_path is not None and str(program_state_path).strip()) else None\n state_prev = load_program_state(root, path=_ps_path)\n\n must_set: Set[Tuple[str, int, int]] = set()\n union_set: Set[Tuple[str, int, int]] = set()\n selected_mods_acc: Set[str] = set(state_prev.candidates_modules) if monotone_selection else set()\n selected_files_acc: Set[str] = set(state_prev.candidates_files) if monotone_selection else set()\n lfp_passes = 0\n converged = False","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner.__call__","uri":"program://Program_Conditioned_Adapter/function/modules.runner.__call__#L864-L868","kind":"function","name":"__call__","path":"modules/runner.py","language":"python","start_line":864,"end_line":868,"context_start_line":844,"context_end_line":888,"code":" pass\n\n # Cache\n if not cache_dir:\n env_cache = os.environ.get(\"TRANSFORMER_CACHE_DIR\") or os.environ.get(\"HF_HOME\")\n cache_dir = env_cache or os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\")), \"checkpoints\")\n try:\n os.makedirs(cache_dir, exist_ok=True)\n except Exception:\n pass\n\n # Local tokenizer (no transformers) and adapter for context/scoring utilities\n snap_dir = ensure_snapshot(model_id, cache_dir)\n tok_local = LocalLlamaTokenizer(snap_dir)\n class _TokAdapter:\n def __init__(self, base):\n self._b = base\n class _Ret:\n def __init__(self, ids):\n self.input_ids = ids\n def __call__(self, text: str, add_special_tokens: bool = False, return_tensors: str | None = None):\n ids = self._b.encode(text)\n if return_tensors == \"pt\":\n return {\"input_ids\": torch.tensor([ids], dtype=torch.long)}\n return _TokAdapter._Ret(ids)\n tok = _TokAdapter(tok_local)\n torch_dtype = torch.bfloat16 if str(device).startswith(\"cuda\") else torch.float32\n model, _cfg = build_local_llama_from_snapshot(snap_dir, device, torch_dtype, device_map=device_map, gpu_ids=gpu_ids)\n\n base_layers = load_adapters_npz(adapters_npz)[\"layers\"]\n # Program-agnostic structured path: skip graph embedder; retain main flow\n _ps_path = program_state_path if (program_state_path is not None and str(program_state_path).strip()) else None\n state_prev = load_program_state(root, path=_ps_path)\n\n must_set: Set[Tuple[str, int, int]] = set()\n union_set: Set[Tuple[str, int, int]] = set()\n selected_mods_acc: Set[str] = set(state_prev.candidates_modules) if monotone_selection else set()\n selected_files_acc: Set[str] = set(state_prev.candidates_files) if monotone_selection else set()\n lfp_passes = 0\n converged = False\n\n def _one_pass(mods_hint: List[str], files_hint: List[str]) -> Tuple[str, List[str], List[str], Dict[str, Any], Set[Tuple[str,int,int]], Set[Tuple[str,int,int]], Dict[str, Any]]:\n nonlocal model\n # Initial selection (question or zoom), union with hints for monotonicity\n ignore_list = [s for s in (ignore or 
[]) if s]","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner._lev_norm","uri":"program://Program_Conditioned_Adapter/function/modules.runner._lev_norm#L1457-L1473","kind":"function","name":"_lev_norm","path":"modules/runner.py","language":"python","start_line":1457,"end_line":1473,"context_start_line":1437,"context_end_line":1493,"code":" \"selection\": {\n \"modules\": sorted(list(selected_mods_acc)),\n \"files\": sorted(list(selected_files_acc)),\n },\n \"lfp_passes\": int(lfp_passes),\n \"converged\": bool(converged),\n \"provenance\": {\"commit\": commit_sha},\n \"confidence\": float(confidence),\n \"metrics\": {\n \"citations_total\": int(len(union_set)),\n \"citations_must\": int(len(must)),\n \"modules_selected\": int(len(selected_mods_acc)),\n \"files_selected\": int(len(selected_files_acc)),\n \"elapsed_sec\": elapsed,\n \"retries\": int(retries_used),\n \"delta_norms\": (diag.get(\"delta_norms\") if isinstance(diag, dict) else None),\n },\n }\n # Optional signature edit distance (rough, per-citation snippet vs answer)\n try:\n def _lev_norm(a: str, b: str) -> float:\n a = a.strip()[:256]; b = b.strip()[:1024]\n if not a or not b:\n return 1.0\n la, lb = len(a), len(b)\n prev = list(range(lb + 1))\n cur = [0] * (lb + 1)\n for i in range(1, la + 1):\n cur[0] = i\n ai = a[i - 1]\n for j in range(1, lb + 1):\n cost = 0 if ai == b[j - 1] else 1\n cur[j] = min(cur[j - 1] + 1, prev[j] + 1, prev[j - 1] + cost)\n prev, cur = cur, prev\n dist = float(prev[lb])\n norm = dist / float(max(1, max(la, lb)))\n return max(0.0, min(1.0, norm))\n ed_vals: List[float] = []\n ans_txt = result[\"text\"] or \"\"\n for (rel, a_ln, b_ln) in result.get(\"citations\", [])[:6]:\n try:\n pth = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(root, rel))\n with open(pth, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n lines = fh.read().splitlines()\n a0 = max(1, int(a_ln)); b0 = min(len(lines), int(b_ln))\n snippet = \"\\n\".join(lines[a0 - 1 : b0])\n ed_vals.append(_lev_norm(snippet, ans_txt))\n except Exception:\n continue\n if ed_vals:\n result[\"metrics\"][\"signature_edit_mean\"] = float(sum(ed_vals) / float(len(ed_vals)))\n result[\"metrics\"][\"signature_edit_min\"] = float(min(ed_vals))\n except Exception:\n pass\n # Optional tests verification (best-effort, core is program-agnostic)\n if bool(telemetry_verify_tests):\n # Left as a no-op in core. 
Example backends can perform verification and","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.runner.tier_for","uri":"program://Program_Conditioned_Adapter/function/modules.runner.tier_for#L1203-L1206","kind":"function","name":"tier_for","path":"modules/runner.py","language":"python","start_line":1203,"end_line":1206,"context_start_line":1183,"context_end_line":1226,"code":" per_target_keep = {}\n for t, frac in base_frac.items():\n keep = int(max(1, min(int(scaled_rank), round(int(scaled_rank) * float(frac)))))\n per_target_keep[t] = keep\n budget = int(max(0, int(rank_budget)))\n if budget > 0 and per_target_keep:\n total = int(sum(int(v) for v in per_target_keep.values()))\n if total > budget:\n scale = float(budget) / float(max(1, total))\n for t in list(per_target_keep.keys()):\n per_target_keep[t] = int(max(1, round(int(per_target_keep[t]) * scale)))\n except Exception:\n per_target_keep = None\n # Optional per-layer rank tiers\n per_target_keep_layers: Optional[List[Dict[str, int]]] = None\n if bool(layer_rank_tiers):\n try:\n L_layers = len(base_layers or [])\n if L_layers > 0:\n per_target_keep_layers = []\n def tier_for(frac: float) -> str:\n if frac < (1.0/3.0): return \"low\"\n if frac < (2.0/3.0): return \"mid\"\n return \"top\"\n for i in range(L_layers):\n frac = float(i) / float(max(1, L_layers - 1))\n tier = tier_for(frac)\n if tier == \"low\":\n vals = {\"q_proj\": 2, \"k_proj\": 2, \"v_proj\": 8, \"o_proj\": 8, \"up_proj\": 12, \"down_proj\": 12}\n vals[\"gate_proj\"] = min(8, int(0.5 * vals[\"up_proj\"]))\n elif tier == \"mid\":\n vals = {\"q_proj\": 3, \"k_proj\": 3, \"v_proj\": 12, \"o_proj\": 12, \"up_proj\": 16, \"down_proj\": 16}\n vals[\"gate_proj\"] = min(8, int(0.5 * vals[\"up_proj\"]))\n else:\n vals = {\"q_proj\": 4, \"k_proj\": 4, \"v_proj\": 16, \"o_proj\": 16, \"up_proj\": 24, \"down_proj\": 24}\n vals[\"gate_proj\"] = min(8, int(0.5 * vals[\"up_proj\"]))\n for k in list(vals.keys()):\n vals[k] = int(max(1, min(int(scaled_rank), int(vals[k]))))\n per_target_keep_layers.append(vals)\n except Exception:\n per_target_keep_layers = None\n # Alpha warmup on first structured pass\n alpha_used = float(alpha * (0.5 if (bool(alpha_warmup) and int(lfp_passes) == 0) else 1.0))\n hooks = register_hook_mixed_adapters(","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations","uri":"program://Program_Conditioned_Adapter/module/modules.citations#L1-L138","kind":"module","name":"modules.citations","path":"modules/citations.py","language":"python","start_line":1,"end_line":138,"context_start_line":1,"context_end_line":138,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Any, Optional, Tuple\nimport hashlib\nimport time\nimport re\n\nfrom .program_graph import ProgramGraph, Span, ResolvedAnchor\n\n\n@dataclass\nclass CitationPolicy:\n enforce: bool = True\n per_paragraph: bool = False\n repair: bool = True\n\n\ndef _extract_identifier_tokens(text: str) -> List[str]:\n toks = re.findall(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\")\n toks2 = re.findall(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\")\n out: List[str] = []\n for t in toks + toks2:\n tt = t.split(\".\")[-1]\n if tt and tt not in out:\n out.append(tt)\n return out[:24]\n\n\nclass CitationManager:\n 
def __init__(self, policy: CitationPolicy, pg: ProgramGraph, manifest: Dict[str, Any]):\n self.policy = policy\n self.pg = pg\n self.manifest = manifest or {}\n\n def collect(self, region_entity_ids: List[str], contexts: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n # Best-effort: attach evidence from region seeds (first-class Program URIs would be passed by caller)\n evidence: List[Dict[str, Any]] = []\n for e in self.pg.entities():\n if e.id in region_entity_ids:\n try:\n # Resolve existing entity URI to get artifact span/hash\n ra = self.pg.resolve(e.uri)\n evidence.append({\n \"uri\": e.uri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},\n \"kind\": e.kind,\n \"confidence\": 0.5,\n \"retrieval\": {\"score\": 0.0, \"features\": {}},\n })\n except Exception:\n continue\n return evidence\n\n def enforce(self, draft_units: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n if not self.policy.enforce:\n return draft_units\n out = []\n for u in draft_units:\n ev = list(u.get(\"evidence\") or [])\n if self.policy.per_paragraph:\n paras = [p for p in (u.get(\"text\") or \"\").split(\"\\n\\n\") if p.strip()]\n if not paras:\n out.append(u)\n continue\n # If any paragraph lacks evidence, attempt repair (or drop)\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:\n if not ev:\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:\n out.append(u)\n return out\n\n def repair(self, unit: Dict[str, Any]) -> Dict[str, Any]:\n # Try anchoring based on identifier tokens\n text = str(unit.get(\"text\") or \"\")\n tokens = _extract_identifier_tokens(text)\n anchors: List[Dict[str, Any]] = []\n for t in tokens:\n try:\n refs = list(self.pg.search_refs(t))\n except Exception:\n refs = []\n for (eid, sp) in refs[:2]:\n # find entity to get its canonical URI (if present)\n euri = None\n for e in self.pg.entities():\n if e.id == eid:\n euri = e.uri\n break\n try:\n if euri:\n ra = self.pg.resolve(euri)\n anchors.append({\n \"uri\": euri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},\n \"kind\": (e.kind if euri and e else \"entity\"),\n \"confidence\": 0.4,\n \"retrieval\": {\"score\": 0.0, \"features\": {\"repair\": True}},\n })\n except Exception:\n continue\n if len(anchors) >= 4:\n break\n if anchors:\n unit = dict(unit)\n unit[\"evidence\"] = anchors\n return unit\n\n def stamp_provenance(self, unit: Dict[str, Any]) -> Dict[str, Any]:\n prov = {\n \"program_id\": self.manifest.get(\"program_id\"),\n \"manifest_sha\": self._manifest_sha(),\n \"commit\": self.manifest.get(\"commit\"),\n \"policy\": {\n \"enforce\": bool(self.policy.enforce),\n \"per_paragraph\": bool(self.policy.per_paragraph),\n \"repair\": bool(self.policy.repair),\n },\n \"ts\": time.time(),\n }\n out = dict(unit)\n out[\"provenance\"] = prov\n return out\n\n def _manifest_sha(self) -> Optional[str]:\n try:\n blob = str(self.manifest).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(blob).hexdigest()\n except Exception:\n return None\n\n","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations.CitationPolicy","uri":"program://Program_Conditioned_Adapter/class/modules.citations.CitationPolicy#L13-L16","kind":"class","name":"CitationPolicy","path":"modules/citations.py","language":"python","start_line":13,"end_line":16,"context_start_line":1,"context_end_line":36,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Any, Optional, Tuple\nimport hashlib\nimport time\nimport re\n\nfrom .program_graph import ProgramGraph, Span, ResolvedAnchor\n\n\n@dataclass\nclass CitationPolicy:\n enforce: bool = True\n per_paragraph: bool = False\n repair: bool = True\n\n\ndef _extract_identifier_tokens(text: str) -> List[str]:\n toks = re.findall(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\")\n toks2 = re.findall(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\")\n out: List[str] = []\n for t in toks + toks2:\n tt = t.split(\".\")[-1]\n if tt and tt not in out:\n out.append(tt)\n return out[:24]\n\n\nclass CitationManager:\n def __init__(self, policy: CitationPolicy, pg: ProgramGraph, manifest: Dict[str, Any]):\n self.policy = policy\n self.pg = pg\n self.manifest = manifest or {}\n\n def collect(self, region_entity_ids: List[str], contexts: List[Dict[str, Any]]) -> List[Dict[str, Any]]:","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations._extract_identifier_tokens","uri":"program://Program_Conditioned_Adapter/function/modules.citations._extract_identifier_tokens#L19-L27","kind":"function","name":"_extract_identifier_tokens","path":"modules/citations.py","language":"python","start_line":19,"end_line":27,"context_start_line":1,"context_end_line":47,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Any, Optional, Tuple\nimport hashlib\nimport time\nimport re\n\nfrom .program_graph import ProgramGraph, Span, ResolvedAnchor\n\n\n@dataclass\nclass CitationPolicy:\n enforce: bool = True\n per_paragraph: bool = False\n repair: bool = True\n\n\ndef _extract_identifier_tokens(text: str) -> List[str]:\n toks = re.findall(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\")\n toks2 = re.findall(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\")\n out: List[str] = []\n for t in toks + toks2:\n tt = t.split(\".\")[-1]\n if tt and tt not in out:\n out.append(tt)\n return out[:24]\n\n\nclass CitationManager:\n def __init__(self, policy: CitationPolicy, pg: ProgramGraph, manifest: Dict[str, Any]):\n self.policy = policy\n self.pg = pg\n self.manifest = manifest or {}\n\n def collect(self, region_entity_ids: List[str], contexts: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n # Best-effort: attach evidence from region seeds (first-class Program URIs would be passed by caller)\n evidence: List[Dict[str, Any]] = []\n for e in self.pg.entities():\n if e.id in region_entity_ids:\n try:\n # Resolve existing entity URI to get artifact span/hash\n ra = self.pg.resolve(e.uri)\n evidence.append({\n \"uri\": e.uri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations.CitationManager","uri":"program://Program_Conditioned_Adapter/class/modules.citations.CitationManager#L30-L136","kind":"class","name":"CitationManager","path":"modules/citations.py","language":"python","start_line":30,"end_line":136,"context_start_line":10,"context_end_line":138,"code":"\n\n@dataclass\nclass CitationPolicy:\n enforce: bool = True\n per_paragraph: bool = False\n repair: bool = True\n\n\ndef _extract_identifier_tokens(text: str) -> List[str]:\n toks = re.findall(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\")\n toks2 = re.findall(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\")\n out: List[str] = []\n for t in toks + toks2:\n tt = t.split(\".\")[-1]\n if tt and tt not in out:\n out.append(tt)\n return out[:24]\n\n\nclass CitationManager:\n def __init__(self, policy: CitationPolicy, pg: ProgramGraph, manifest: Dict[str, Any]):\n self.policy = policy\n self.pg = pg\n self.manifest = manifest or {}\n\n def collect(self, region_entity_ids: List[str], contexts: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n # Best-effort: attach evidence from region seeds (first-class Program URIs would be passed by caller)\n evidence: List[Dict[str, Any]] = []\n for e in self.pg.entities():\n if e.id in region_entity_ids:\n try:\n # Resolve existing entity URI to get artifact span/hash\n ra = self.pg.resolve(e.uri)\n evidence.append({\n \"uri\": e.uri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},\n \"kind\": e.kind,\n \"confidence\": 0.5,\n \"retrieval\": {\"score\": 0.0, \"features\": {}},\n })\n except Exception:\n continue\n return evidence\n\n def enforce(self, draft_units: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n if not self.policy.enforce:\n return draft_units\n out = []\n for u in draft_units:\n ev = list(u.get(\"evidence\") or [])\n if self.policy.per_paragraph:\n paras = [p for p in (u.get(\"text\") or \"\").split(\"\\n\\n\") if p.strip()]\n if not paras:\n out.append(u)\n continue\n # If any paragraph lacks evidence, attempt repair (or drop)\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:\n if not ev:\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:\n out.append(u)\n return out\n\n def repair(self, unit: Dict[str, Any]) -> Dict[str, Any]:\n # Try anchoring based on identifier tokens\n text = str(unit.get(\"text\") or \"\")\n tokens = _extract_identifier_tokens(text)\n anchors: List[Dict[str, Any]] = []\n for t in tokens:\n try:\n refs = list(self.pg.search_refs(t))\n except Exception:\n refs = []\n for (eid, sp) in refs[:2]:\n # find entity to get its canonical URI (if present)\n euri = None\n for e in self.pg.entities():\n if e.id == eid:\n euri = e.uri\n break\n try:\n if euri:\n ra = self.pg.resolve(euri)\n anchors.append({\n \"uri\": euri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},\n \"kind\": (e.kind if euri and e else \"entity\"),\n \"confidence\": 0.4,\n \"retrieval\": {\"score\": 0.0, \"features\": {\"repair\": True}},\n })\n except Exception:\n continue\n if len(anchors) >= 4:\n break\n if anchors:\n unit = dict(unit)\n unit[\"evidence\"] = anchors\n return unit\n\n def stamp_provenance(self, unit: Dict[str, Any]) -> Dict[str, Any]:\n prov = {\n \"program_id\": self.manifest.get(\"program_id\"),\n \"manifest_sha\": self._manifest_sha(),\n 
\"commit\": self.manifest.get(\"commit\"),\n \"policy\": {\n \"enforce\": bool(self.policy.enforce),\n \"per_paragraph\": bool(self.policy.per_paragraph),\n \"repair\": bool(self.policy.repair),\n },\n \"ts\": time.time(),\n }\n out = dict(unit)\n out[\"provenance\"] = prov\n return out\n\n def _manifest_sha(self) -> Optional[str]:\n try:\n blob = str(self.manifest).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(blob).hexdigest()\n except Exception:\n return None\n\n","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations.__init__","uri":"program://Program_Conditioned_Adapter/function/modules.citations.__init__#L31-L34","kind":"function","name":"__init__","path":"modules/citations.py","language":"python","start_line":31,"end_line":34,"context_start_line":11,"context_end_line":54,"code":"\n@dataclass\nclass CitationPolicy:\n enforce: bool = True\n per_paragraph: bool = False\n repair: bool = True\n\n\ndef _extract_identifier_tokens(text: str) -> List[str]:\n toks = re.findall(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\")\n toks2 = re.findall(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\")\n out: List[str] = []\n for t in toks + toks2:\n tt = t.split(\".\")[-1]\n if tt and tt not in out:\n out.append(tt)\n return out[:24]\n\n\nclass CitationManager:\n def __init__(self, policy: CitationPolicy, pg: ProgramGraph, manifest: Dict[str, Any]):\n self.policy = policy\n self.pg = pg\n self.manifest = manifest or {}\n\n def collect(self, region_entity_ids: List[str], contexts: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n # Best-effort: attach evidence from region seeds (first-class Program URIs would be passed by caller)\n evidence: List[Dict[str, Any]] = []\n for e in self.pg.entities():\n if e.id in region_entity_ids:\n try:\n # Resolve existing entity URI to get artifact span/hash\n ra = self.pg.resolve(e.uri)\n evidence.append({\n \"uri\": e.uri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},\n \"kind\": e.kind,\n \"confidence\": 0.5,\n \"retrieval\": {\"score\": 0.0, \"features\": {}},\n })\n except Exception:\n continue\n return evidence","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations.collect","uri":"program://Program_Conditioned_Adapter/function/modules.citations.collect#L36-L54","kind":"function","name":"collect","path":"modules/citations.py","language":"python","start_line":36,"end_line":54,"context_start_line":16,"context_end_line":74,"code":" repair: bool = True\n\n\ndef _extract_identifier_tokens(text: str) -> List[str]:\n toks = re.findall(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\")\n toks2 = re.findall(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\")\n out: List[str] = []\n for t in toks + toks2:\n tt = t.split(\".\")[-1]\n if tt and tt not in out:\n out.append(tt)\n return out[:24]\n\n\nclass CitationManager:\n def __init__(self, policy: CitationPolicy, pg: ProgramGraph, manifest: Dict[str, Any]):\n self.policy = policy\n self.pg = pg\n self.manifest = manifest or {}\n\n def collect(self, region_entity_ids: List[str], contexts: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n # Best-effort: attach evidence from region seeds (first-class Program URIs would be passed by caller)\n evidence: List[Dict[str, Any]] = []\n for e in 
self.pg.entities():\n if e.id in region_entity_ids:\n try:\n # Resolve existing entity URI to get artifact span/hash\n ra = self.pg.resolve(e.uri)\n evidence.append({\n \"uri\": e.uri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},\n \"kind\": e.kind,\n \"confidence\": 0.5,\n \"retrieval\": {\"score\": 0.0, \"features\": {}},\n })\n except Exception:\n continue\n return evidence\n\n def enforce(self, draft_units: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n if not self.policy.enforce:\n return draft_units\n out = []\n for u in draft_units:\n ev = list(u.get(\"evidence\") or [])\n if self.policy.per_paragraph:\n paras = [p for p in (u.get(\"text\") or \"\").split(\"\\n\\n\") if p.strip()]\n if not paras:\n out.append(u)\n continue\n # If any paragraph lacks evidence, attempt repair (or drop)\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:\n if not ev:\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations.enforce","uri":"program://Program_Conditioned_Adapter/function/modules.citations.enforce#L56-L76","kind":"function","name":"enforce","path":"modules/citations.py","language":"python","start_line":56,"end_line":76,"context_start_line":36,"context_end_line":96,"code":" def collect(self, region_entity_ids: List[str], contexts: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n # Best-effort: attach evidence from region seeds (first-class Program URIs would be passed by caller)\n evidence: List[Dict[str, Any]] = []\n for e in self.pg.entities():\n if e.id in region_entity_ids:\n try:\n # Resolve existing entity URI to get artifact span/hash\n ra = self.pg.resolve(e.uri)\n evidence.append({\n \"uri\": e.uri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},\n \"kind\": e.kind,\n \"confidence\": 0.5,\n \"retrieval\": {\"score\": 0.0, \"features\": {}},\n })\n except Exception:\n continue\n return evidence\n\n def enforce(self, draft_units: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n if not self.policy.enforce:\n return draft_units\n out = []\n for u in draft_units:\n ev = list(u.get(\"evidence\") or [])\n if self.policy.per_paragraph:\n paras = [p for p in (u.get(\"text\") or \"\").split(\"\\n\\n\") if p.strip()]\n if not paras:\n out.append(u)\n continue\n # If any paragraph lacks evidence, attempt repair (or drop)\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:\n if not ev:\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:\n out.append(u)\n return out\n\n def repair(self, unit: Dict[str, Any]) -> Dict[str, Any]:\n # Try anchoring based on identifier tokens\n text = str(unit.get(\"text\") or \"\")\n tokens = _extract_identifier_tokens(text)\n anchors: List[Dict[str, Any]] = []\n for t in tokens:\n try:\n refs = list(self.pg.search_refs(t))\n except Exception:\n refs = []\n for (eid, sp) in refs[:2]:\n # find entity to get its canonical URI (if present)\n euri = None\n for e in self.pg.entities():\n if e.id == eid:\n euri = e.uri\n break\n try:\n if euri:","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} 
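A minimal usage sketch for the CitationManager shown above and in the repair/stamp_provenance records below. Everything in _StubGraph (entity id, URI, hash, span values) is invented for illustration, and the sketch assumes modules.citations is importable from the repo root; only the three ProgramGraph methods the manager actually calls (entities, resolve, search_refs) are stubbed. It walks the enforce -> repair path: a draft unit with no evidence has backtick-quoted identifiers extracted from its text, resolved to spans via the graph, and attached as anchors before stamp_provenance adds manifest metadata.

from dataclasses import dataclass
from typing import Any, List, Tuple

from modules.citations import CitationManager, CitationPolicy  # assumed importable from the repo root


@dataclass
class _Span:
    start_line: int
    end_line: int


@dataclass
class _Resolved:
    hash: str
    span: _Span


@dataclass
class _Entity:
    id: str
    uri: str
    kind: str


class _StubGraph:
    # Stand-in for ProgramGraph; hypothetical data, but the same method shapes CitationManager uses.
    def __init__(self) -> None:
        self._ents = [_Entity(id="e1", uri="program://demo/function/pkg.mod.add#L1-L2", kind="function")]

    def entities(self) -> List[_Entity]:
        return self._ents

    def resolve(self, uri: str) -> _Resolved:
        return _Resolved(hash="deadbeef", span=_Span(1, 2))

    def search_refs(self, token: str) -> List[Tuple[str, Any]]:
        # Map an identifier token back to the entity that defines it.
        return [("e1", None)] if token == "add" else []


mgr = CitationManager(CitationPolicy(enforce=True, repair=True), _StubGraph(), {"program_id": "demo"})
draft = [{"text": "The `add` function returns x + y.", "evidence": []}]
units = mgr.enforce(draft)  # no evidence -> repair() anchors on the `add` token
stamped = [mgr.stamp_provenance(u) for u in units]
print(stamped[0]["evidence"][0]["uri"])        # program://demo/function/pkg.mod.add#L1-L2
print(stamped[0]["provenance"]["program_id"])  # demo

One design consequence visible in the source: repair() caps anchors at four per unit and assigns confidence 0.4, below the 0.5 that collect() gives region-seeded evidence, so repaired citations stay ranked beneath first-class ones.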
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations.repair","uri":"program://Program_Conditioned_Adapter/function/modules.citations.repair#L78-L113","kind":"function","name":"repair","path":"modules/citations.py","language":"python","start_line":78,"end_line":113,"context_start_line":58,"context_end_line":133,"code":" return draft_units\n out = []\n for u in draft_units:\n ev = list(u.get(\"evidence\") or [])\n if self.policy.per_paragraph:\n paras = [p for p in (u.get(\"text\") or \"\").split(\"\\n\\n\") if p.strip()]\n if not paras:\n out.append(u)\n continue\n # If any paragraph lacks evidence, attempt repair (or drop)\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:\n if not ev:\n repaired = self.repair(u) if self.policy.repair else u\n out.append(repaired)\n else:\n out.append(u)\n return out\n\n def repair(self, unit: Dict[str, Any]) -> Dict[str, Any]:\n # Try anchoring based on identifier tokens\n text = str(unit.get(\"text\") or \"\")\n tokens = _extract_identifier_tokens(text)\n anchors: List[Dict[str, Any]] = []\n for t in tokens:\n try:\n refs = list(self.pg.search_refs(t))\n except Exception:\n refs = []\n for (eid, sp) in refs[:2]:\n # find entity to get its canonical URI (if present)\n euri = None\n for e in self.pg.entities():\n if e.id == eid:\n euri = e.uri\n break\n try:\n if euri:\n ra = self.pg.resolve(euri)\n anchors.append({\n \"uri\": euri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},\n \"kind\": (e.kind if euri and e else \"entity\"),\n \"confidence\": 0.4,\n \"retrieval\": {\"score\": 0.0, \"features\": {\"repair\": True}},\n })\n except Exception:\n continue\n if len(anchors) >= 4:\n break\n if anchors:\n unit = dict(unit)\n unit[\"evidence\"] = anchors\n return unit\n\n def stamp_provenance(self, unit: Dict[str, Any]) -> Dict[str, Any]:\n prov = {\n \"program_id\": self.manifest.get(\"program_id\"),\n \"manifest_sha\": self._manifest_sha(),\n \"commit\": self.manifest.get(\"commit\"),\n \"policy\": {\n \"enforce\": bool(self.policy.enforce),\n \"per_paragraph\": bool(self.policy.per_paragraph),\n \"repair\": bool(self.policy.repair),\n },\n \"ts\": time.time(),\n }\n out = dict(unit)\n out[\"provenance\"] = prov\n return out\n\n def _manifest_sha(self) -> Optional[str]:\n try:\n blob = str(self.manifest).encode(\"utf-8\", errors=\"ignore\")","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations.stamp_provenance","uri":"program://Program_Conditioned_Adapter/function/modules.citations.stamp_provenance#L115-L129","kind":"function","name":"stamp_provenance","path":"modules/citations.py","language":"python","start_line":115,"end_line":129,"context_start_line":95,"context_end_line":138,"code":" try:\n if euri:\n ra = self.pg.resolve(euri)\n anchors.append({\n \"uri\": euri,\n \"artifact_hash\": ra.hash,\n \"span\": {\"start\": {\"line\": ra.span.start_line}, \"end\": {\"line\": ra.span.end_line}},\n \"kind\": (e.kind if euri and e else \"entity\"),\n \"confidence\": 0.4,\n \"retrieval\": {\"score\": 0.0, \"features\": {\"repair\": True}},\n })\n except Exception:\n continue\n if len(anchors) >= 4:\n break\n if anchors:\n unit = dict(unit)\n unit[\"evidence\"] = anchors\n return unit\n\n def stamp_provenance(self, unit: Dict[str, Any]) -> Dict[str, Any]:\n prov = {\n \"program_id\": 
self.manifest.get(\"program_id\"),\n \"manifest_sha\": self._manifest_sha(),\n \"commit\": self.manifest.get(\"commit\"),\n \"policy\": {\n \"enforce\": bool(self.policy.enforce),\n \"per_paragraph\": bool(self.policy.per_paragraph),\n \"repair\": bool(self.policy.repair),\n },\n \"ts\": time.time(),\n }\n out = dict(unit)\n out[\"provenance\"] = prov\n return out\n\n def _manifest_sha(self) -> Optional[str]:\n try:\n blob = str(self.manifest).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(blob).hexdigest()\n except Exception:\n return None\n\n","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:modules.citations._manifest_sha","uri":"program://Program_Conditioned_Adapter/function/modules.citations._manifest_sha#L131-L136","kind":"function","name":"_manifest_sha","path":"modules/citations.py","language":"python","start_line":131,"end_line":136,"context_start_line":111,"context_end_line":138,"code":" unit = dict(unit)\n unit[\"evidence\"] = anchors\n return unit\n\n def stamp_provenance(self, unit: Dict[str, Any]) -> Dict[str, Any]:\n prov = {\n \"program_id\": self.manifest.get(\"program_id\"),\n \"manifest_sha\": self._manifest_sha(),\n \"commit\": self.manifest.get(\"commit\"),\n \"policy\": {\n \"enforce\": bool(self.policy.enforce),\n \"per_paragraph\": bool(self.policy.per_paragraph),\n \"repair\": bool(self.policy.repair),\n },\n \"ts\": time.time(),\n }\n out = dict(unit)\n out[\"provenance\"] = prov\n return out\n\n def _manifest_sha(self) -> Optional[str]:\n try:\n blob = str(self.manifest).encode(\"utf-8\", errors=\"ignore\")\n return hashlib.sha256(blob).hexdigest()\n except Exception:\n return None\n\n","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_eval_datasets","uri":"program://Program_Conditioned_Adapter/module/examples.dataset_trainer.run_eval_datasets#L1-L237","kind":"module","name":"examples.dataset_trainer.run_eval_datasets","path":"examples/dataset_trainer/run_eval_datasets.py","language":"python","start_line":1,"end_line":237,"context_start_line":1,"context_end_line":237,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Dict, Any, List, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\n\n@dataclass\nclass EvalPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\tstate_path: Path\n\n\ndef _ensure_dirs(paths: EvalPaths) -> None:\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tpaths.out_dir.mkdir(parents=True, exist_ok=True)\n\n\ndef main() -> None:\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"datasets\" / \"hf_lora_mbpp\"\n\tpaths = EvalPaths(\n\t\tadapters_dir=artifacts_root / \"lora_adapter\",\n\t\tout_dir=artifacts_root / \"outputs\",\n\t\tstate_path=artifacts_root / \".program_state.json\",\n\t)\n\t_ensure_dirs(paths)\n\n\t# If not specified, prefer GPUs 0,1\n\tif not os.getenv(\"CUDA_VISIBLE_DEVICES\"):\n\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\n\n\tmodel_id = os.getenv(\"LLAMA_MODEL_ID\", \"meta-llama/Llama-3.1-8B-Instruct\")\n\tadapter_path = (os.getenv(\"ADAPTER_PATH\") or str(paths.adapters_dir)).strip()\n\tcontext_len = int(os.getenv(\"MAX_LEN\", \"256\"))\n\tper_device_batch = 
int(os.getenv(\"BATCH_SIZE\", \"1\"))\n\tgrad_accum = int(os.getenv(\"GRAD_ACCUM_STEPS\", \"8\"))\n\tlogging_steps = int(os.getenv(\"LOG_STEPS\", \"25\"))\n\tseed = int(os.getenv(\"SEED\", \"0\"))\n\tuse_qlora = os.getenv(\"USE_QLORA\", \"1\") == \"1\"\n\n\ttorch.manual_seed(seed)\n\tif torch.cuda.is_available():\n\t\ttorch.cuda.manual_seed_all(seed)\n\n\tfrom transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling # type: ignore\n\ttry:\n\t\tfrom peft import get_peft_model, prepare_model_for_kbit_training, PeftModel # type: ignore\n\texcept Exception as e:\n\t\tprint(f\"[eval] PEFT not available: {e}\")\n\t\tprint(\"Install 'peft' if you want to evaluate LoRA adapters.\")\n\t\tPeftModel = None # type: ignore\n\n\ttok = AutoTokenizer.from_pretrained(model_id)\n\tif tok.pad_token_id is None and tok.eos_token is not None:\n\t\ttok.pad_token = tok.eos_token\n\tpad_id = int(tok.pad_token_id or 0)\n\n\tprint(f\"[eval] loading base model {model_id}\")\n\tmodel_kwargs: Dict[str, Any] = {}\n\tvisible = os.environ.get(\"CUDA_VISIBLE_DEVICES\", \"\")\n\tnum_visible = len([v for v in visible.split(\",\") if v.strip()]) if visible else torch.cuda.device_count()\n\tif use_qlora:\n\t\ttry:\n\t\t\tfrom transformers import BitsAndBytesConfig # type: ignore\n\t\t\tbnb_config = BitsAndBytesConfig(\n\t\t\t\tload_in_4bit=True,\n\t\t\t\tbnb_4bit_quant_type=\"nf4\",\n\t\t\t\tbnb_4bit_compute_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,\n\t\t\t)\n\t\t\tmodel_kwargs[\"quantization_config\"] = bnb_config\n\t\t\tmodel_kwargs[\"device_map\"] = \"auto\" if num_visible > 1 else None\n\t\texcept Exception as e:\n\t\t\tprint(f\"[eval] BitsAndBytes not available, evaluating in full precision: {e}\")\n\t\t\tmodel_kwargs[\"device_map\"] = None\n\t\t\tmodel_kwargs[\"torch_dtype\"] = torch.bfloat16 if torch.cuda.is_available() else torch.float32\n\telse:\n\t\tmodel_kwargs[\"device_map\"] = \"auto\" if num_visible > 1 else None\n\t\tmodel_kwargs[\"torch_dtype\"] = torch.bfloat16 if torch.cuda.is_available() else torch.float32\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tbase_model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)\n\ttry:\n\t\tbase_model.config.use_cache = False\n\texcept Exception:\n\t\tpass\n\tdevice_map_used = model_kwargs.get(\"device_map\", None) is not None\n\tif not device_map_used:\n\t\tbase_model = base_model.to(device)\n\tif use_qlora:\n\t\ttry:\n\t\t\tbase_model = prepare_model_for_kbit_training(base_model) # type: ignore\n\t\texcept Exception:\n\t\t\tpass\n\tbase_model.eval()\n\n\t@torch.no_grad()\n\tdef evaluate_model(model: Any, tokenized_ds: Any, pad_token_id: int) -> Dict[str, float]:\n\t\tloader = DataLoader(tokenized_ds, batch_size=per_device_batch, shuffle=False)\n\t\ttotal_loss = 0.0\n\t\tnum_batches = 0\n\t\tfor batch in loader:\n\t\t\tlabels = batch.get(\"labels\", None)\n\t\t\tif labels is None:\n\t\t\t\t# create labels equal to input_ids to do next-token prediction\n\t\t\t\tlabels = batch[\"input_ids\"].clone()\n\t\t\t\tbatch[\"labels\"] = labels\n\t\t\t# Move to device only when not using a sharded device_map\n\t\t\tif not device_map_used:\n\t\t\t\tfor k, v in list(batch.items()):\n\t\t\t\t\tif torch.is_tensor(v):\n\t\t\t\t\t\tbatch[k] = v.to(device)\n\t\t\tinp = {k: v for k, v in batch.items() if k != \"labels\"}\n\t\t\toutputs = model(**inp)\n\t\t\tlogits = outputs.get(\"logits\")\n\t\t\tvocab = logits.size(-1)\n\t\t\tshift_logits = logits[..., :-1, 
:].contiguous()\n\t\t\tshift_labels = batch[\"labels\"][..., 1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(pad_token_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\ttotal_loss += float(loss.item())\n\t\t\tnum_batches += 1\n\t\tavg_loss = total_loss / max(1, num_batches)\n\t\ttry:\n\t\t\tppl = float(torch.exp(torch.tensor(avg_loss)).item())\n\t\texcept Exception:\n\t\t\tppl = float(\"inf\")\n\t\treturn {\"loss\": avg_loss, \"perplexity\": ppl}\n\n\t# Data util\n\tfrom datasets import Dataset as HFDataset # type: ignore\n\tdef make_dataset(texts: List[str]) -> Any:\n\t\traw = HFDataset.from_dict({\"text\": texts})\n\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\treturn out\n\t\treturn raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\n\t# Collator\n\tcollator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)\n\n\t# Resolve datasets one-by-one (sequential, full by default)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")\n\ttrain_new_only = False # eval should not skip previously seen\n\titer_program_texts = None\n\ttry:\n\t\tfrom examples.scripts.datasets_loader import iter_program_texts as _ipt # type: ignore\n\t\titer_program_texts = _ipt\n\texcept Exception:\n\t\ttry:\n\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))\n\t\t\tif _spec and _spec.loader:\n\t\t\t\t_mod = _imputil.module_from_spec(_spec)\n\t\t\t\t_spec.loader.exec_module(_mod) # type: ignore\n\t\t\t\titer_program_texts = getattr(_mod, \"iter_program_texts\", None) # type: ignore\n\t\texcept Exception:\n\t\t\titer_program_texts = None\n\tif iter_program_texts is None:\n\t\tprint(\"[eval] could not resolve datasets loader\")\n\t\tsys.exit(2)\n\n\tresults: Dict[str, Dict[str, Any]] = {}\n\n\t# Evaluate baseline first\n\tfor texts, src_name in iter_program_texts(\n\t\texample_dir=str(example_dir),\n\t\tconfig_path=datasets_cfg or None,\n\t\ttrain_new_only=train_new_only,\n\t\tstate_path=str(paths.state_path),\n\t):\n\t\tif not texts:\n\t\t\tcontinue\n\t\tds = make_dataset(texts)\n\t\tds = ds.with_format(\"torch\")\n\t\tprint(f\"[eval] baseline on {src_name} ({len(texts)} samples)\")\n\t\tmetrics = evaluate_model(base_model, ds, pad_id)\n\t\tresults[src_name] = results.get(src_name, {})\n\t\tresults[src_name][\"baseline\"] = {\n\t\t\t\"loss\": float(metrics.get(\"loss\", 0.0)),\n\t\t\t\"perplexity\": float(metrics.get(\"perplexity\", 0.0)),\n\t\t}\n\n\t# Attach LoRA and evaluate finetuned\n\tmodel_ft = base_model\n\tif PeftModel is not None and adapter_path and Path(adapter_path).exists():\n\t\tprint(f\"[eval] loading adapter from {adapter_path}\")\n\t\tmodel_ft = PeftModel.from_pretrained(base_model, adapter_path, is_trainable=False) # type: ignore\n\telse:\n\t\tprint(f\"[eval] adapter not found or PEFT missing; skipping finetuned eval: {adapter_path}\")\n\tmodel_ft.eval()\n\tfor texts, src_name in iter_program_texts(\n\t\texample_dir=str(example_dir),\n\t\tconfig_path=datasets_cfg or None,\n\t\ttrain_new_only=train_new_only,\n\t\tstate_path=str(paths.state_path),\n\t):\n\t\tif not texts:\n\t\t\tcontinue\n\t\tds = make_dataset(texts)\n\t\tds = 
ds.with_format(\"torch\")\n\t\tprint(f\"[eval] finetuned on {src_name} ({len(texts)} samples)\")\n\t\tmetrics = evaluate_model(model_ft, ds, pad_id)\n\t\tresults[src_name] = results.get(src_name, {})\n\t\tresults[src_name][\"finetuned\"] = {\n\t\t\t\"loss\": float(metrics.get(\"loss\", 0.0)),\n\t\t\t\"perplexity\": float(metrics.get(\"perplexity\", 0.0)),\n\t\t}\n\n\t# Save combined report\n\treport = {\n\t\t\"model_id\": model_id,\n\t\t\"adapter_path\": str(adapter_path),\n\t\t\"results\": results,\n\t}\n\twith open(paths.out_dir / \"EvalComparison.json\", \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps({\"schema_version\": 1, \"EvalComparison\": report}, indent=2))\n\tprint(str(paths.out_dir / \"EvalComparison.json\"))\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","source_hash":"351b6bd1bbaebdc3e50fe58e362d28dc76d73dc2fbd79cc0a344e38af6699749","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_eval_datasets.EvalPaths","uri":"program://Program_Conditioned_Adapter/class/examples.dataset_trainer.run_eval_datasets.EvalPaths#L16-L19","kind":"class","name":"EvalPaths","path":"examples/dataset_trainer/run_eval_datasets.py","language":"python","start_line":16,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Dict, Any, List, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\n\n@dataclass\nclass EvalPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\tstate_path: Path\n\n\ndef _ensure_dirs(paths: EvalPaths) -> None:\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tpaths.out_dir.mkdir(parents=True, exist_ok=True)\n\n\ndef main() -> None:\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"datasets\" / \"hf_lora_mbpp\"\n\tpaths = EvalPaths(\n\t\tadapters_dir=artifacts_root / \"lora_adapter\",\n\t\tout_dir=artifacts_root / \"outputs\",\n\t\tstate_path=artifacts_root / \".program_state.json\",\n\t)\n\t_ensure_dirs(paths)\n\n\t# If not specified, prefer GPUs 0,1\n\tif not os.getenv(\"CUDA_VISIBLE_DEVICES\"):\n\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"","source_hash":"351b6bd1bbaebdc3e50fe58e362d28dc76d73dc2fbd79cc0a344e38af6699749","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_eval_datasets._ensure_dirs","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_eval_datasets._ensure_dirs#L22-L24","kind":"function","name":"_ensure_dirs","path":"examples/dataset_trainer/run_eval_datasets.py","language":"python","start_line":22,"end_line":24,"context_start_line":2,"context_end_line":44,"code":"\nimport os\nimport sys\nimport json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Dict, Any, List, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\n\n@dataclass\nclass EvalPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\tstate_path: Path\n\n\ndef _ensure_dirs(paths: EvalPaths) -> None:\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tpaths.out_dir.mkdir(parents=True, exist_ok=True)\n\n\ndef main() -> None:\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"datasets\" / \"hf_lora_mbpp\"\n\tpaths = EvalPaths(\n\t\tadapters_dir=artifacts_root / 
\"lora_adapter\",\n\t\tout_dir=artifacts_root / \"outputs\",\n\t\tstate_path=artifacts_root / \".program_state.json\",\n\t)\n\t_ensure_dirs(paths)\n\n\t# If not specified, prefer GPUs 0,1\n\tif not os.getenv(\"CUDA_VISIBLE_DEVICES\"):\n\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\n\n\tmodel_id = os.getenv(\"LLAMA_MODEL_ID\", \"meta-llama/Llama-3.1-8B-Instruct\")\n\tadapter_path = (os.getenv(\"ADAPTER_PATH\") or str(paths.adapters_dir)).strip()\n\tcontext_len = int(os.getenv(\"MAX_LEN\", \"256\"))\n\tper_device_batch = int(os.getenv(\"BATCH_SIZE\", \"1\"))","source_hash":"351b6bd1bbaebdc3e50fe58e362d28dc76d73dc2fbd79cc0a344e38af6699749","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_eval_datasets.main","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_eval_datasets.main#L27-L231","kind":"function","name":"main","path":"examples/dataset_trainer/run_eval_datasets.py","language":"python","start_line":27,"end_line":231,"context_start_line":7,"context_end_line":237,"code":"from pathlib import Path\nfrom typing import Dict, Any, List, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\n\n@dataclass\nclass EvalPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\tstate_path: Path\n\n\ndef _ensure_dirs(paths: EvalPaths) -> None:\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tpaths.out_dir.mkdir(parents=True, exist_ok=True)\n\n\ndef main() -> None:\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"datasets\" / \"hf_lora_mbpp\"\n\tpaths = EvalPaths(\n\t\tadapters_dir=artifacts_root / \"lora_adapter\",\n\t\tout_dir=artifacts_root / \"outputs\",\n\t\tstate_path=artifacts_root / \".program_state.json\",\n\t)\n\t_ensure_dirs(paths)\n\n\t# If not specified, prefer GPUs 0,1\n\tif not os.getenv(\"CUDA_VISIBLE_DEVICES\"):\n\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\n\n\tmodel_id = os.getenv(\"LLAMA_MODEL_ID\", \"meta-llama/Llama-3.1-8B-Instruct\")\n\tadapter_path = (os.getenv(\"ADAPTER_PATH\") or str(paths.adapters_dir)).strip()\n\tcontext_len = int(os.getenv(\"MAX_LEN\", \"256\"))\n\tper_device_batch = int(os.getenv(\"BATCH_SIZE\", \"1\"))\n\tgrad_accum = int(os.getenv(\"GRAD_ACCUM_STEPS\", \"8\"))\n\tlogging_steps = int(os.getenv(\"LOG_STEPS\", \"25\"))\n\tseed = int(os.getenv(\"SEED\", \"0\"))\n\tuse_qlora = os.getenv(\"USE_QLORA\", \"1\") == \"1\"\n\n\ttorch.manual_seed(seed)\n\tif torch.cuda.is_available():\n\t\ttorch.cuda.manual_seed_all(seed)\n\n\tfrom transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling # type: ignore\n\ttry:\n\t\tfrom peft import get_peft_model, prepare_model_for_kbit_training, PeftModel # type: ignore\n\texcept Exception as e:\n\t\tprint(f\"[eval] PEFT not available: {e}\")\n\t\tprint(\"Install 'peft' if you want to evaluate LoRA adapters.\")\n\t\tPeftModel = None # type: ignore\n\n\ttok = AutoTokenizer.from_pretrained(model_id)\n\tif tok.pad_token_id is None and tok.eos_token is not None:\n\t\ttok.pad_token = tok.eos_token\n\tpad_id = int(tok.pad_token_id or 0)\n\n\tprint(f\"[eval] loading base model {model_id}\")\n\tmodel_kwargs: Dict[str, Any] = {}\n\tvisible = os.environ.get(\"CUDA_VISIBLE_DEVICES\", \"\")\n\tnum_visible = len([v for v in visible.split(\",\") if v.strip()]) if visible else torch.cuda.device_count()\n\tif use_qlora:\n\t\ttry:\n\t\t\tfrom transformers import BitsAndBytesConfig # type: ignore\n\t\t\tbnb_config = 
BitsAndBytesConfig(\n\t\t\t\tload_in_4bit=True,\n\t\t\t\tbnb_4bit_quant_type=\"nf4\",\n\t\t\t\tbnb_4bit_compute_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,\n\t\t\t)\n\t\t\tmodel_kwargs[\"quantization_config\"] = bnb_config\n\t\t\tmodel_kwargs[\"device_map\"] = \"auto\" if num_visible > 1 else None\n\t\texcept Exception as e:\n\t\t\tprint(f\"[eval] BitsAndBytes not available, evaluating in full precision: {e}\")\n\t\t\tmodel_kwargs[\"device_map\"] = None\n\t\t\tmodel_kwargs[\"torch_dtype\"] = torch.bfloat16 if torch.cuda.is_available() else torch.float32\n\telse:\n\t\tmodel_kwargs[\"device_map\"] = \"auto\" if num_visible > 1 else None\n\t\tmodel_kwargs[\"torch_dtype\"] = torch.bfloat16 if torch.cuda.is_available() else torch.float32\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tbase_model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)\n\ttry:\n\t\tbase_model.config.use_cache = False\n\texcept Exception:\n\t\tpass\n\tdevice_map_used = model_kwargs.get(\"device_map\", None) is not None\n\tif not device_map_used:\n\t\tbase_model = base_model.to(device)\n\tif use_qlora:\n\t\ttry:\n\t\t\tbase_model = prepare_model_for_kbit_training(base_model) # type: ignore\n\t\texcept Exception:\n\t\t\tpass\n\tbase_model.eval()\n\n\t@torch.no_grad()\n\tdef evaluate_model(model: Any, tokenized_ds: Any, pad_token_id: int) -> Dict[str, float]:\n\t\tloader = DataLoader(tokenized_ds, batch_size=per_device_batch, shuffle=False)\n\t\ttotal_loss = 0.0\n\t\tnum_batches = 0\n\t\tfor batch in loader:\n\t\t\tlabels = batch.get(\"labels\", None)\n\t\t\tif labels is None:\n\t\t\t\t# create labels equal to input_ids to do next-token prediction\n\t\t\t\tlabels = batch[\"input_ids\"].clone()\n\t\t\t\tbatch[\"labels\"] = labels\n\t\t\t# Move to device only when not using a sharded device_map\n\t\t\tif not device_map_used:\n\t\t\t\tfor k, v in list(batch.items()):\n\t\t\t\t\tif torch.is_tensor(v):\n\t\t\t\t\t\tbatch[k] = v.to(device)\n\t\t\tinp = {k: v for k, v in batch.items() if k != \"labels\"}\n\t\t\toutputs = model(**inp)\n\t\t\tlogits = outputs.get(\"logits\")\n\t\t\tvocab = logits.size(-1)\n\t\t\tshift_logits = logits[..., :-1, :].contiguous()\n\t\t\tshift_labels = batch[\"labels\"][..., 1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(pad_token_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\ttotal_loss += float(loss.item())\n\t\t\tnum_batches += 1\n\t\tavg_loss = total_loss / max(1, num_batches)\n\t\ttry:\n\t\t\tppl = float(torch.exp(torch.tensor(avg_loss)).item())\n\t\texcept Exception:\n\t\t\tppl = float(\"inf\")\n\t\treturn {\"loss\": avg_loss, \"perplexity\": ppl}\n\n\t# Data util\n\tfrom datasets import Dataset as HFDataset # type: ignore\n\tdef make_dataset(texts: List[str]) -> Any:\n\t\traw = HFDataset.from_dict({\"text\": texts})\n\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\treturn out\n\t\treturn raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\n\t# Collator\n\tcollator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)\n\n\t# Resolve datasets one-by-one (sequential, full by default)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")\n\ttrain_new_only = False # eval should not skip previously seen\n\titer_program_texts = 
None\n\ttry:\n\t\tfrom examples.scripts.datasets_loader import iter_program_texts as _ipt # type: ignore\n\t\titer_program_texts = _ipt\n\texcept Exception:\n\t\ttry:\n\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))\n\t\t\tif _spec and _spec.loader:\n\t\t\t\t_mod = _imputil.module_from_spec(_spec)\n\t\t\t\t_spec.loader.exec_module(_mod) # type: ignore\n\t\t\t\titer_program_texts = getattr(_mod, \"iter_program_texts\", None) # type: ignore\n\t\texcept Exception:\n\t\t\titer_program_texts = None\n\tif iter_program_texts is None:\n\t\tprint(\"[eval] could not resolve datasets loader\")\n\t\tsys.exit(2)\n\n\tresults: Dict[str, Dict[str, Any]] = {}\n\n\t# Evaluate baseline first\n\tfor texts, src_name in iter_program_texts(\n\t\texample_dir=str(example_dir),\n\t\tconfig_path=datasets_cfg or None,\n\t\ttrain_new_only=train_new_only,\n\t\tstate_path=str(paths.state_path),\n\t):\n\t\tif not texts:\n\t\t\tcontinue\n\t\tds = make_dataset(texts)\n\t\tds = ds.with_format(\"torch\")\n\t\tprint(f\"[eval] baseline on {src_name} ({len(texts)} samples)\")\n\t\tmetrics = evaluate_model(base_model, ds, pad_id)\n\t\tresults[src_name] = results.get(src_name, {})\n\t\tresults[src_name][\"baseline\"] = {\n\t\t\t\"loss\": float(metrics.get(\"loss\", 0.0)),\n\t\t\t\"perplexity\": float(metrics.get(\"perplexity\", 0.0)),\n\t\t}\n\n\t# Attach LoRA and evaluate finetuned\n\tmodel_ft = base_model\n\tif PeftModel is not None and adapter_path and Path(adapter_path).exists():\n\t\tprint(f\"[eval] loading adapter from {adapter_path}\")\n\t\tmodel_ft = PeftModel.from_pretrained(base_model, adapter_path, is_trainable=False) # type: ignore\n\telse:\n\t\tprint(f\"[eval] adapter not found or PEFT missing; skipping finetuned eval: {adapter_path}\")\n\tmodel_ft.eval()\n\tfor texts, src_name in iter_program_texts(\n\t\texample_dir=str(example_dir),\n\t\tconfig_path=datasets_cfg or None,\n\t\ttrain_new_only=train_new_only,\n\t\tstate_path=str(paths.state_path),\n\t):\n\t\tif not texts:\n\t\t\tcontinue\n\t\tds = make_dataset(texts)\n\t\tds = ds.with_format(\"torch\")\n\t\tprint(f\"[eval] finetuned on {src_name} ({len(texts)} samples)\")\n\t\tmetrics = evaluate_model(model_ft, ds, pad_id)\n\t\tresults[src_name] = results.get(src_name, {})\n\t\tresults[src_name][\"finetuned\"] = {\n\t\t\t\"loss\": float(metrics.get(\"loss\", 0.0)),\n\t\t\t\"perplexity\": float(metrics.get(\"perplexity\", 0.0)),\n\t\t}\n\n\t# Save combined report\n\treport = {\n\t\t\"model_id\": model_id,\n\t\t\"adapter_path\": str(adapter_path),\n\t\t\"results\": results,\n\t}\n\twith open(paths.out_dir / \"EvalComparison.json\", \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps({\"schema_version\": 1, \"EvalComparison\": report}, indent=2))\n\tprint(str(paths.out_dir / \"EvalComparison.json\"))\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","source_hash":"351b6bd1bbaebdc3e50fe58e362d28dc76d73dc2fbd79cc0a344e38af6699749","truncated":false} 
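The evaluate_model helper in the next record reduces to a standard shifted next-token cross-entropy with pad masking; perplexity is the exponential of the mean loss, as written into EvalComparison.json. The snippet below is a self-contained sketch of just that arithmetic on toy tensors; the vocab size, token ids, and random logits are invented stand-ins, so no model or repo code is required.

import torch
import torch.nn.functional as F

pad_id = 0
input_ids = torch.tensor([[5, 7, 9, pad_id]])    # one padded sequence of length 4
labels = input_ids.clone()                       # next-token targets, as in the eval loop

vocab = 16
logits = torch.randn(1, 4, vocab)                # stand-in for the model's output logits

shift_logits = logits[..., :-1, :].contiguous()  # positions 0..T-2 predict ...
shift_labels = labels[..., 1:].contiguous()      # ... tokens 1..T-1
shift_labels = shift_labels.masked_fill(shift_labels == pad_id, -100)

loss = F.cross_entropy(
    shift_logits.view(-1, vocab),
    shift_labels.view(-1),
    ignore_index=-100,                           # pad positions contribute nothing
)
ppl = torch.exp(loss)                            # per-dataset perplexity
print(float(loss), float(ppl))

Worth noting: because the script falls back to tok.pad_token = tok.eos_token when the tokenizer lacks a pad token, masking shift_labels == pad_token_id also drops genuine end-of-sequence tokens from the loss; the bias is applied identically to the baseline and finetuned passes, so the comparison itself stays consistent.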
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_eval_datasets.evaluate_model","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_eval_datasets.evaluate_model#L106-L140","kind":"function","name":"evaluate_model","path":"examples/dataset_trainer/run_eval_datasets.py","language":"python","start_line":106,"end_line":140,"context_start_line":86,"context_end_line":160,"code":"\t\tmodel_kwargs[\"device_map\"] = \"auto\" if num_visible > 1 else None\n\t\tmodel_kwargs[\"torch_dtype\"] = torch.bfloat16 if torch.cuda.is_available() else torch.float32\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tbase_model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)\n\ttry:\n\t\tbase_model.config.use_cache = False\n\texcept Exception:\n\t\tpass\n\tdevice_map_used = model_kwargs.get(\"device_map\", None) is not None\n\tif not device_map_used:\n\t\tbase_model = base_model.to(device)\n\tif use_qlora:\n\t\ttry:\n\t\t\tbase_model = prepare_model_for_kbit_training(base_model) # type: ignore\n\t\texcept Exception:\n\t\t\tpass\n\tbase_model.eval()\n\n\t@torch.no_grad()\n\tdef evaluate_model(model: Any, tokenized_ds: Any, pad_token_id: int) -> Dict[str, float]:\n\t\tloader = DataLoader(tokenized_ds, batch_size=per_device_batch, shuffle=False)\n\t\ttotal_loss = 0.0\n\t\tnum_batches = 0\n\t\tfor batch in loader:\n\t\t\tlabels = batch.get(\"labels\", None)\n\t\t\tif labels is None:\n\t\t\t\t# create labels equal to input_ids to do next-token prediction\n\t\t\t\tlabels = batch[\"input_ids\"].clone()\n\t\t\t\tbatch[\"labels\"] = labels\n\t\t\t# Move to device only when not using a sharded device_map\n\t\t\tif not device_map_used:\n\t\t\t\tfor k, v in list(batch.items()):\n\t\t\t\t\tif torch.is_tensor(v):\n\t\t\t\t\t\tbatch[k] = v.to(device)\n\t\t\tinp = {k: v for k, v in batch.items() if k != \"labels\"}\n\t\t\toutputs = model(**inp)\n\t\t\tlogits = outputs.get(\"logits\")\n\t\t\tvocab = logits.size(-1)\n\t\t\tshift_logits = logits[..., :-1, :].contiguous()\n\t\t\tshift_labels = batch[\"labels\"][..., 1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(pad_token_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\ttotal_loss += float(loss.item())\n\t\t\tnum_batches += 1\n\t\tavg_loss = total_loss / max(1, num_batches)\n\t\ttry:\n\t\t\tppl = float(torch.exp(torch.tensor(avg_loss)).item())\n\t\texcept Exception:\n\t\t\tppl = float(\"inf\")\n\t\treturn {\"loss\": avg_loss, \"perplexity\": ppl}\n\n\t# Data util\n\tfrom datasets import Dataset as HFDataset # type: ignore\n\tdef make_dataset(texts: List[str]) -> Any:\n\t\traw = HFDataset.from_dict({\"text\": texts})\n\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\treturn out\n\t\treturn raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\n\t# Collator\n\tcollator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)\n\n\t# Resolve datasets one-by-one (sequential, full by default)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")\n\ttrain_new_only = False # eval should not skip previously seen\n\titer_program_texts = None\n\ttry:\n\t\tfrom examples.scripts.datasets_loader import iter_program_texts as _ipt # type: ignore\n\t\titer_program_texts = 
_ipt","source_hash":"351b6bd1bbaebdc3e50fe58e362d28dc76d73dc2fbd79cc0a344e38af6699749","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_eval_datasets.make_dataset","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_eval_datasets.make_dataset#L144-L149","kind":"function","name":"make_dataset","path":"examples/dataset_trainer/run_eval_datasets.py","language":"python","start_line":144,"end_line":149,"context_start_line":124,"context_end_line":169,"code":"\t\t\tvocab = logits.size(-1)\n\t\t\tshift_logits = logits[..., :-1, :].contiguous()\n\t\t\tshift_labels = batch[\"labels\"][..., 1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(pad_token_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\ttotal_loss += float(loss.item())\n\t\t\tnum_batches += 1\n\t\tavg_loss = total_loss / max(1, num_batches)\n\t\ttry:\n\t\t\tppl = float(torch.exp(torch.tensor(avg_loss)).item())\n\t\texcept Exception:\n\t\t\tppl = float(\"inf\")\n\t\treturn {\"loss\": avg_loss, \"perplexity\": ppl}\n\n\t# Data util\n\tfrom datasets import Dataset as HFDataset # type: ignore\n\tdef make_dataset(texts: List[str]) -> Any:\n\t\traw = HFDataset.from_dict({\"text\": texts})\n\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\treturn out\n\t\treturn raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\n\t# Collator\n\tcollator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)\n\n\t# Resolve datasets one-by-one (sequential, full by default)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")\n\ttrain_new_only = False # eval should not skip previously seen\n\titer_program_texts = None\n\ttry:\n\t\tfrom examples.scripts.datasets_loader import iter_program_texts as _ipt # type: ignore\n\t\titer_program_texts = _ipt\n\texcept Exception:\n\t\ttry:\n\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))\n\t\t\tif _spec and _spec.loader:\n\t\t\t\t_mod = _imputil.module_from_spec(_spec)\n\t\t\t\t_spec.loader.exec_module(_mod) # type: ignore\n\t\t\t\titer_program_texts = getattr(_mod, \"iter_program_texts\", None) # type: ignore","source_hash":"351b6bd1bbaebdc3e50fe58e362d28dc76d73dc2fbd79cc0a344e38af6699749","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_eval_datasets._tok_fn","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_eval_datasets._tok_fn#L146-L148","kind":"function","name":"_tok_fn","path":"examples/dataset_trainer/run_eval_datasets.py","language":"python","start_line":146,"end_line":148,"context_start_line":126,"context_end_line":168,"code":"\t\t\tshift_labels = batch[\"labels\"][..., 1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(pad_token_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\ttotal_loss += float(loss.item())\n\t\t\tnum_batches += 1\n\t\tavg_loss = total_loss / max(1, num_batches)\n\t\ttry:\n\t\t\tppl = 
float(torch.exp(torch.tensor(avg_loss)).item())\n\t\texcept Exception:\n\t\t\tppl = float(\"inf\")\n\t\treturn {\"loss\": avg_loss, \"perplexity\": ppl}\n\n\t# Data util\n\tfrom datasets import Dataset as HFDataset # type: ignore\n\tdef make_dataset(texts: List[str]) -> Any:\n\t\traw = HFDataset.from_dict({\"text\": texts})\n\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\treturn out\n\t\treturn raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\n\t# Collator\n\tcollator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)\n\n\t# Resolve datasets one-by-one (sequential, full by default)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")\n\ttrain_new_only = False # eval should not skip previously seen\n\titer_program_texts = None\n\ttry:\n\t\tfrom examples.scripts.datasets_loader import iter_program_texts as _ipt # type: ignore\n\t\titer_program_texts = _ipt\n\texcept Exception:\n\t\ttry:\n\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))\n\t\t\tif _spec and _spec.loader:\n\t\t\t\t_mod = _imputil.module_from_spec(_spec)\n\t\t\t\t_spec.loader.exec_module(_mod) # type: ignore","source_hash":"351b6bd1bbaebdc3e50fe58e362d28dc76d73dc2fbd79cc0a344e38af6699749","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_llama_lora_train_mbpp","uri":"program://Program_Conditioned_Adapter/module/examples.dataset_trainer.run_llama_lora_train_mbpp#L1-L443","kind":"module","name":"examples.dataset_trainer.run_llama_lora_train_mbpp","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":1,"end_line":443,"context_start_line":1,"context_end_line":443,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Dict, Any\n\nimport torch\nimport torch.nn.functional as F\n\n# Minimal dependencies: transformers + peft + datasets\n\n\n@dataclass\nclass TrainPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\n\ndef _ensure_dirs(paths: TrainPaths) -> None:\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tpaths.out_dir.mkdir(parents=True, exist_ok=True)\n\n\ndef _load_mbpp_texts(max_n: int) -> List[str]:\n\t# Prefer direct HF datasets to avoid import path issues\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\t\tds = None\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tds = load_dataset(\"mbpp\", subset, split=\"train\") if subset else load_dataset(\"mbpp\", split=\"train\")\n\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\tif ds is None:\n\t\t\traise RuntimeError(\"mbpp dataset not available\")\n\t\ttexts: List[str] = []\n\t\ttotal = len(ds)\n\t\tfor i in range(total):\n\t\t\trow = ds[i]\n\t\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\t\tcombined = (prompt + \"\\n\" + code).strip()\n\t\t\tif combined:\n\t\t\t\ttexts.append(combined)\n\t\t\tif len(texts) >= int(max_n):\n\t\t\t\tbreak\n\t\tprint(f\"[mbpp] loaded {len(texts)} / {total} train samples\")\n\t\treturn texts\n\texcept Exception as e:\n\t\tprint(f\"[mbpp] failed via 
datasets; using tiny fallback: {e}\")\n\t\treturn [\n\t\t\t\"def add(x, y): return x + y\",\n\t\t\t\"class Foo:\\n def bar(self):\\n return 42\",\n\t\t\t\"for i in range(10): print(i)\",\n\t\t]\n\n\ndef main() -> None:\n\texample_dir = Path(__file__).resolve()\n\tartifacts_root = example_dir / \"datasets\" / \"hf_lora_mbpp\"\n\tpaths = TrainPaths(\n\t\tadapters_dir=artifacts_root / \"lora_adapter\",\n\t\tout_dir=artifacts_root / \"outputs\",\n\t)\n\t_ensure_dirs(paths)\n\tstate_path = artifacts_root / \".program_state.json\"\n\n\t# If not specified, prefer GPUs 0,1\n\tif not os.getenv(\"CUDA_VISIBLE_DEVICES\"):\n\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\n\n\tmodel_id = os.getenv(\"LLAMA_MODEL_ID\", \"meta-llama/Llama-3.1-8B-Instruct\")\n\tmax_n = int(os.getenv(\"MBPP_MAX_N\", \"120\"))\n\tcontext_len = int(os.getenv(\"MAX_LEN\", \"256\"))\n\tlr = float(os.getenv(\"LR\", \"5e-5\"))\n\tnum_epochs = float(os.getenv(\"EPOCHS\", \"1\"))\n\tper_device_batch = int(os.getenv(\"BATCH_SIZE\", \"1\"))\n\tgrad_accum = int(os.getenv(\"GRAD_ACCUM_STEPS\", \"8\"))\n\twarmup_ratio = float(os.getenv(\"WARMUP_RATIO\", \"0.1\"))\n\tweight_decay = float(os.getenv(\"WEIGHT_DECAY\", \"0.01\"))\n\tsave_steps = int(os.getenv(\"SAVE_STEPS\", \"0\"))\n\tlogging_steps = int(os.getenv(\"LOG_STEPS\", \"25\"))\n\tseed = int(os.getenv(\"SEED\", \"0\"))\n\tuse_qlora = os.getenv(\"USE_QLORA\", \"1\") == \"1\"\n\n\ttorch.manual_seed(seed)\n\tif torch.cuda.is_available():\n\t\ttorch.cuda.manual_seed_all(seed)\n\n\t# Tokenizer/model\n\tfrom transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, Trainer, TrainingArguments # type: ignore\n\ttry:\n\t\tfrom peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training, PeftModel # type: ignore\n\texcept Exception as e:\n\t\tprint(f\"[lora] PEFT not available: {e}\")\n\t\tprint(\"Please install 'peft' to train adapters: pip install peft\")\n\t\tsys.exit(2)\n\n\ttok = AutoTokenizer.from_pretrained(model_id)\n\tif tok.pad_token_id is None and tok.eos_token is not None:\n\t\ttok.pad_token = tok.eos_token\n\tpad_id = int(tok.pad_token_id or 0)\n\n\tprint(f\"[model] loading {model_id}\")\n\tmodel_kwargs: Dict[str, Any] = {}\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tvisible = os.environ.get(\"CUDA_VISIBLE_DEVICES\", \"\")\n\tnum_visible = len([v for v in visible.split(\",\") if v.strip()]) if visible else torch.cuda.device_count()\n\tif use_qlora:\n\t\ttry:\n\t\t\tfrom transformers import BitsAndBytesConfig # type: ignore\n\t\t\tbnb_config = BitsAndBytesConfig(\n\t\t\t\tload_in_4bit=True,\n\t\t\t\tbnb_4bit_quant_type=\"nf4\",\n\t\t\t\tbnb_4bit_compute_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,\n\t\t\t)\n\t\t\tmodel_kwargs[\"quantization_config\"] = bnb_config\n\t\t\t# Prefer layer-wise sharding over DataParallel\n\t\t\tmodel_kwargs[\"device_map\"] = \"auto\" if num_visible > 1 else None\n\t\texcept Exception as e:\n\t\t\tprint(f\"[qlora] BitsAndBytes not available, falling back to full precision: {e}\")\n\t\t\tmodel_kwargs[\"device_map\"] = None\n\t\t\tmodel_kwargs[\"torch_dtype\"] = torch.bfloat16 if torch.cuda.is_available() else torch.float32\n\telse:\n\t\t# If multiple GPUs, shard with accelerate device_map\n\t\tmodel_kwargs[\"device_map\"] = \"auto\" if num_visible > 1 else None\n\t\tmodel_kwargs[\"torch_dtype\"] = torch.bfloat16 if torch.cuda.is_available() else torch.float32\n\n\tmodel = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)\n\t# Disable 
cache when using gradient checkpointing\n\ttry:\n\t\tmodel.config.use_cache = False\n\texcept Exception:\n\t\tpass\n\t# Only move to single device when not using device_map auto\n\tif model_kwargs.get(\"device_map\", None) is None:\n\t\tmodel = model.to(device)\n\t# Prepare for k-bit training (enables input grads, casts norms, etc.)\n\tif use_qlora:\n\t\ttry:\n\t\t\tmodel = prepare_model_for_kbit_training(model)\n\t\texcept Exception as e:\n\t\t\tprint(f\"[qlora] prepare_model_for_kbit_training failed: {e}\")\n\t# Optional: gradient checkpointing for memory\n\tif hasattr(model, \"gradient_checkpointing_enable\"):\n\t\ttry:\n\t\t\tmodel.gradient_checkpointing_enable()\n\t\texcept Exception:\n\t\t\tpass\n\n\t# Attach LoRA to attention and MLP projections\n\ttarget_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"]\n\tlora_r = int(os.getenv(\"LORA_R\", \"8\"))\n\tlora_alpha = int(os.getenv(\"LORA_ALPHA\", \"16\"))\n\tlora_dropout = float(os.getenv(\"LORA_DROPOUT\", \"0.05\"))\n\tresume = os.getenv(\"RESUME\", \"0\") == \"1\"\n\tadapter_override = os.getenv(\"ADAPTER_PATH\", \"\").strip()\n\tloaded_adapter_path = None\n\tif adapter_override:\n\t\tloaded_adapter_path = adapter_override\n\telif state_path.exists():\n\t\ttry:\n\t\t\tprev_state = json.loads(state_path.read_text())\n\t\t\tloaded_adapter_path = str(prev_state.get(\"last_adapter_path\") or \"\").strip() or None\n\t\texcept Exception:\n\t\t\tloaded_adapter_path = None\n\t# If resume is requested but no adapter path is available, fall back to creating a new LoRA\n\tif resume and not loaded_adapter_path:\n\t\tresume = False\n\tif resume and loaded_adapter_path:\n\t\ttry:\n\t\t\tmodel = PeftModel.from_pretrained(model, loaded_adapter_path, is_trainable=True)\n\t\t\tprint(f\"[resume] loaded adapter from {loaded_adapter_path}\")\n\t\texcept Exception as e:\n\t\t\tprint(f\"[resume] failed to load adapter at {loaded_adapter_path}: {e}\")\n\t\t\tresume = False\n\tif not resume:\n\t\tlora_cfg = LoraConfig(\n\t\t\tr=lora_r,\n\t\t\tlora_alpha=lora_alpha,\n\t\t\ttarget_modules=target_modules,\n\t\t\tlora_dropout=lora_dropout,\n\t\t\tbias=\"none\",\n\t\t\ttask_type=\"CAUSAL_LM\",\n\t\t)\n\t\tmodel = get_peft_model(model, lora_cfg)\n\t# Print trainable parameter summary without relying on PEFT helper\n\ttry:\n\t\ttrainable = sum(p.numel() for p in model.parameters() if p.requires_grad)\n\t\ttotal = sum(p.numel() for p in model.parameters())\n\t\tratio = (float(trainable) / float(max(1, total))) * 100.0\n\t\tprint(f\"trainable params: {trainable:,} || all params: {total:,} || trainable%: {ratio:.4f}\")\n\texcept Exception:\n\t\tpass\n\n\t# Trainer with custom loss that avoids passing labels to model forward\n\tfrom transformers import Trainer, TrainingArguments # type: ignore\n\tclass LossTrainer(Trainer):\n\t\tdef __init__(self, pad_id: int, *args, **kwargs) -> None:\n\t\t\tsuper().__init__(*args, **kwargs)\n\t\t\tself._pad_id = int(pad_id)\n\t\tdef compute_loss(self, model, inputs, return_outputs: bool = False, num_items_in_batch=None):\n\t\t\tlabels = inputs.get(\"labels\", None)\n\t\t\tif labels is None:\n\t\t\t\treturn super().compute_loss(model, inputs, return_outputs=return_outputs)\n\t\t\tinp = {k: v for k, v in inputs.items() if k != \"labels\"}\n\t\t\toutputs = model(**inp)\n\t\t\tlogits = outputs.get(\"logits\")\n\t\t\tvocab = logits.size(-1)\n\t\t\tshift_logits = logits[..., :-1, :].contiguous()\n\t\t\tshift_labels = labels[..., 
1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(self._pad_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\treturn (loss, outputs) if return_outputs else loss\n\n\t# Build dataset(s) via configurable loader (local-first, then HF)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")\n\tsequential = os.getenv(\"SEQUENTIAL_TRAIN\", \"1\") == \"1\"\n\t# In sequential mode, default to skipping datasets we've already trained\n\ttrain_new_only = os.getenv(\"TRAIN_NEW_ONLY\", \"1\" if sequential else \"0\") == \"1\"\n\tloaded_names: List[str] = []\n\ttotal_samples = 0\n\tfrom datasets import Dataset as HFDataset # type: ignore\n\tif sequential:\n\t\titer_program_texts = None\n\t\ttry:\n\t\t\tfrom examples.scripts.datasets_loader import iter_program_texts as _ipt # type: ignore\n\t\t\titer_program_texts = _ipt\n\t\texcept Exception:\n\t\t\t# Fallback: import from file path to avoid relying on PYTHONPATH\n\t\t\ttry:\n\t\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))\n\t\t\t\tif _spec and _spec.loader:\n\t\t\t\t\t_mod = _imputil.module_from_spec(_spec)\n\t\t\t\t\t_spec.loader.exec_module(_mod) # type: ignore\n\t\t\t\t\titer_program_texts = getattr(_mod, \"iter_program_texts\", None) # type: ignore\n\t\t\texcept Exception:\n\t\t\t\titer_program_texts = None\n\t\tif iter_program_texts is not None:\n\t\t\tsource_iter = iter_program_texts(\n\t\t\t\texample_dir=str(example_dir),\n\t\t\t\tconfig_path=datasets_cfg or None,\n\t\t\t\ttrain_new_only=train_new_only,\n\t\t\t\tstate_path=str(state_path),\n\t\t\t)\n\t\t\tsaw_any = False\n\t\t\tfor ds_texts, src_name in source_iter:\n\t\t\t\tif not ds_texts:\n\t\t\t\t\tcontinue\n\t\t\t\tsaw_any = True\n\t\t\t\traw = HFDataset.from_dict({\"text\": ds_texts})\n\t\t\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\t\t\treturn out\n\t\t\t\ttokenized = raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\t\t\t\t# LM data collator will create labels, and default ignore_index=-100\n\t\t\t\tcollator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)\n\t\t\t\ttrain_args = TrainingArguments(\n\t\t\t\t\toutput_dir=str(paths.out_dir / \"trainer\"),\n\t\t\t\t\tper_device_train_batch_size=per_device_batch,\n\t\t\t\t\tgradient_accumulation_steps=grad_accum,\n\t\t\t\t\tlearning_rate=lr,\n\t\t\t\t\tweight_decay=weight_decay,\n\t\t\t\t\tnum_train_epochs=num_epochs,\n\t\t\t\t\twarmup_ratio=warmup_ratio,\n\t\t\t\t\tlogging_steps=logging_steps,\n\t\t\t\t\tsave_steps=save_steps,\n\t\t\t\t\tsave_total_limit=1,\n\t\t\t\t\treport_to=[],\n\t\t\t\t\tseed=seed,\n\t\t\t\t\tbf16=torch.cuda.is_available(),\n\t\t\t\t\tfp16=False if torch.cuda.is_available() else False,\n\t\t\t\t)\n\t\t\t\ttrainer = LossTrainer(\n\t\t\t\t\tpad_id=pad_id,\n\t\t\t\t\tmodel=model,\n\t\t\t\t\targs=train_args,\n\t\t\t\t\ttrain_dataset=tokenized,\n\t\t\t\t\tdata_collator=collator,\n\t\t\t\t)\n\t\t\t\tprint(f\"[train] starting on {src_name} ({len(ds_texts)} samples)\")\n\t\t\t\t_ = trainer.train()\n\t\t\t\tprint(f\"[train] done {src_name}\")\n\t\t\t\t# Save LoRA adapter incrementally and update 
state\n\t\t\t\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\t\t\t\tmodel.save_pretrained(str(paths.adapters_dir))\n\t\t\t\ttry:\n\t\t\t\t\tstate = {}\n\t\t\t\t\tif state_path.exists():\n\t\t\t\t\t\tstate = json.loads(state_path.read_text())\n\t\t\t\t\tstate[\"last_adapter_path\"] = str(paths.adapters_dir)\n\t\t\t\t\tseen = set(state.get(\"datasets_seen\", []))\n\t\t\t\t\tseen.add(src_name)\n\t\t\t\t\tstate[\"datasets_seen\"] = sorted(list(seen))\n\t\t\t\t\tstate_path.write_text(json.dumps(state, indent=2), encoding=\"utf-8\")\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\t\t\tloaded_names.append(src_name)\n\t\t\t\ttotal_samples += len(ds_texts)\n\t\t\tif not saw_any:\n\t\t\t\t# Fallback to MBPP when nothing configured/available\n\t\t\t\tds_texts = _load_mbpp_texts(max_n=max_n)\n\t\t\t\traw = HFDataset.from_dict({\"text\": ds_texts})\n\t\t\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\t\t\treturn out\n\t\t\t\ttokenized = raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\t\telse:\n\t\t\t# Fall through to non-sequential path on missing iterator\n\t\t\tsequential = False\n\tif not sequential:\n\t\ttry:\n\t\t\tfrom examples.scripts.datasets_loader import load_program_texts # type: ignore\n\t\texcept Exception:\n\t\t\t# Fallback: load loader directly from file path without altering sys.path\n\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))\n\t\t\tif _spec and _spec.loader:\n\t\t\t\t_mod = _imputil.module_from_spec(_spec)\n\t\t\t\t_spec.loader.exec_module(_mod) # type: ignore\n\t\t\t\tload_program_texts = getattr(_mod, \"load_program_texts\") # type: ignore\n\t\t\telse:\n\t\t\t\traise\n\t\ttexts, loaded_names = load_program_texts(\n\t\t\texample_dir=str(example_dir),\n\t\t\tconfig_path=datasets_cfg or None,\n\t\t\ttrain_new_only=train_new_only,\n\t\t\tstate_path=str(state_path),\n\t\t)\n\t\tif not texts:\n\t\t\t# Fallback to MBPP train only if config produced nothing\n\t\t\ttexts = _load_mbpp_texts(max_n=max_n)\n\t\t\tloaded_names = [\"mbpp:train\"]\n\t\traw = HFDataset.from_dict({\"text\": texts})\n\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\treturn out\n\t\ttokenized = raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\t\ttotal_samples = len(texts)\n\n\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\treturn out\n\n\tif not sequential:\n\t\t# LM data collator will create labels, and default ignore_index=-100\n\t\tcollator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)\n\t\t# Training config\n\t\ttrain_args = TrainingArguments(\n\t\t\toutput_dir=str(paths.out_dir / \"trainer\"),\n\t\t\tper_device_train_batch_size=per_device_batch,\n\t\t\tgradient_accumulation_steps=grad_accum,\n\t\t\tlearning_rate=lr,\n\t\t\tweight_decay=weight_decay,\n\t\t\tnum_train_epochs=num_epochs,\n\t\t\twarmup_ratio=warmup_ratio,\n\t\t\tlogging_steps=logging_steps,\n\t\t\tsave_steps=save_steps,\n\t\t\tsave_total_limit=1,\n\t\t\treport_to=[],\n\t\t\tseed=seed,\n\t\t\tbf16=torch.cuda.is_available(),\n\t\t\tfp16=False if torch.cuda.is_available() else 
False,\n\t\t)\n\t\n\tif not sequential:\n\t\ttrainer = LossTrainer(\n\t\t\tpad_id=pad_id,\n\t\t\tmodel=model,\n\t\t\targs=train_args,\n\t\t\ttrain_dataset=tokenized,\n\t\t\tdata_collator=collator,\n\t\t)\n\t\tprint(\"[train] starting\")\n\t\ttrain_out = trainer.train()\n\t\tprint(\"[train] done\")\n\telse:\n\t\ttrain_out = type(\"obj\", (), {\"metrics\": {}})() # minimal placeholder\n\n\t# Save LoRA adapter\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tmodel.save_pretrained(str(paths.adapters_dir))\n\t# Update program state for continuous training\n\ttry:\n\t\tstate = {}\n\t\tif state_path.exists():\n\t\t\tstate = json.loads(state_path.read_text())\n\t\tstate[\"last_adapter_path\"] = str(paths.adapters_dir)\n\t\tseen = set(state.get(\"datasets_seen\", []))\n\t\tseen.update(loaded_names or [])\n\t\tstate[\"datasets_seen\"] = sorted(list(seen))\n\t\tstate[\"last_training\"] = {\n\t\t\t\"samples\": total_samples,\n\t\t\t\"epochs\": num_epochs,\n\t\t\t\"batch_size\": per_device_batch,\n\t\t\t\"grad_accum\": grad_accum,\n\t\t\t\"lr\": lr,\n\t\t\t\"max_len\": context_len,\n\t\t}\n\t\tstate_path.write_text(json.dumps(state, indent=2), encoding=\"utf-8\")\n\t\tprint(f\"[state] updated {state_path}\")\n\texcept Exception as e:\n\t\tprint(f\"[state] failed to update program state: {e}\")\n\t# Save report\n\tmetrics = train_out.metrics if hasattr(train_out, \"metrics\") else {}\n\treport = {\n\t\t\"model_id\": model_id,\n\t\t\"num_samples\": total_samples,\n\t\t\"epochs\": num_epochs,\n\t\t\"batch_size\": per_device_batch,\n\t\t\"grad_accum\": grad_accum,\n\t\t\"lr\": lr,\n\t\t\"warmup_ratio\": warmup_ratio,\n\t\t\"weight_decay\": weight_decay,\n\t\t\"lora\": {\n\t\t\t\"r\": lora_r,\n\t\t\t\"alpha\": lora_alpha,\n\t\t\t\"dropout\": lora_dropout,\n\t\t\t\"targets\": target_modules,\n\t\t},\n\t\t\"metrics\": metrics,\n\t\t\"adapter_path\": str(paths.adapters_dir),\n\t}\n\twith open(paths.out_dir / \"LoraTrainingReport.json\", \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps({\"schema_version\": 1, \"LoraTrainingReport\": report}, indent=2))\n\tprint(str(paths.out_dir / \"LoraTrainingReport.json\"))\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_llama_lora_train_mbpp.TrainPaths","uri":"program://Program_Conditioned_Adapter/class/examples.dataset_trainer.run_llama_lora_train_mbpp.TrainPaths#L17-L19","kind":"class","name":"TrainPaths","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":17,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Dict, Any\n\nimport torch\nimport torch.nn.functional as F\n\n# Minimal dependencies: transformers + peft + datasets\n\n\n@dataclass\nclass TrainPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\n\ndef _ensure_dirs(paths: TrainPaths) -> None:\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tpaths.out_dir.mkdir(parents=True, exist_ok=True)\n\n\ndef _load_mbpp_texts(max_n: int) -> List[str]:\n\t# Prefer direct HF datasets to avoid import path issues\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\t\tds = None\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tds = load_dataset(\"mbpp\", subset, split=\"train\") 
if subset else load_dataset(\"mbpp\", split=\"train\")\n\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\tif ds is None:\n\t\t\traise RuntimeError(\"mbpp dataset not available\")","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_llama_lora_train_mbpp._ensure_dirs","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_llama_lora_train_mbpp._ensure_dirs#L22-L24","kind":"function","name":"_ensure_dirs","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":22,"end_line":24,"context_start_line":2,"context_end_line":44,"code":"\nimport os\nimport sys\nimport json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Dict, Any\n\nimport torch\nimport torch.nn.functional as F\n\n# Minimal dependencies: transformers + peft + datasets\n\n\n@dataclass\nclass TrainPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\n\ndef _ensure_dirs(paths: TrainPaths) -> None:\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tpaths.out_dir.mkdir(parents=True, exist_ok=True)\n\n\ndef _load_mbpp_texts(max_n: int) -> List[str]:\n\t# Prefer direct HF datasets to avoid import path issues\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\t\tds = None\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tds = load_dataset(\"mbpp\", subset, split=\"train\") if subset else load_dataset(\"mbpp\", split=\"train\")\n\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\tif ds is None:\n\t\t\traise RuntimeError(\"mbpp dataset not available\")\n\t\ttexts: List[str] = []\n\t\ttotal = len(ds)\n\t\tfor i in range(total):\n\t\t\trow = ds[i]\n\t\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_llama_lora_train_mbpp._load_mbpp_texts","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_llama_lora_train_mbpp._load_mbpp_texts#L27-L59","kind":"function","name":"_load_mbpp_texts","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":27,"end_line":59,"context_start_line":7,"context_end_line":79,"code":"from pathlib import Path\nfrom typing import List, Dict, Any\n\nimport torch\nimport torch.nn.functional as F\n\n# Minimal dependencies: transformers + peft + datasets\n\n\n@dataclass\nclass TrainPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\n\ndef _ensure_dirs(paths: TrainPaths) -> None:\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tpaths.out_dir.mkdir(parents=True, exist_ok=True)\n\n\ndef _load_mbpp_texts(max_n: int) -> List[str]:\n\t# Prefer direct HF datasets to avoid import path issues\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\t\tds = None\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tds = load_dataset(\"mbpp\", subset, split=\"train\") if subset else load_dataset(\"mbpp\", split=\"train\")\n\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\tif ds is None:\n\t\t\traise RuntimeError(\"mbpp dataset not available\")\n\t\ttexts: List[str] = []\n\t\ttotal = len(ds)\n\t\tfor i in range(total):\n\t\t\trow = ds[i]\n\t\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or 
row.get(\"text\") or \"\").strip()\n\t\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\t\tcombined = (prompt + \"\\n\" + code).strip()\n\t\t\tif combined:\n\t\t\t\ttexts.append(combined)\n\t\t\tif len(texts) >= int(max_n):\n\t\t\t\tbreak\n\t\tprint(f\"[mbpp] loaded {len(texts)} / {total} train samples\")\n\t\treturn texts\n\texcept Exception as e:\n\t\tprint(f\"[mbpp] failed via datasets; using tiny fallback: {e}\")\n\t\treturn [\n\t\t\t\"def add(x, y): return x + y\",\n\t\t\t\"class Foo:\\n def bar(self):\\n return 42\",\n\t\t\t\"for i in range(10): print(i)\",\n\t\t]\n\n\ndef main() -> None:\n\texample_dir = Path(__file__).resolve()\n\tartifacts_root = example_dir / \"datasets\" / \"hf_lora_mbpp\"\n\tpaths = TrainPaths(\n\t\tadapters_dir=artifacts_root / \"lora_adapter\",\n\t\tout_dir=artifacts_root / \"outputs\",\n\t)\n\t_ensure_dirs(paths)\n\tstate_path = artifacts_root / \".program_state.json\"\n\n\t# If not specified, prefer GPUs 0,1\n\tif not os.getenv(\"CUDA_VISIBLE_DEVICES\"):\n\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\n\n\tmodel_id = os.getenv(\"LLAMA_MODEL_ID\", \"meta-llama/Llama-3.1-8B-Instruct\")\n\tmax_n = int(os.getenv(\"MBPP_MAX_N\", \"120\"))\n\tcontext_len = int(os.getenv(\"MAX_LEN\", \"256\"))\n\tlr = float(os.getenv(\"LR\", \"5e-5\"))","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_llama_lora_train_mbpp.main","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_llama_lora_train_mbpp.main#L62-L437","kind":"function","name":"main","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":62,"end_line":437,"context_start_line":42,"context_end_line":443,"code":"\t\tfor i in range(total):\n\t\t\trow = ds[i]\n\t\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\t\tcombined = (prompt + \"\\n\" + code).strip()\n\t\t\tif combined:\n\t\t\t\ttexts.append(combined)\n\t\t\tif len(texts) >= int(max_n):\n\t\t\t\tbreak\n\t\tprint(f\"[mbpp] loaded {len(texts)} / {total} train samples\")\n\t\treturn texts\n\texcept Exception as e:\n\t\tprint(f\"[mbpp] failed via datasets; using tiny fallback: {e}\")\n\t\treturn [\n\t\t\t\"def add(x, y): return x + y\",\n\t\t\t\"class Foo:\\n def bar(self):\\n return 42\",\n\t\t\t\"for i in range(10): print(i)\",\n\t\t]\n\n\ndef main() -> None:\n\texample_dir = Path(__file__).resolve()\n\tartifacts_root = example_dir / \"datasets\" / \"hf_lora_mbpp\"\n\tpaths = TrainPaths(\n\t\tadapters_dir=artifacts_root / \"lora_adapter\",\n\t\tout_dir=artifacts_root / \"outputs\",\n\t)\n\t_ensure_dirs(paths)\n\tstate_path = artifacts_root / \".program_state.json\"\n\n\t# If not specified, prefer GPUs 0,1\n\tif not os.getenv(\"CUDA_VISIBLE_DEVICES\"):\n\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\n\n\tmodel_id = os.getenv(\"LLAMA_MODEL_ID\", \"meta-llama/Llama-3.1-8B-Instruct\")\n\tmax_n = int(os.getenv(\"MBPP_MAX_N\", \"120\"))\n\tcontext_len = int(os.getenv(\"MAX_LEN\", \"256\"))\n\tlr = float(os.getenv(\"LR\", \"5e-5\"))\n\tnum_epochs = float(os.getenv(\"EPOCHS\", \"1\"))\n\tper_device_batch = int(os.getenv(\"BATCH_SIZE\", \"1\"))\n\tgrad_accum = int(os.getenv(\"GRAD_ACCUM_STEPS\", \"8\"))\n\twarmup_ratio = float(os.getenv(\"WARMUP_RATIO\", \"0.1\"))\n\tweight_decay = 
float(os.getenv(\"WEIGHT_DECAY\", \"0.01\"))\n\tsave_steps = int(os.getenv(\"SAVE_STEPS\", \"0\"))\n\tlogging_steps = int(os.getenv(\"LOG_STEPS\", \"25\"))\n\tseed = int(os.getenv(\"SEED\", \"0\"))\n\tuse_qlora = os.getenv(\"USE_QLORA\", \"1\") == \"1\"\n\n\ttorch.manual_seed(seed)\n\tif torch.cuda.is_available():\n\t\ttorch.cuda.manual_seed_all(seed)\n\n\t# Tokenizer/model\n\tfrom transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, Trainer, TrainingArguments # type: ignore\n\ttry:\n\t\tfrom peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training, PeftModel # type: ignore\n\texcept Exception as e:\n\t\tprint(f\"[lora] PEFT not available: {e}\")\n\t\tprint(\"Please install 'peft' to train adapters: pip install peft\")\n\t\tsys.exit(2)\n\n\ttok = AutoTokenizer.from_pretrained(model_id)\n\tif tok.pad_token_id is None and tok.eos_token is not None:\n\t\ttok.pad_token = tok.eos_token\n\tpad_id = int(tok.pad_token_id or 0)\n\n\tprint(f\"[model] loading {model_id}\")\n\tmodel_kwargs: Dict[str, Any] = {}\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tvisible = os.environ.get(\"CUDA_VISIBLE_DEVICES\", \"\")\n\tnum_visible = len([v for v in visible.split(\",\") if v.strip()]) if visible else torch.cuda.device_count()\n\tif use_qlora:\n\t\ttry:\n\t\t\tfrom transformers import BitsAndBytesConfig # type: ignore\n\t\t\tbnb_config = BitsAndBytesConfig(\n\t\t\t\tload_in_4bit=True,\n\t\t\t\tbnb_4bit_quant_type=\"nf4\",\n\t\t\t\tbnb_4bit_compute_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,\n\t\t\t)\n\t\t\tmodel_kwargs[\"quantization_config\"] = bnb_config\n\t\t\t# Prefer layer-wise sharding over DataParallel\n\t\t\tmodel_kwargs[\"device_map\"] = \"auto\" if num_visible > 1 else None\n\t\texcept Exception as e:\n\t\t\tprint(f\"[qlora] BitsAndBytes not available, falling back to full precision: {e}\")\n\t\t\tmodel_kwargs[\"device_map\"] = None\n\t\t\tmodel_kwargs[\"torch_dtype\"] = torch.bfloat16 if torch.cuda.is_available() else torch.float32\n\telse:\n\t\t# If multiple GPUs, shard with accelerate device_map\n\t\tmodel_kwargs[\"device_map\"] = \"auto\" if num_visible > 1 else None\n\t\tmodel_kwargs[\"torch_dtype\"] = torch.bfloat16 if torch.cuda.is_available() else torch.float32\n\n\tmodel = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)\n\t# Disable cache when using gradient checkpointing\n\ttry:\n\t\tmodel.config.use_cache = False\n\texcept Exception:\n\t\tpass\n\t# Only move to single device when not using device_map auto\n\tif model_kwargs.get(\"device_map\", None) is None:\n\t\tmodel = model.to(device)\n\t# Prepare for k-bit training (enables input grads, casts norms, etc.)\n\tif use_qlora:\n\t\ttry:\n\t\t\tmodel = prepare_model_for_kbit_training(model)\n\t\texcept Exception as e:\n\t\t\tprint(f\"[qlora] prepare_model_for_kbit_training failed: {e}\")\n\t# Optional: gradient checkpointing for memory\n\tif hasattr(model, \"gradient_checkpointing_enable\"):\n\t\ttry:\n\t\t\tmodel.gradient_checkpointing_enable()\n\t\texcept Exception:\n\t\t\tpass\n\n\t# Attach LoRA to attention and MLP projections\n\ttarget_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"]\n\tlora_r = int(os.getenv(\"LORA_R\", \"8\"))\n\tlora_alpha = int(os.getenv(\"LORA_ALPHA\", \"16\"))\n\tlora_dropout = float(os.getenv(\"LORA_DROPOUT\", \"0.05\"))\n\tresume = os.getenv(\"RESUME\", \"0\") == \"1\"\n\tadapter_override = os.getenv(\"ADAPTER_PATH\", 
\"\").strip()\n\tloaded_adapter_path = None\n\tif adapter_override:\n\t\tloaded_adapter_path = adapter_override\n\telif state_path.exists():\n\t\ttry:\n\t\t\tprev_state = json.loads(state_path.read_text())\n\t\t\tloaded_adapter_path = str(prev_state.get(\"last_adapter_path\") or \"\").strip() or None\n\t\texcept Exception:\n\t\t\tloaded_adapter_path = None\n\t# If resume is requested but no adapter path is available, fall back to creating a new LoRA\n\tif resume and not loaded_adapter_path:\n\t\tresume = False\n\tif resume and loaded_adapter_path:\n\t\ttry:\n\t\t\tmodel = PeftModel.from_pretrained(model, loaded_adapter_path, is_trainable=True)\n\t\t\tprint(f\"[resume] loaded adapter from {loaded_adapter_path}\")\n\t\texcept Exception as e:\n\t\t\tprint(f\"[resume] failed to load adapter at {loaded_adapter_path}: {e}\")\n\t\t\tresume = False\n\tif not resume:\n\t\tlora_cfg = LoraConfig(\n\t\t\tr=lora_r,\n\t\t\tlora_alpha=lora_alpha,\n\t\t\ttarget_modules=target_modules,\n\t\t\tlora_dropout=lora_dropout,\n\t\t\tbias=\"none\",\n\t\t\ttask_type=\"CAUSAL_LM\",\n\t\t)\n\t\tmodel = get_peft_model(model, lora_cfg)\n\t# Print trainable parameter summary without relying on PEFT helper\n\ttry:\n\t\ttrainable = sum(p.numel() for p in model.parameters() if p.requires_grad)\n\t\ttotal = sum(p.numel() for p in model.parameters())\n\t\tratio = (float(trainable) / float(max(1, total))) * 100.0\n\t\tprint(f\"trainable params: {trainable:,} || all params: {total:,} || trainable%: {ratio:.4f}\")\n\texcept Exception:\n\t\tpass\n\n\t# Trainer with custom loss that avoids passing labels to model forward\n\tfrom transformers import Trainer, TrainingArguments # type: ignore\n\tclass LossTrainer(Trainer):\n\t\tdef __init__(self, pad_id: int, *args, **kwargs) -> None:\n\t\t\tsuper().__init__(*args, **kwargs)\n\t\t\tself._pad_id = int(pad_id)\n\t\tdef compute_loss(self, model, inputs, return_outputs: bool = False, num_items_in_batch=None):\n\t\t\tlabels = inputs.get(\"labels\", None)\n\t\t\tif labels is None:\n\t\t\t\treturn super().compute_loss(model, inputs, return_outputs=return_outputs)\n\t\t\tinp = {k: v for k, v in inputs.items() if k != \"labels\"}\n\t\t\toutputs = model(**inp)\n\t\t\tlogits = outputs.get(\"logits\")\n\t\t\tvocab = logits.size(-1)\n\t\t\tshift_logits = logits[..., :-1, :].contiguous()\n\t\t\tshift_labels = labels[..., 1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(self._pad_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\treturn (loss, outputs) if return_outputs else loss\n\n\t# Build dataset(s) via configurable loader (local-first, then HF)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")\n\tsequential = os.getenv(\"SEQUENTIAL_TRAIN\", \"1\") == \"1\"\n\t# In sequential mode, default to skipping datasets we've already trained\n\ttrain_new_only = os.getenv(\"TRAIN_NEW_ONLY\", \"1\" if sequential else \"0\") == \"1\"\n\tloaded_names: List[str] = []\n\ttotal_samples = 0\n\tfrom datasets import Dataset as HFDataset # type: ignore\n\tif sequential:\n\t\titer_program_texts = None\n\t\ttry:\n\t\t\tfrom examples.scripts.datasets_loader import iter_program_texts as _ipt # type: ignore\n\t\t\titer_program_texts = _ipt\n\t\texcept Exception:\n\t\t\t# Fallback: import from file path to avoid relying on PYTHONPATH\n\t\t\ttry:\n\t\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t\t_loader_path = (example_dir.parent / 
\"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))\n\t\t\t\tif _spec and _spec.loader:\n\t\t\t\t\t_mod = _imputil.module_from_spec(_spec)\n\t\t\t\t\t_spec.loader.exec_module(_mod) # type: ignore\n\t\t\t\t\titer_program_texts = getattr(_mod, \"iter_program_texts\", None) # type: ignore\n\t\t\texcept Exception:\n\t\t\t\titer_program_texts = None\n\t\tif iter_program_texts is not None:\n\t\t\tsource_iter = iter_program_texts(\n\t\t\t\texample_dir=str(example_dir),\n\t\t\t\tconfig_path=datasets_cfg or None,\n\t\t\t\ttrain_new_only=train_new_only,\n\t\t\t\tstate_path=str(state_path),\n\t\t\t)\n\t\t\tsaw_any = False\n\t\t\tfor ds_texts, src_name in source_iter:\n\t\t\t\tif not ds_texts:\n\t\t\t\t\tcontinue\n\t\t\t\tsaw_any = True\n\t\t\t\traw = HFDataset.from_dict({\"text\": ds_texts})\n\t\t\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\t\t\treturn out\n\t\t\t\ttokenized = raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\t\t\t\t# LM data collator will create labels, and default ignore_index=-100\n\t\t\t\tcollator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)\n\t\t\t\ttrain_args = TrainingArguments(\n\t\t\t\t\toutput_dir=str(paths.out_dir / \"trainer\"),\n\t\t\t\t\tper_device_train_batch_size=per_device_batch,\n\t\t\t\t\tgradient_accumulation_steps=grad_accum,\n\t\t\t\t\tlearning_rate=lr,\n\t\t\t\t\tweight_decay=weight_decay,\n\t\t\t\t\tnum_train_epochs=num_epochs,\n\t\t\t\t\twarmup_ratio=warmup_ratio,\n\t\t\t\t\tlogging_steps=logging_steps,\n\t\t\t\t\tsave_steps=save_steps,\n\t\t\t\t\tsave_total_limit=1,\n\t\t\t\t\treport_to=[],\n\t\t\t\t\tseed=seed,\n\t\t\t\t\tbf16=torch.cuda.is_available(),\n\t\t\t\t\tfp16=False if torch.cuda.is_available() else False,\n\t\t\t\t)\n\t\t\t\ttrainer = LossTrainer(\n\t\t\t\t\tpad_id=pad_id,\n\t\t\t\t\tmodel=model,\n\t\t\t\t\targs=train_args,\n\t\t\t\t\ttrain_dataset=tokenized,\n\t\t\t\t\tdata_collator=collator,\n\t\t\t\t)\n\t\t\t\tprint(f\"[train] starting on {src_name} ({len(ds_texts)} samples)\")\n\t\t\t\t_ = trainer.train()\n\t\t\t\tprint(f\"[train] done {src_name}\")\n\t\t\t\t# Save LoRA adapter incrementally and update state\n\t\t\t\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\t\t\t\tmodel.save_pretrained(str(paths.adapters_dir))\n\t\t\t\ttry:\n\t\t\t\t\tstate = {}\n\t\t\t\t\tif state_path.exists():\n\t\t\t\t\t\tstate = json.loads(state_path.read_text())\n\t\t\t\t\tstate[\"last_adapter_path\"] = str(paths.adapters_dir)\n\t\t\t\t\tseen = set(state.get(\"datasets_seen\", []))\n\t\t\t\t\tseen.add(src_name)\n\t\t\t\t\tstate[\"datasets_seen\"] = sorted(list(seen))\n\t\t\t\t\tstate_path.write_text(json.dumps(state, indent=2), encoding=\"utf-8\")\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\t\t\tloaded_names.append(src_name)\n\t\t\t\ttotal_samples += len(ds_texts)\n\t\t\tif not saw_any:\n\t\t\t\t# Fallback to MBPP when nothing configured/available\n\t\t\t\tds_texts = _load_mbpp_texts(max_n=max_n)\n\t\t\t\traw = HFDataset.from_dict({\"text\": ds_texts})\n\t\t\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\t\t\treturn out\n\t\t\t\ttokenized = raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\t\telse:\n\t\t\t# Fall through to non-sequential path on missing iterator\n\t\t\tsequential = False\n\tif not 
sequential:\n\t\ttry:\n\t\t\tfrom examples.scripts.datasets_loader import load_program_texts # type: ignore\n\t\texcept Exception:\n\t\t\t# Fallback: load loader directly from file path without altering sys.path\n\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))\n\t\t\tif _spec and _spec.loader:\n\t\t\t\t_mod = _imputil.module_from_spec(_spec)\n\t\t\t\t_spec.loader.exec_module(_mod) # type: ignore\n\t\t\t\tload_program_texts = getattr(_mod, \"load_program_texts\") # type: ignore\n\t\t\telse:\n\t\t\t\traise\n\t\ttexts, loaded_names = load_program_texts(\n\t\t\texample_dir=str(example_dir),\n\t\t\tconfig_path=datasets_cfg or None,\n\t\t\ttrain_new_only=train_new_only,\n\t\t\tstate_path=str(state_path),\n\t\t)\n\t\tif not texts:\n\t\t\t# Fallback to MBPP train only if config produced nothing\n\t\t\ttexts = _load_mbpp_texts(max_n=max_n)\n\t\t\tloaded_names = [\"mbpp:train\"]\n\t\traw = HFDataset.from_dict({\"text\": texts})\n\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\treturn out\n\t\ttokenized = raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\t\ttotal_samples = len(texts)\n\n\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\treturn out\n\n\tif not sequential:\n\t\t# LM data collator will create labels, and default ignore_index=-100\n\t\tcollator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)\n\t\t# Training config\n\t\ttrain_args = TrainingArguments(\n\t\t\toutput_dir=str(paths.out_dir / \"trainer\"),\n\t\t\tper_device_train_batch_size=per_device_batch,\n\t\t\tgradient_accumulation_steps=grad_accum,\n\t\t\tlearning_rate=lr,\n\t\t\tweight_decay=weight_decay,\n\t\t\tnum_train_epochs=num_epochs,\n\t\t\twarmup_ratio=warmup_ratio,\n\t\t\tlogging_steps=logging_steps,\n\t\t\tsave_steps=save_steps,\n\t\t\tsave_total_limit=1,\n\t\t\treport_to=[],\n\t\t\tseed=seed,\n\t\t\tbf16=torch.cuda.is_available(),\n\t\t\tfp16=False if torch.cuda.is_available() else False,\n\t\t)\n\t\n\tif not sequential:\n\t\ttrainer = LossTrainer(\n\t\t\tpad_id=pad_id,\n\t\t\tmodel=model,\n\t\t\targs=train_args,\n\t\t\ttrain_dataset=tokenized,\n\t\t\tdata_collator=collator,\n\t\t)\n\t\tprint(\"[train] starting\")\n\t\ttrain_out = trainer.train()\n\t\tprint(\"[train] done\")\n\telse:\n\t\ttrain_out = type(\"obj\", (), {\"metrics\": {}})() # minimal placeholder\n\n\t# Save LoRA adapter\n\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\tmodel.save_pretrained(str(paths.adapters_dir))\n\t# Update program state for continuous training\n\ttry:\n\t\tstate = {}\n\t\tif state_path.exists():\n\t\t\tstate = json.loads(state_path.read_text())\n\t\tstate[\"last_adapter_path\"] = str(paths.adapters_dir)\n\t\tseen = set(state.get(\"datasets_seen\", []))\n\t\tseen.update(loaded_names or [])\n\t\tstate[\"datasets_seen\"] = sorted(list(seen))\n\t\tstate[\"last_training\"] = {\n\t\t\t\"samples\": total_samples,\n\t\t\t\"epochs\": num_epochs,\n\t\t\t\"batch_size\": per_device_batch,\n\t\t\t\"grad_accum\": grad_accum,\n\t\t\t\"lr\": lr,\n\t\t\t\"max_len\": context_len,\n\t\t}\n\t\tstate_path.write_text(json.dumps(state, indent=2), encoding=\"utf-8\")\n\t\tprint(f\"[state] updated {state_path}\")\n\texcept Exception as 
e:\n\t\tprint(f\"[state] failed to update program state: {e}\")\n\t# Save report\n\tmetrics = train_out.metrics if hasattr(train_out, \"metrics\") else {}\n\treport = {\n\t\t\"model_id\": model_id,\n\t\t\"num_samples\": total_samples,\n\t\t\"epochs\": num_epochs,\n\t\t\"batch_size\": per_device_batch,\n\t\t\"grad_accum\": grad_accum,\n\t\t\"lr\": lr,\n\t\t\"warmup_ratio\": warmup_ratio,\n\t\t\"weight_decay\": weight_decay,\n\t\t\"lora\": {\n\t\t\t\"r\": lora_r,\n\t\t\t\"alpha\": lora_alpha,\n\t\t\t\"dropout\": lora_dropout,\n\t\t\t\"targets\": target_modules,\n\t\t},\n\t\t\"metrics\": metrics,\n\t\t\"adapter_path\": str(paths.adapters_dir),\n\t}\n\twith open(paths.out_dir / \"LoraTrainingReport.json\", \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps({\"schema_version\": 1, \"LoraTrainingReport\": report}, indent=2))\n\tprint(str(paths.out_dir / \"LoraTrainingReport.json\"))\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_llama_lora_train_mbpp.LossTrainer","uri":"program://Program_Conditioned_Adapter/class/examples.dataset_trainer.run_llama_lora_train_mbpp.LossTrainer#L202-L222","kind":"class","name":"LossTrainer","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":202,"end_line":222,"context_start_line":182,"context_end_line":242,"code":"\t\tlora_cfg = LoraConfig(\n\t\t\tr=lora_r,\n\t\t\tlora_alpha=lora_alpha,\n\t\t\ttarget_modules=target_modules,\n\t\t\tlora_dropout=lora_dropout,\n\t\t\tbias=\"none\",\n\t\t\ttask_type=\"CAUSAL_LM\",\n\t\t)\n\t\tmodel = get_peft_model(model, lora_cfg)\n\t# Print trainable parameter summary without relying on PEFT helper\n\ttry:\n\t\ttrainable = sum(p.numel() for p in model.parameters() if p.requires_grad)\n\t\ttotal = sum(p.numel() for p in model.parameters())\n\t\tratio = (float(trainable) / float(max(1, total))) * 100.0\n\t\tprint(f\"trainable params: {trainable:,} || all params: {total:,} || trainable%: {ratio:.4f}\")\n\texcept Exception:\n\t\tpass\n\n\t# Trainer with custom loss that avoids passing labels to model forward\n\tfrom transformers import Trainer, TrainingArguments # type: ignore\n\tclass LossTrainer(Trainer):\n\t\tdef __init__(self, pad_id: int, *args, **kwargs) -> None:\n\t\t\tsuper().__init__(*args, **kwargs)\n\t\t\tself._pad_id = int(pad_id)\n\t\tdef compute_loss(self, model, inputs, return_outputs: bool = False, num_items_in_batch=None):\n\t\t\tlabels = inputs.get(\"labels\", None)\n\t\t\tif labels is None:\n\t\t\t\treturn super().compute_loss(model, inputs, return_outputs=return_outputs)\n\t\t\tinp = {k: v for k, v in inputs.items() if k != \"labels\"}\n\t\t\toutputs = model(**inp)\n\t\t\tlogits = outputs.get(\"logits\")\n\t\t\tvocab = logits.size(-1)\n\t\t\tshift_logits = logits[..., :-1, :].contiguous()\n\t\t\tshift_labels = labels[..., 1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(self._pad_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\treturn (loss, outputs) if return_outputs else loss\n\n\t# Build dataset(s) via configurable loader (local-first, then HF)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")\n\tsequential = os.getenv(\"SEQUENTIAL_TRAIN\", \"1\") == \"1\"\n\t# In sequential mode, default to skipping datasets 
we've already trained\n\ttrain_new_only = os.getenv(\"TRAIN_NEW_ONLY\", \"1\" if sequential else \"0\") == \"1\"\n\tloaded_names: List[str] = []\n\ttotal_samples = 0\n\tfrom datasets import Dataset as HFDataset # type: ignore\n\tif sequential:\n\t\titer_program_texts = None\n\t\ttry:\n\t\t\tfrom examples.scripts.datasets_loader import iter_program_texts as _ipt # type: ignore\n\t\t\titer_program_texts = _ipt\n\t\texcept Exception:\n\t\t\t# Fallback: import from file path to avoid relying on PYTHONPATH\n\t\t\ttry:\n\t\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_llama_lora_train_mbpp._tok_fn","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_llama_lora_train_mbpp._tok_fn#L314-L316","kind":"function","name":"_tok_fn","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":314,"end_line":316,"context_start_line":294,"context_end_line":336,"code":"\t\t\t\t# Save LoRA adapter incrementally and update state\n\t\t\t\tpaths.adapters_dir.mkdir(parents=True, exist_ok=True)\n\t\t\t\tmodel.save_pretrained(str(paths.adapters_dir))\n\t\t\t\ttry:\n\t\t\t\t\tstate = {}\n\t\t\t\t\tif state_path.exists():\n\t\t\t\t\t\tstate = json.loads(state_path.read_text())\n\t\t\t\t\tstate[\"last_adapter_path\"] = str(paths.adapters_dir)\n\t\t\t\t\tseen = set(state.get(\"datasets_seen\", []))\n\t\t\t\t\tseen.add(src_name)\n\t\t\t\t\tstate[\"datasets_seen\"] = sorted(list(seen))\n\t\t\t\t\tstate_path.write_text(json.dumps(state, indent=2), encoding=\"utf-8\")\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\t\t\tloaded_names.append(src_name)\n\t\t\t\ttotal_samples += len(ds_texts)\n\t\t\tif not saw_any:\n\t\t\t\t# Fallback to MBPP when nothing configured/available\n\t\t\t\tds_texts = _load_mbpp_texts(max_n=max_n)\n\t\t\t\traw = HFDataset.from_dict({\"text\": ds_texts})\n\t\t\t\tdef _tok_fn(row: Dict[str, Any]) -> Dict[str, Any]:\n\t\t\t\t\tout = tok(row[\"text\"], truncation=True, max_length=context_len, padding=\"max_length\")\n\t\t\t\t\treturn out\n\t\t\t\ttokenized = raw.map(_tok_fn, batched=False, remove_columns=[\"text\"])\n\t\telse:\n\t\t\t# Fall through to non-sequential path on missing iterator\n\t\t\tsequential = False\n\tif not sequential:\n\t\ttry:\n\t\t\tfrom examples.scripts.datasets_loader import load_program_texts # type: ignore\n\t\texcept Exception:\n\t\t\t# Fallback: load loader directly from file path without altering sys.path\n\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))\n\t\t\tif _spec and _spec.loader:\n\t\t\t\t_mod = _imputil.module_from_spec(_spec)\n\t\t\t\t_spec.loader.exec_module(_mod) # type: ignore\n\t\t\t\tload_program_texts = getattr(_mod, \"load_program_texts\") # type: ignore\n\t\t\telse:\n\t\t\t\traise\n\t\ttexts, loaded_names = load_program_texts(\n\t\t\texample_dir=str(example_dir),","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} 
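The _tok_fn and LossTrainer records around this point all hinge on one pattern: tokenize with padding=\"max_length\", then compute a next-token loss that ignores pad positions. A minimal standalone sketch of that shift-and-mask loss follows; the shapes, pad id, and random tensors are illustrative stand-ins, not values from the repo.

import torch
import torch.nn.functional as F

B, T, V, PAD = 2, 8, 50, 0  # batch, seq length, vocab size, pad id (all hypothetical)
logits = torch.randn(B, T, V)         # stand-in for model(**inp)["logits"]
labels = torch.randint(1, V, (B, T))
labels[:, -3:] = PAD                  # pretend the tail of each row is padding

# Position t predicts token t+1: drop the last logit and the first label.
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Map pad ids to -100 so cross_entropy skips them (its default ignore_index).
shift_labels = shift_labels.masked_fill(shift_labels == PAD, -100)

loss = F.cross_entropy(
	shift_logits.view(-1, V),
	shift_labels.view(-1),
	ignore_index=-100,
)
print(float(loss))

This also shows why compute_loss strips "labels" from the forward inputs: the model is never asked to compute its own loss, and padded positions contribute no gradient.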
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_llama_lora_train_mbpp.__init__","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_llama_lora_train_mbpp.__init__#L203-L205","kind":"function","name":"__init__","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":203,"end_line":205,"context_start_line":183,"context_end_line":225,"code":"\t\t\tr=lora_r,\n\t\t\tlora_alpha=lora_alpha,\n\t\t\ttarget_modules=target_modules,\n\t\t\tlora_dropout=lora_dropout,\n\t\t\tbias=\"none\",\n\t\t\ttask_type=\"CAUSAL_LM\",\n\t\t)\n\t\tmodel = get_peft_model(model, lora_cfg)\n\t# Print trainable parameter summary without relying on PEFT helper\n\ttry:\n\t\ttrainable = sum(p.numel() for p in model.parameters() if p.requires_grad)\n\t\ttotal = sum(p.numel() for p in model.parameters())\n\t\tratio = (float(trainable) / float(max(1, total))) * 100.0\n\t\tprint(f\"trainable params: {trainable:,} || all params: {total:,} || trainable%: {ratio:.4f}\")\n\texcept Exception:\n\t\tpass\n\n\t# Trainer with custom loss that avoids passing labels to model forward\n\tfrom transformers import Trainer, TrainingArguments # type: ignore\n\tclass LossTrainer(Trainer):\n\t\tdef __init__(self, pad_id: int, *args, **kwargs) -> None:\n\t\t\tsuper().__init__(*args, **kwargs)\n\t\t\tself._pad_id = int(pad_id)\n\t\tdef compute_loss(self, model, inputs, return_outputs: bool = False, num_items_in_batch=None):\n\t\t\tlabels = inputs.get(\"labels\", None)\n\t\t\tif labels is None:\n\t\t\t\treturn super().compute_loss(model, inputs, return_outputs=return_outputs)\n\t\t\tinp = {k: v for k, v in inputs.items() if k != \"labels\"}\n\t\t\toutputs = model(**inp)\n\t\t\tlogits = outputs.get(\"logits\")\n\t\t\tvocab = logits.size(-1)\n\t\t\tshift_logits = logits[..., :-1, :].contiguous()\n\t\t\tshift_labels = labels[..., 1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(self._pad_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\treturn (loss, outputs) if return_outputs else loss\n\n\t# Build dataset(s) via configurable loader (local-first, then HF)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_llama_lora_train_mbpp.compute_loss","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_llama_lora_train_mbpp.compute_loss#L206-L222","kind":"function","name":"compute_loss","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":206,"end_line":222,"context_start_line":186,"context_end_line":242,"code":"\t\t\tlora_dropout=lora_dropout,\n\t\t\tbias=\"none\",\n\t\t\ttask_type=\"CAUSAL_LM\",\n\t\t)\n\t\tmodel = get_peft_model(model, lora_cfg)\n\t# Print trainable parameter summary without relying on PEFT helper\n\ttry:\n\t\ttrainable = sum(p.numel() for p in model.parameters() if p.requires_grad)\n\t\ttotal = sum(p.numel() for p in model.parameters())\n\t\tratio = (float(trainable) / float(max(1, total))) * 100.0\n\t\tprint(f\"trainable params: {trainable:,} || all params: {total:,} || trainable%: {ratio:.4f}\")\n\texcept Exception:\n\t\tpass\n\n\t# Trainer with custom loss that avoids passing labels to model forward\n\tfrom 
transformers import Trainer, TrainingArguments # type: ignore\n\tclass LossTrainer(Trainer):\n\t\tdef __init__(self, pad_id: int, *args, **kwargs) -> None:\n\t\t\tsuper().__init__(*args, **kwargs)\n\t\t\tself._pad_id = int(pad_id)\n\t\tdef compute_loss(self, model, inputs, return_outputs: bool = False, num_items_in_batch=None):\n\t\t\tlabels = inputs.get(\"labels\", None)\n\t\t\tif labels is None:\n\t\t\t\treturn super().compute_loss(model, inputs, return_outputs=return_outputs)\n\t\t\tinp = {k: v for k, v in inputs.items() if k != \"labels\"}\n\t\t\toutputs = model(**inp)\n\t\t\tlogits = outputs.get(\"logits\")\n\t\t\tvocab = logits.size(-1)\n\t\t\tshift_logits = logits[..., :-1, :].contiguous()\n\t\t\tshift_labels = labels[..., 1:].contiguous().to(shift_logits.device)\n\t\t\tshift_labels = shift_labels.masked_fill(shift_labels == int(self._pad_id), -100)\n\t\t\tloss = F.cross_entropy(\n\t\t\t\tshift_logits.view(-1, vocab),\n\t\t\t\tshift_labels.view(-1),\n\t\t\t\tignore_index=-100,\n\t\t\t)\n\t\t\treturn (loss, outputs) if return_outputs else loss\n\n\t# Build dataset(s) via configurable loader (local-first, then HF)\n\tdatasets_cfg = os.getenv(\"DATASETS_CONFIG\", \"\")\n\tsequential = os.getenv(\"SEQUENTIAL_TRAIN\", \"1\") == \"1\"\n\t# In sequential mode, default to skipping datasets we've already trained\n\ttrain_new_only = os.getenv(\"TRAIN_NEW_ONLY\", \"1\" if sequential else \"0\") == \"1\"\n\tloaded_names: List[str] = []\n\ttotal_samples = 0\n\tfrom datasets import Dataset as HFDataset # type: ignore\n\tif sequential:\n\t\titer_program_texts = None\n\t\ttry:\n\t\t\tfrom examples.scripts.datasets_loader import iter_program_texts as _ipt # type: ignore\n\t\t\titer_program_texts = _ipt\n\t\texcept Exception:\n\t\t\t# Fallback: import from file path to avoid relying on PYTHONPATH\n\t\t\ttry:\n\t\t\t\timport importlib.util as _imputil # type: ignore\n\t\t\t\t_loader_path = (example_dir.parent / \"scripts\" / \"datasets_loader.py\").resolve()\n\t\t\t\t_spec = _imputil.spec_from_file_location(\"datasets_loader\", str(_loader_path))","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_smoke_example","uri":"program://Program_Conditioned_Adapter/module/examples.dataset_trainer.run_smoke_example#L1-L213","kind":"module","name":"examples.dataset_trainer.run_smoke_example","path":"examples/dataset_trainer/run_smoke_example.py","language":"python","start_line":1,"end_line":213,"context_start_line":1,"context_end_line":213,"code":"from __future__ import annotations\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict, Any\nimport math\nimport random\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nROOT = EX_DIR\nPG_BACKEND = \"examples.scripts.dataset_graph:DatasetProgramGraph\"\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\ndef _run(cmd: List[str]) -> int:\n\tprint(\"[run]\", \" \".join(cmd))\n\treturn os.system(\" \".join(cmd))\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: 
self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\t# pad left simple\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids, \"attn_mask\": attn}\n\n\ndef main() -> None:\n\tcfg = load_program_config(str(ROOT))\n\tadapters_dir = cfg.paths.adapters_dir\n\tout_dir = cfg.paths.out_dir\n\ttelemetry_json = cfg.paths.telemetry_path\n\tadapters_dir.mkdir(parents=True, exist_ok=True)\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\n\tif str(repo_root) not in sys.path:\n\t\tsys.path.insert(0, str(repo_root))\n\tpy_path = os.environ.get(\"PYTHONPATH\") or \"\"\n\tos.environ[\"PYTHONPATH\"] = f\"{str(repo_root)}{(':' + py_path) if py_path else ''}\"\n\t# Reproducibility\n\tseed = int(os.getenv(\"SEED\", \"0\"))\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\ttorch.manual_seed(seed)\n\tif torch.cuda.is_available():\n\t\ttorch.cuda.manual_seed_all(seed)\n\n\t# 1) Optional: Build ProgramGraph caches/adapters (kept for consistent manifest/state)\n\trc = _run([\n\t\tsys.executable,\n\t\tstr(EX_DIR / \"build.py\"),\n\t\t\"--sources\", str(ROOT),\n\t\t\"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n\t\t\"--adapters-dir\", str(adapters_dir),\n\t\t\"--embed-dim\", \"256\",\n\t\t\"--include-text\",\n\t\t\"--text-max-bytes\", \"20000\",\n\t\t\"--pg-backend\", PG_BACKEND,\n\t\t\"--graph-prop-hops\", \"2\",\n\t\t\"--graph-prop-damp\", \"0.85\",\n\t\t\"--init-program-state\",\n\t\t\"--program-state-path\", str(cfg.paths.program_state_path),\n\t\t\"--seed\", \"0\",\n\t\t\"--verbose\",\n\t])\n\tif rc != 0:\n\t\tsys.exit(rc)\n\n\t# 2) Build a tokenized dataset: prefer MBPP if enabled, else synthetic\n\tfrom data.tokenizer import LocalLlamaTokenizer # type: ignore\n\t# Use a local snapshot directory if available; otherwise fallback to a whitespace tokenizer\n\tsnap_default = os.environ.get(\"LLAMA_SNAPSHOT_DIR\", str(repo_root / \"checkpoints\" / \"llama\"))\n\ttry:\n\t\ttok = LocalLlamaTokenizer(snap_default)\n\texcept Exception:\n\t\tfrom data.tokenizer import WhitespaceTokenizer # type: ignore\n\t\ttok = WhitespaceTokenizer()\n\ttexts = []\n\tuse_mbpp = os.getenv(\"USE_MBPP\", \"0\") == \"1\"\n\tif use_mbpp:\n\t\ttry:\n\t\t\tfrom examples.scripts.mbpp_loader import load_mbpp_texts # type: ignore\n\t\t\tmax_n = int(os.getenv(\"MBPP_MAX_N\", \"64\"))\n\t\t\tmbpp_texts, total = load_mbpp_texts(max_n=max_n, split=\"train\")\n\t\t\tif mbpp_texts:\n\t\t\t\tprint(f\"[mbpp] loaded {len(mbpp_texts)} / {total} samples\")\n\t\t\t\ttexts = mbpp_texts\n\t\texcept Exception as e:\n\t\t\tprint(f\"[mbpp] failed to load via HF datasets: {e}\")\n\tif not texts:\n\t\ttexts = [\n\t\t\t\"def add(x, y): return x + y\",\n\t\t\t\"class Foo:\\n def bar(self):\\n return 42\",\n\t\t\t\"for i in range(10): print(i)\",\n\t\t]\n\tds = SimpleLmDataset(tok, texts, max_len=128)\n\n\t# 3) Build a small model from our local stack and train for a few steps\n\tfrom specs.config import ModelConfig # type: ignore\n\tfrom model.factory import build_causal_lm # type: ignore\n\n\tcfg_m = ModelConfig(\n\t\td_model=256, n_heads=8, n_layers=2, d_ff=1024, vocab_size=int(getattr(tok, \"vocab_size\", 32000) or 32000),\n\t\tdtype=\"float32\", kv_cache_paged=False, attn_impl=\"eager\",\n\t)\n\tmodel = 
build_causal_lm(cfg_m, block=\"llama\", compress=None)\n\tmodel.train()\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tmodel = model.to(device)\n\n\tlr = float(os.getenv(\"LR\", \"1e-4\"))\n\topt = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=0.01)\n\t# Ignore padding in loss\n\tpad_id = int(getattr(tok, \"pad_token_id\", 0) or 0)\n\tloss_fn = torch.nn.CrossEntropyLoss(ignore_index=-100)\n\n\t# DataLoader with batching and shuffling\n\tbatch_size = int(os.getenv(\"BATCH_SIZE\", \"8\"))\n\tdl = DataLoader(ds, batch_size=batch_size, shuffle=True, drop_last=False)\n\n\tsteps = int(os.getenv(\"TRAIN_STEPS\", \"16\"))\n\tgrad_accum = int(os.getenv(\"GRAD_ACCUM_STEPS\", \"1\"))\n\twarmup_ratio = float(os.getenv(\"WARMUP_RATIO\", \"0.1\"))\n\twarmup_steps = int(os.getenv(\"WARMUP_STEPS\", str(max(1, int(steps * warmup_ratio)))))\n\tmin_lr_ratio = float(os.getenv(\"MIN_LR_RATIO\", \"0.1\"))\n\tdef _lr_lambda(current_step: int) -> float:\n\t\tif current_step < warmup_steps:\n\t\t\treturn max(1e-6, float(current_step + 1) / float(max(1, warmup_steps)))\n\t\tprogress = float(current_step - warmup_steps) / float(max(1, steps - warmup_steps))\n\t\tcosine = 0.5 * (1.0 + math.cos(math.pi * min(1.0, max(0.0, progress))))\n\t\treturn float(min_lr_ratio + (1.0 - min_lr_ratio) * cosine)\n\tscheduler = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=_lr_lambda)\n\n\ttotal_loss = 0.0\n\tprint(f\"[train] steps={steps}, dataset_size={len(ds)}, batch_size={batch_size}, grad_accum={grad_accum}, lr={lr}\")\n\tdef infinite_batches():\n\t\twhile True:\n\t\t\tfor batch in dl:\n\t\t\t\tyield {k: v.to(device) for k, v in batch.items()}\n\tbatches = infinite_batches()\n\tfor step in range(steps):\n\t\topt.zero_grad(set_to_none=True)\n\t\tstep_loss = 0.0\n\t\tfor _ in range(max(1, grad_accum)):\n\t\t\tbatch = next(batches)\n\t\t\tout = model(input_ids=batch[\"input_ids\"], attention_mask=batch[\"attn_mask\"], return_dict=True)\n\t\t\tlogits = out[\"logits\"] # [B, T, V]\n\t\t\tvocab = logits.size(-1)\n\t\t\t# Shift logits and labels for next-token prediction\n\t\t\tlogits_shifted = logits[..., :-1, :].contiguous()\n\t\t\tlabels = batch[\"input_ids\"][..., 1:].contiguous()\n\t\t\t# Mask pad tokens in labels\n\t\t\tlabels = labels.masked_fill(labels == pad_id, -100)\n\t\t\tloss = loss_fn(\n\t\t\t\tlogits_shifted.view(-1, vocab),\n\t\t\t\tlabels.view(-1),\n\t\t\t) / float(max(1, grad_accum))\n\t\t\tloss.backward()\n\t\t\tstep_loss += float(loss.detach().item())\n\t\ttorch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n\t\topt.step()\n\t\tscheduler.step()\n\t\ttotal_loss += step_loss\n\t\tif (step + 1) % max(1, int(os.getenv(\"LOG_EVERY\", \"50\"))) == 0 or (step + 1) == steps:\n\t\t\tcur_lr = scheduler.get_last_lr()[0] if scheduler is not None else lr\n\t\t\tprint(f\"[train] step={step+1}/{steps} loss={step_loss:.4f} avg_loss={(total_loss/(step+1)):.4f} lr={cur_lr:.6g}\")\n\tavg_loss = total_loss / max(1, steps)\n\n\t# 4) Save a checkpoint stub under adapters_dir and emit a training report\n\tckpt_dir = adapters_dir / \"trained_ckpt\"\n\tckpt_dir.mkdir(parents=True, exist_ok=True)\n\ttorch.save(model.state_dict(), ckpt_dir / \"model.pt\")\n\treport = {\n\t\t\"avg_loss\": avg_loss,\n\t\t\"steps\": steps,\n\t\t\"device\": str(device),\n\t\t\"ckpt_path\": str(ckpt_dir / \"model.pt\"),\n\t}\n\tout_path = out_dir / \"DatasetTrainerReport.json\"\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\twith open(out_path, \"w\", encoding=\"utf-8\") as 
fh:\n\t\tfh.write(json.dumps({\"schema_version\": 1, \"DatasetTrainerReport\": report}, indent=2))\n\tprint(str(out_path))\n\tsys.exit(0)\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","source_hash":"72b5c19646821abbe0c53b72778f365d64ba91e50a72a0c2ced947a5560e6e48","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_smoke_complete_example","uri":"program://Program_Conditioned_Adapter/module/examples.dataset_trainer.run_smoke_complete_example#L1-L183","kind":"module","name":"examples.dataset_trainer.run_smoke_complete_example","path":"examples/dataset_trainer/run_smoke_complete_example.py","language":"python","start_line":1,"end_line":183,"context_start_line":1,"context_end_line":183,"code":"from __future__ import annotations\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nROOT = EX_DIR\nPG_BACKEND = \"examples.scripts.dataset_graph:DatasetProgramGraph\"\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\ndef _run(cmd: List[str]) -> int:\n\tprint(\"[run]\", \" \".join(cmd))\n\treturn os.system(\" \".join(cmd))\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\t# pad left simple\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids, \"attn_mask\": attn}\n\n\ndef main() -> None:\n\t# Load the base config (for program id/backend), but use a separate artifacts dir\n\t_ = load_program_config(str(ROOT))\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tprogram_state_path = adapters_dir / \".program_state.json\"\n\tadapters_dir.mkdir(parents=True, exist_ok=True)\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\n\tif str(repo_root) not in sys.path:\n\t\tsys.path.insert(0, str(repo_root))\n\tpy_path = os.environ.get(\"PYTHONPATH\") or \"\"\n\tos.environ[\"PYTHONPATH\"] = f\"{str(repo_root)}{(':' + py_path) if py_path else ''}\"\n\n\t# 1) Build ProgramGraph caches/adapters\n\trc = _run([\n\t\tsys.executable,\n\t\tstr(EX_DIR / \"build.py\"),\n\t\t\"--sources\", str(ROOT),\n\t\t\"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n\t\t\"--adapters-dir\", str(adapters_dir),\n\t\t\"--embed-dim\", \"256\",\n\t\t\"--include-text\",\n\t\t\"--text-max-bytes\", \"20000\",\n\t\t\"--pg-backend\", PG_BACKEND,\n\t\t\"--graph-prop-hops\", \"2\",\n\t\t\"--graph-prop-damp\", \"0.85\",\n\t\t\"--init-program-state\",\n\t\t\"--program-state-path\", str(program_state_path),\n\t\t\"--seed\", \"0\",\n\t\t\"--verbose\",\n\t])\n\tif rc != 0:\n\t\tsys.exit(rc)\n\n\t# 2) Build a tokenized dataset: always use full MBPP 
train split when available\n\tfrom data.tokenizer import LocalLlamaTokenizer # type: ignore\n\t# Use a local snapshot directory if available; otherwise fallback to a whitespace tokenizer\n\tsnap_default = os.environ.get(\"LLAMA_SNAPSHOT_DIR\", str(repo_root / \"checkpoints\" / \"llama\"))\n\ttry:\n\t\ttok = LocalLlamaTokenizer(snap_default)\n\texcept Exception:\n\t\tfrom data.tokenizer import WhitespaceTokenizer # type: ignore\n\t\ttok = WhitespaceTokenizer()\n\ttexts: List[str] = []\n\ttotal = 0\n\ttry:\n\t\tfrom examples.scripts.mbpp_loader import load_mbpp_texts_all_splits # type: ignore\n\t\t# Large cap to force \"all\" samples across all splits\n\t\tmbpp_texts, total = load_mbpp_texts_all_splits(max_n=1_000_000_000)\n\t\tif mbpp_texts:\n\t\t\tprint(f\"[mbpp] loaded {len(mbpp_texts)} / {total} samples (all splits)\")\n\t\t\ttexts = mbpp_texts\n\texcept Exception as e:\n\t\tprint(f\"[mbpp] failed to load via HF datasets: {e}\")\n\tif not texts:\n\t\t# Fallback tiny dataset\n\t\ttexts = [\n\t\t\t\"def add(x, y): return x + y\",\n\t\t\t\"class Foo:\\n def bar(self):\\n return 42\",\n\t\t\t\"for i in range(10): print(i)\",\n\t\t]\n\tds = SimpleLmDataset(tok, texts, max_len=128)\n\n\t# 3) Build a small model from our local stack and train for exactly one pass over ds\n\tfrom specs.config import ModelConfig # type: ignore\n\tfrom model.factory import build_causal_lm # type: ignore\n\n\tcfg_m = ModelConfig(\n\t\td_model=256, n_heads=8, n_layers=2, d_ff=1024, vocab_size=int(getattr(tok, \"vocab_size\", 32000) or 32000),\n\t\tdtype=\"float32\", kv_cache_paged=False, attn_impl=\"eager\",\n\t)\n\tmodel = build_causal_lm(cfg_m, block=\"llama\", compress=None)\n\tmodel.train()\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tmodel = model.to(device)\n\n\tlr = float(os.getenv(\"LR\", \"1e-4\"))\n\topt = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=0.01)\n\t# Ignore padding in loss\n\tpad_id = int(getattr(tok, \"pad_token_id\", 0) or 0)\n\tloss_fn = torch.nn.CrossEntropyLoss(ignore_index=-100)\n\n\t# DataLoader epoch training\n\tbatch_size = int(os.getenv(\"BATCH_SIZE\", \"8\"))\n\tdl = DataLoader(ds, batch_size=batch_size, shuffle=True, drop_last=False)\n\n\ttotal_loss = 0.0\n\tsteps = 0\n\tepochs = int(os.getenv(\"EPOCHS\", \"1\"))\n\tprint(f\"[train] epochs={epochs}, dataset_size={len(ds)}, batch_size={batch_size}\")\n\tfor _ in range(epochs):\n\t\tfor batch in dl:\n\t\t\tsteps += 1\n\t\t\topt.zero_grad(set_to_none=True)\n\t\t\tbatch = {k: v.to(device) for k, v in batch.items()}\n\t\t\tout = model(input_ids=batch[\"input_ids\"], attention_mask=batch[\"attn_mask\"], return_dict=True)\n\t\t\tlogits = out[\"logits\"] # [B, T, V]\n\t\t\tvocab = logits.size(-1)\n\t\t\t# Shift and mask labels\n\t\t\tlogits_shifted = logits[..., :-1, :].contiguous()\n\t\t\tlabels = batch[\"input_ids\"][..., 1:].contiguous()\n\t\t\tlabels = labels.masked_fill(labels == pad_id, -100)\n\t\t\tloss = loss_fn(\n\t\t\t\tlogits_shifted.view(-1, vocab),\n\t\t\t\tlabels.view(-1),\n\t\t\t)\n\t\t\tloss.backward()\n\t\t\ttorch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n\t\t\topt.step()\n\t\t\ttotal_loss += float(loss.detach().item())\n\tavg_loss = total_loss / max(1, steps)\n\n\t# 4) Save a checkpoint under adapters_dir and emit a training report\n\tckpt_dir = adapters_dir / \"trained_ckpt\"\n\tckpt_dir.mkdir(parents=True, exist_ok=True)\n\ttorch.save(model.state_dict(), ckpt_dir / \"model.pt\")\n\treport = {\n\t\t\"avg_loss\": avg_loss,\n\t\t\"steps\": 
steps,\n\t\t\"device\": str(device),\n\t\t\"ckpt_path\": str(ckpt_dir / \"model.pt\"),\n\t}\n\tout_path = out_dir / \"DatasetTrainerReport.json\"\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\twith open(out_path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps({\"schema_version\": 1, \"DatasetTrainerReport\": report}, indent=2))\n\tprint(str(out_path))\n\tsys.exit(0)\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","source_hash":"635c64cf46b701cd565736e7f2bb376fa4d2a50e42f19c4c7e3fcef2eba4c8f6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_smoke_complete_example._run","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_smoke_complete_example._run#L20-L22","kind":"function","name":"_run","path":"examples/dataset_trainer/run_smoke_complete_example.py","language":"python","start_line":20,"end_line":22,"context_start_line":1,"context_end_line":42,"code":"from __future__ import annotations\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nROOT = EX_DIR\nPG_BACKEND = \"examples.scripts.dataset_graph:DatasetProgramGraph\"\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\ndef _run(cmd: List[str]) -> int:\n\tprint(\"[run]\", \" \".join(cmd))\n\treturn os.system(\" \".join(cmd))\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\t# pad left simple\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)","source_hash":"635c64cf46b701cd565736e7f2bb376fa4d2a50e42f19c4c7e3fcef2eba4c8f6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_smoke_complete_example.SimpleLmDataset","uri":"program://Program_Conditioned_Adapter/class/examples.dataset_trainer.run_smoke_complete_example.SimpleLmDataset#L25-L44","kind":"class","name":"SimpleLmDataset","path":"examples/dataset_trainer/run_smoke_complete_example.py","language":"python","start_line":25,"end_line":44,"context_start_line":5,"context_end_line":64,"code":"import json\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nROOT = EX_DIR\nPG_BACKEND = \"examples.scripts.dataset_graph:DatasetProgramGraph\"\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\ndef _run(cmd: List[str]) -> int:\n\tprint(\"[run]\", \" \".join(cmd))\n\treturn os.system(\" \".join(cmd))\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in 
texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\t# pad left simple\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids, \"attn_mask\": attn}\n\n\ndef main() -> None:\n\t# Load the base config (for program id/backend), but use a separate artifacts dir\n\t_ = load_program_config(str(ROOT))\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tprogram_state_path = adapters_dir / \".program_state.json\"\n\tadapters_dir.mkdir(parents=True, exist_ok=True)\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\n\tif str(repo_root) not in sys.path:\n\t\tsys.path.insert(0, str(repo_root))\n\tpy_path = os.environ.get(\"PYTHONPATH\") or \"\"\n\tos.environ[\"PYTHONPATH\"] = f\"{str(repo_root)}{(':' + py_path) if py_path else ''}\"\n\n\t# 1) Build ProgramGraph caches/adapters\n\trc = _run([","source_hash":"635c64cf46b701cd565736e7f2bb376fa4d2a50e42f19c4c7e3fcef2eba4c8f6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_smoke_complete_example.main","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_smoke_complete_example.main#L47-L177","kind":"function","name":"main","path":"examples/dataset_trainer/run_smoke_complete_example.py","language":"python","start_line":47,"end_line":177,"context_start_line":27,"context_end_line":183,"code":"\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\t# pad left simple\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids, \"attn_mask\": attn}\n\n\ndef main() -> None:\n\t# Load the base config (for program id/backend), but use a separate artifacts dir\n\t_ = load_program_config(str(ROOT))\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tprogram_state_path = adapters_dir / \".program_state.json\"\n\tadapters_dir.mkdir(parents=True, exist_ok=True)\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\n\tif str(repo_root) not in sys.path:\n\t\tsys.path.insert(0, str(repo_root))\n\tpy_path = os.environ.get(\"PYTHONPATH\") or \"\"\n\tos.environ[\"PYTHONPATH\"] = f\"{str(repo_root)}{(':' + py_path) if py_path else ''}\"\n\n\t# 1) Build ProgramGraph caches/adapters\n\trc = _run([\n\t\tsys.executable,\n\t\tstr(EX_DIR / \"build.py\"),\n\t\t\"--sources\", str(ROOT),\n\t\t\"--model\", 
\"meta-llama/Llama-3.1-8B-Instruct\",\n\t\t\"--adapters-dir\", str(adapters_dir),\n\t\t\"--embed-dim\", \"256\",\n\t\t\"--include-text\",\n\t\t\"--text-max-bytes\", \"20000\",\n\t\t\"--pg-backend\", PG_BACKEND,\n\t\t\"--graph-prop-hops\", \"2\",\n\t\t\"--graph-prop-damp\", \"0.85\",\n\t\t\"--init-program-state\",\n\t\t\"--program-state-path\", str(program_state_path),\n\t\t\"--seed\", \"0\",\n\t\t\"--verbose\",\n\t])\n\tif rc != 0:\n\t\tsys.exit(rc)\n\n\t# 2) Build a tokenized dataset: always use full MBPP train split when available\n\tfrom data.tokenizer import LocalLlamaTokenizer # type: ignore\n\t# Use a local snapshot directory if available; otherwise fallback to a whitespace tokenizer\n\tsnap_default = os.environ.get(\"LLAMA_SNAPSHOT_DIR\", str(repo_root / \"checkpoints\" / \"llama\"))\n\ttry:\n\t\ttok = LocalLlamaTokenizer(snap_default)\n\texcept Exception:\n\t\tfrom data.tokenizer import WhitespaceTokenizer # type: ignore\n\t\ttok = WhitespaceTokenizer()\n\ttexts: List[str] = []\n\ttotal = 0\n\ttry:\n\t\tfrom examples.scripts.mbpp_loader import load_mbpp_texts_all_splits # type: ignore\n\t\t# Large cap to force \"all\" samples across all splits\n\t\tmbpp_texts, total = load_mbpp_texts_all_splits(max_n=1_000_000_000)\n\t\tif mbpp_texts:\n\t\t\tprint(f\"[mbpp] loaded {len(mbpp_texts)} / {total} samples (all splits)\")\n\t\t\ttexts = mbpp_texts\n\texcept Exception as e:\n\t\tprint(f\"[mbpp] failed to load via HF datasets: {e}\")\n\tif not texts:\n\t\t# Fallback tiny dataset\n\t\ttexts = [\n\t\t\t\"def add(x, y): return x + y\",\n\t\t\t\"class Foo:\\n def bar(self):\\n return 42\",\n\t\t\t\"for i in range(10): print(i)\",\n\t\t]\n\tds = SimpleLmDataset(tok, texts, max_len=128)\n\n\t# 3) Build a small model from our local stack and train for exactly one pass over ds\n\tfrom specs.config import ModelConfig # type: ignore\n\tfrom model.factory import build_causal_lm # type: ignore\n\n\tcfg_m = ModelConfig(\n\t\td_model=256, n_heads=8, n_layers=2, d_ff=1024, vocab_size=int(getattr(tok, \"vocab_size\", 32000) or 32000),\n\t\tdtype=\"float32\", kv_cache_paged=False, attn_impl=\"eager\",\n\t)\n\tmodel = build_causal_lm(cfg_m, block=\"llama\", compress=None)\n\tmodel.train()\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tmodel = model.to(device)\n\n\tlr = float(os.getenv(\"LR\", \"1e-4\"))\n\topt = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=0.01)\n\t# Ignore padding in loss\n\tpad_id = int(getattr(tok, \"pad_token_id\", 0) or 0)\n\tloss_fn = torch.nn.CrossEntropyLoss(ignore_index=-100)\n\n\t# DataLoader epoch training\n\tbatch_size = int(os.getenv(\"BATCH_SIZE\", \"8\"))\n\tdl = DataLoader(ds, batch_size=batch_size, shuffle=True, drop_last=False)\n\n\ttotal_loss = 0.0\n\tsteps = 0\n\tepochs = int(os.getenv(\"EPOCHS\", \"1\"))\n\tprint(f\"[train] epochs={epochs}, dataset_size={len(ds)}, batch_size={batch_size}\")\n\tfor _ in range(epochs):\n\t\tfor batch in dl:\n\t\t\tsteps += 1\n\t\t\topt.zero_grad(set_to_none=True)\n\t\t\tbatch = {k: v.to(device) for k, v in batch.items()}\n\t\t\tout = model(input_ids=batch[\"input_ids\"], attention_mask=batch[\"attn_mask\"], return_dict=True)\n\t\t\tlogits = out[\"logits\"] # [B, T, V]\n\t\t\tvocab = logits.size(-1)\n\t\t\t# Shift and mask labels\n\t\t\tlogits_shifted = logits[..., :-1, :].contiguous()\n\t\t\tlabels = batch[\"input_ids\"][..., 1:].contiguous()\n\t\t\tlabels = labels.masked_fill(labels == pad_id, -100)\n\t\t\tloss = loss_fn(\n\t\t\t\tlogits_shifted.view(-1, 
vocab),\n\t\t\t\tlabels.view(-1),\n\t\t\t)\n\t\t\tloss.backward()\n\t\t\ttorch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n\t\t\topt.step()\n\t\t\ttotal_loss += float(loss.detach().item())\n\tavg_loss = total_loss / max(1, steps)\n\n\t# 4) Save a checkpoint under adapters_dir and emit a training report\n\tckpt_dir = adapters_dir / \"trained_ckpt\"\n\tckpt_dir.mkdir(parents=True, exist_ok=True)\n\ttorch.save(model.state_dict(), ckpt_dir / \"model.pt\")\n\treport = {\n\t\t\"avg_loss\": avg_loss,\n\t\t\"steps\": steps,\n\t\t\"device\": str(device),\n\t\t\"ckpt_path\": str(ckpt_dir / \"model.pt\"),\n\t}\n\tout_path = out_dir / \"DatasetTrainerReport.json\"\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\twith open(out_path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps({\"schema_version\": 1, \"DatasetTrainerReport\": report}, indent=2))\n\tprint(str(out_path))\n\tsys.exit(0)\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","source_hash":"635c64cf46b701cd565736e7f2bb376fa4d2a50e42f19c4c7e3fcef2eba4c8f6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_smoke_complete_example.__init__","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_smoke_complete_example.__init__#L26-L32","kind":"function","name":"__init__","path":"examples/dataset_trainer/run_smoke_complete_example.py","language":"python","start_line":26,"end_line":32,"context_start_line":6,"context_end_line":52,"code":"from pathlib import Path\nfrom typing import List, Dict\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nROOT = EX_DIR\nPG_BACKEND = \"examples.scripts.dataset_graph:DatasetProgramGraph\"\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\ndef _run(cmd: List[str]) -> int:\n\tprint(\"[run]\", \" \".join(cmd))\n\treturn os.system(\" \".join(cmd))\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\t# pad left simple\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids, \"attn_mask\": attn}\n\n\ndef main() -> None:\n\t# Load the base config (for program id/backend), but use a separate artifacts dir\n\t_ = load_program_config(str(ROOT))\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tadapters_dir = artifacts_root","source_hash":"635c64cf46b701cd565736e7f2bb376fa4d2a50e42f19c4c7e3fcef2eba4c8f6","truncated":false} 
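The records above capture the trainer's shift-and-mask causal-LM objective. A self-contained sketch of that step follows (the names `lm_loss` and `shifted` are mine, not from the repo). Two caveats carried over from the source: despite the `# pad left simple` comment in `SimpleLmDataset.__getitem__`, the dataset right-pads, and `pad_id` falls back to 0, which can collide with a real token id under the whitespace-tokenizer fallback; the mask below inherits both.

import torch
import torch.nn.functional as F

def lm_loss(logits: torch.Tensor, input_ids: torch.Tensor, pad_id: int) -> torch.Tensor:
    # logits: [B, T, V]; the token at position t predicts the token at t+1
    shifted = logits[..., :-1, :].contiguous()           # [B, T-1, V]
    labels = input_ids[..., 1:].contiguous()             # [B, T-1]
    labels = labels.masked_fill(labels == pad_id, -100)  # pad targets are ignored
    return F.cross_entropy(
        shifted.view(-1, shifted.size(-1)),
        labels.view(-1),
        ignore_index=-100,
    )

This is the same computation the training loop performs inline with `CrossEntropyLoss(ignore_index=-100)`; factoring it out makes the masking convention explicit and reusable by the eval script, which repeats it verbatim.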
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_smoke_complete_example.__len__","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_smoke_complete_example.__len__#L34-L35","kind":"function","name":"__len__","path":"examples/dataset_trainer/run_smoke_complete_example.py","language":"python","start_line":34,"end_line":35,"context_start_line":14,"context_end_line":55,"code":"try:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\ndef _run(cmd: List[str]) -> int:\n\tprint(\"[run]\", \" \".join(cmd))\n\treturn os.system(\" \".join(cmd))\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\t# pad left simple\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids, \"attn_mask\": attn}\n\n\ndef main() -> None:\n\t# Load the base config (for program id/backend), but use a separate artifacts dir\n\t_ = load_program_config(str(ROOT))\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tprogram_state_path = adapters_dir / \".program_state.json\"\n\tadapters_dir.mkdir(parents=True, exist_ok=True)","source_hash":"635c64cf46b701cd565736e7f2bb376fa4d2a50e42f19c4c7e3fcef2eba4c8f6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_smoke_complete_example.__getitem__","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_smoke_complete_example.__getitem__#L37-L44","kind":"function","name":"__getitem__","path":"examples/dataset_trainer/run_smoke_complete_example.py","language":"python","start_line":37,"end_line":44,"context_start_line":17,"context_end_line":64,"code":"\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\ndef _run(cmd: List[str]) -> int:\n\tprint(\"[run]\", \" \".join(cmd))\n\treturn os.system(\" \".join(cmd))\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\t# pad left simple\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids, 
\"attn_mask\": attn}\n\n\ndef main() -> None:\n\t# Load the base config (for program id/backend), but use a separate artifacts dir\n\t_ = load_program_config(str(ROOT))\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tprogram_state_path = adapters_dir / \".program_state.json\"\n\tadapters_dir.mkdir(parents=True, exist_ok=True)\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\n\tif str(repo_root) not in sys.path:\n\t\tsys.path.insert(0, str(repo_root))\n\tpy_path = os.environ.get(\"PYTHONPATH\") or \"\"\n\tos.environ[\"PYTHONPATH\"] = f\"{str(repo_root)}{(':' + py_path) if py_path else ''}\"\n\n\t# 1) Build ProgramGraph caches/adapters\n\trc = _run([","source_hash":"635c64cf46b701cd565736e7f2bb376fa4d2a50e42f19c4c7e3fcef2eba4c8f6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.program_config","uri":"program://Program_Conditioned_Adapter/module/examples.dataset_trainer.program_config#L1-L59","kind":"module","name":"examples.dataset_trainer.program_config","path":"examples/dataset_trainer/program_config.py","language":"python","start_line":1,"end_line":59,"context_start_line":1,"context_end_line":59,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n\trequire_citations: bool = False\n\tcitations_per_paragraph: bool = False\n\tretrieval_policy: str = \"sim:0.5,struct:0.5\"\n\tretrieval_temp: float = 0.6\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\ttelemetry_path: Path\n\tprogram_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n\tprogram_id: str\n\tpg_backend: str\n\tpaths: ProgramPaths\n\tcontracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n\treturn \"examples.scripts.dataset_graph:DatasetProgramGraph\"\n\n\ndef load_program_config(root: str) -> ProgramConfig:\n\troot_path = Path(root).resolve()\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\ttelemetry_path = out_dir / \"structured_output.json\"\n\tpg_backend = _detect_pg_backend(example_dir)\n\tprogram_id = root_path.name or \"datasets\"\n\treturn ProgramConfig(\n\t\tprogram_id=program_id,\n\t\tpg_backend=pg_backend,\n\t\tpaths=ProgramPaths(\n\t\t\tadapters_dir=adapters_dir,\n\t\t\tout_dir=out_dir,\n\t\t\ttelemetry_path=telemetry_path,\n\t\t\tprogram_state_path=adapters_dir / \".program_state.json\",\n\t\t),\n\t\tcontracts=ProgramContracts(),\n\t)\n\n\n\n","source_hash":"129f956b9b6cd80b8d3a84c0389fe0a66e0909d562fe3d459925869cdf56eb85","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.program_config.ProgramContracts","uri":"program://Program_Conditioned_Adapter/class/examples.dataset_trainer.program_config.ProgramContracts#L9-L13","kind":"class","name":"ProgramContracts","path":"examples/dataset_trainer/program_config.py","language":"python","start_line":9,"end_line":13,"context_start_line":1,"context_end_line":33,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass 
ProgramContracts:\n\trequire_citations: bool = False\n\tcitations_per_paragraph: bool = False\n\tretrieval_policy: str = \"sim:0.5,struct:0.5\"\n\tretrieval_temp: float = 0.6\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\ttelemetry_path: Path\n\tprogram_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n\tprogram_id: str\n\tpg_backend: str\n\tpaths: ProgramPaths\n\tcontracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n\treturn \"examples.scripts.dataset_graph:DatasetProgramGraph\"","source_hash":"129f956b9b6cd80b8d3a84c0389fe0a66e0909d562fe3d459925869cdf56eb85","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.program_config.ProgramPaths","uri":"program://Program_Conditioned_Adapter/class/examples.dataset_trainer.program_config.ProgramPaths#L17-L21","kind":"class","name":"ProgramPaths","path":"examples/dataset_trainer/program_config.py","language":"python","start_line":17,"end_line":21,"context_start_line":1,"context_end_line":41,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n\trequire_citations: bool = False\n\tcitations_per_paragraph: bool = False\n\tretrieval_policy: str = \"sim:0.5,struct:0.5\"\n\tretrieval_temp: float = 0.6\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\ttelemetry_path: Path\n\tprogram_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n\tprogram_id: str\n\tpg_backend: str\n\tpaths: ProgramPaths\n\tcontracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n\treturn \"examples.scripts.dataset_graph:DatasetProgramGraph\"\n\n\ndef load_program_config(root: str) -> ProgramConfig:\n\troot_path = Path(root).resolve()\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"","source_hash":"129f956b9b6cd80b8d3a84c0389fe0a66e0909d562fe3d459925869cdf56eb85","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.program_config.ProgramConfig","uri":"program://Program_Conditioned_Adapter/class/examples.dataset_trainer.program_config.ProgramConfig#L25-L29","kind":"class","name":"ProgramConfig","path":"examples/dataset_trainer/program_config.py","language":"python","start_line":25,"end_line":29,"context_start_line":5,"context_end_line":49,"code":"from typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n\trequire_citations: bool = False\n\tcitations_per_paragraph: bool = False\n\tretrieval_policy: str = \"sim:0.5,struct:0.5\"\n\tretrieval_temp: float = 0.6\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\ttelemetry_path: Path\n\tprogram_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n\tprogram_id: str\n\tpg_backend: str\n\tpaths: ProgramPaths\n\tcontracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n\treturn \"examples.scripts.dataset_graph:DatasetProgramGraph\"\n\n\ndef load_program_config(root: str) -> ProgramConfig:\n\troot_path = Path(root).resolve()\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / 
\"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\ttelemetry_path = out_dir / \"structured_output.json\"\n\tpg_backend = _detect_pg_backend(example_dir)\n\tprogram_id = root_path.name or \"datasets\"\n\treturn ProgramConfig(\n\t\tprogram_id=program_id,\n\t\tpg_backend=pg_backend,\n\t\tpaths=ProgramPaths(\n\t\t\tadapters_dir=adapters_dir,","source_hash":"129f956b9b6cd80b8d3a84c0389fe0a66e0909d562fe3d459925869cdf56eb85","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.program_config._detect_pg_backend","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.program_config._detect_pg_backend#L32-L33","kind":"function","name":"_detect_pg_backend","path":"examples/dataset_trainer/program_config.py","language":"python","start_line":32,"end_line":33,"context_start_line":12,"context_end_line":53,"code":"\tretrieval_policy: str = \"sim:0.5,struct:0.5\"\n\tretrieval_temp: float = 0.6\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\ttelemetry_path: Path\n\tprogram_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n\tprogram_id: str\n\tpg_backend: str\n\tpaths: ProgramPaths\n\tcontracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n\treturn \"examples.scripts.dataset_graph:DatasetProgramGraph\"\n\n\ndef load_program_config(root: str) -> ProgramConfig:\n\troot_path = Path(root).resolve()\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\ttelemetry_path = out_dir / \"structured_output.json\"\n\tpg_backend = _detect_pg_backend(example_dir)\n\tprogram_id = root_path.name or \"datasets\"\n\treturn ProgramConfig(\n\t\tprogram_id=program_id,\n\t\tpg_backend=pg_backend,\n\t\tpaths=ProgramPaths(\n\t\t\tadapters_dir=adapters_dir,\n\t\t\tout_dir=out_dir,\n\t\t\ttelemetry_path=telemetry_path,\n\t\t\tprogram_state_path=adapters_dir / \".program_state.json\",\n\t\t),","source_hash":"129f956b9b6cd80b8d3a84c0389fe0a66e0909d562fe3d459925869cdf56eb85","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.program_config.load_program_config","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.program_config.load_program_config#L36-L55","kind":"function","name":"load_program_config","path":"examples/dataset_trainer/program_config.py","language":"python","start_line":36,"end_line":55,"context_start_line":16,"context_end_line":59,"code":"@dataclass(frozen=True)\nclass ProgramPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\ttelemetry_path: Path\n\tprogram_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n\tprogram_id: str\n\tpg_backend: str\n\tpaths: ProgramPaths\n\tcontracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n\treturn \"examples.scripts.dataset_graph:DatasetProgramGraph\"\n\n\ndef load_program_config(root: str) -> ProgramConfig:\n\troot_path = Path(root).resolve()\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\ttelemetry_path = out_dir / \"structured_output.json\"\n\tpg_backend = _detect_pg_backend(example_dir)\n\tprogram_id = root_path.name or 
\"datasets\"\n\treturn ProgramConfig(\n\t\tprogram_id=program_id,\n\t\tpg_backend=pg_backend,\n\t\tpaths=ProgramPaths(\n\t\t\tadapters_dir=adapters_dir,\n\t\t\tout_dir=out_dir,\n\t\t\ttelemetry_path=telemetry_path,\n\t\t\tprogram_state_path=adapters_dir / \".program_state.json\",\n\t\t),\n\t\tcontracts=ProgramContracts(),\n\t)\n\n\n\n","source_hash":"129f956b9b6cd80b8d3a84c0389fe0a66e0909d562fe3d459925869cdf56eb85","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_mbpp_test_eval","uri":"program://Program_Conditioned_Adapter/module/examples.dataset_trainer.run_mbpp_test_eval#L1-L224","kind":"module","name":"examples.dataset_trainer.run_mbpp_test_eval","path":"examples/dataset_trainer/run_mbpp_test_eval.py","language":"python","start_line":1,"end_line":224,"context_start_line":1,"context_end_line":224,"code":"from __future__ import annotations\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport torch\nfrom torch.utils.data import Dataset\n\nROOT = EX_DIR\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids.unsqueeze(0), \"attn_mask\": attn.unsqueeze(0)}\n\n\ndef main() -> None:\n\t# Prefer evaluating the \"complete\" run by default\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tif not artifacts_root.exists():\n\t\t# Fallback to smoke artifacts if complete doesn't exist\n\t\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tckpt_path = adapters_dir / \"trained_ckpt\" / \"model.pt\"\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\teval_random = os.getenv(\"EVAL_RANDOM\", \"0\") == \"1\"\n\teval_model = os.getenv(\"EVAL_MODEL\", \"local\") # \"local\" (toy LM) or \"llama_hf\"\n\n\tif str(repo_root) not in sys.path:\n\t\tsys.path.insert(0, str(repo_root))\n\tpy_path = os.environ.get(\"PYTHONPATH\") or \"\"\n\tos.environ[\"PYTHONPATH\"] = f\"{str(repo_root)}{(':' + py_path) if py_path else ''}\"\n\n\t# Tokenizer(s)\n\tif eval_model == \"llama_hf\":\n\t\t# Hugging Face Llama tokenizer for baseline (no adapters)\n\t\ttry:\n\t\t\tfrom transformers import AutoTokenizer # type: ignore\n\t\t\thf_tok = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\n\t\t\t# Ensure pad token is set for batching; use eos as pad\n\t\t\tif hf_tok.pad_token_id is None and hf_tok.eos_token_id is not None:\n\t\t\t\thf_tok.pad_token = hf_tok.eos_token\n\t\texcept Exception as e:\n\t\t\tprint(f\"[eval] Failed to load HF tokenizer: 
{e}\")\n\t\t\treturn\n\telse:\n\t\tfrom data.tokenizer import LocalLlamaTokenizer # type: ignore\n\t\ttry:\n\t\t\tsnap_default = os.environ.get(\"LLAMA_SNAPSHOT_DIR\", str(repo_root / \"checkpoints\" / \"llama\"))\n\t\t\ttok = LocalLlamaTokenizer(snap_default)\n\t\texcept Exception:\n\t\t\tfrom data.tokenizer import WhitespaceTokenizer # type: ignore\n\t\t\ttok = WhitespaceTokenizer()\n\n\t# Load MBPP test split\n\ttexts: List[str] = []\n\ttotal = 0\n\ttry:\n\t\tfrom examples.scripts.mbpp_loader import load_mbpp_texts # type: ignore\n\t\ttexts, total = load_mbpp_texts(max_n=1_000_000, split=\"test\")\n\t\tprint(f\"[mbpp:test] loaded {len(texts)} / {total} samples\")\n\texcept Exception as e:\n\t\tprint(f\"[mbpp:test] failed to load via HF datasets: {e}\")\n\tif not texts:\n\t\tprint(\"[eval] No test texts available; exiting with empty report.\")\n\t\treport = {\n\t\t\t\"split\": \"test\",\n\t\t\t\"avg_loss\": None,\n\t\t\t\"ppl\": None,\n\t\t\t\"samples\": 0,\n\t\t\t\"device\": \"cpu\",\n\t\t\t\"ckpt_path\": str(ckpt_path),\n\t\t}\n\t\tout_path = out_dir / \"MBPPEvalTestReport.json\"\n\t\twith open(out_path, \"w\", encoding=\"utf-8\") as fh:\n\t\t\tfh.write(json.dumps({\"schema_version\": 1, \"EvalReport\": report}, indent=2))\n\t\tprint(str(out_path))\n\t\treturn\n\n\t# Build dataset or leave as raw texts depending on eval model\n\tif eval_model == \"llama_hf\":\n\t\tds = None # we will use hf_tok directly on texts per-sample\n\telse:\n\t\tds = SimpleLmDataset(tok, texts, max_len=128)\n\n\t# Model selection\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tif eval_model == \"llama_hf\":\n\t\ttry:\n\t\t\tfrom transformers import AutoModelForCausalLM # type: ignore\n\t\t\thf_model = AutoModelForCausalLM.from_pretrained(\n\t\t\t\t\"meta-llama/Llama-3.1-8B-Instruct\",\n\t\t\t\tdevice_map=\"auto\" if torch.cuda.is_available() else None,\n\t\t\t\ttorch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,\n\t\t\t)\n\t\t\t# Optional: load PEFT/LoRA adapter when provided\n\t\t\tadapter_path = os.getenv(\"ADAPTER_PATH\", \"\").strip()\n\t\t\tif adapter_path:\n\t\t\t\ttry:\n\t\t\t\t\tfrom peft import PeftModel # type: ignore\n\t\t\t\t\thf_model = PeftModel.from_pretrained(hf_model, adapter_path)\n\t\t\t\t\tprint(f\"[eval] loaded adapter from {adapter_path}\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(f\"[eval] Failed to load adapter at {adapter_path}: {e}\")\n\t\t\thf_model.eval()\n\t\texcept Exception as e:\n\t\t\tprint(f\"[eval] Failed to load HF model: {e}\")\n\t\t\treturn\n\telse:\n\t\t# Local tiny LM (trained vs random)\n\t\tfrom specs.config import ModelConfig # type: ignore\n\t\tfrom model.factory import build_causal_lm # type: ignore\n\t\tcfg_m = ModelConfig(\n\t\t\td_model=256, n_heads=8, n_layers=2, d_ff=1024, vocab_size=int(getattr(tok, \"vocab_size\", 32000) or 32000),\n\t\t\tdtype=\"float32\", kv_cache_paged=False, attn_impl=\"eager\",\n\t\t)\n\t\tmodel = build_causal_lm(cfg_m, block=\"llama\", compress=None)\n\t\tmodel = model.to(device)\n\t\tif eval_random:\n\t\t\tprint(\"[eval] EVAL_RANDOM=1 set, skipping checkpoint load (random-initialized model).\")\n\t\telse:\n\t\t\tif ckpt_path.exists():\n\t\t\t\tstate = torch.load(ckpt_path, map_location=device)\n\t\t\t\tmodel.load_state_dict(state, strict=False)\n\t\t\telse:\n\t\t\t\tprint(f\"[eval] Warning: checkpoint not found at {ckpt_path}, evaluating random-initialized model.\")\n\t\tmodel.eval()\n\n\t# Evaluate\n\t# Use pad-aware loss\n\tif eval_model == \"llama_hf\":\n\t\tpad_id = 
int(getattr(hf_tok, \"pad_token_id\", 0) or 0)\n\telse:\n\t\tpad_id = int(getattr(tok, \"pad_token_id\", 0) or 0)\n\tloss_fn = torch.nn.CrossEntropyLoss(ignore_index=-100)\n\ttotal_loss = 0.0\n\tnum = 0\n\twith torch.no_grad():\n\t\tif eval_model == \"llama_hf\":\n\t\t\tmax_len = 128\n\t\t\tfor t in texts:\n\t\t\t\tenc = hf_tok(t, return_tensors=\"pt\", truncation=True, max_length=max_len, padding=\"max_length\")\n\t\t\t\tinput_ids = enc[\"input_ids\"].to(device if torch.cuda.is_available() else \"cpu\")\n\t\t\t\tattn_mask = enc[\"attention_mask\"].to(device if torch.cuda.is_available() else \"cpu\")\n\t\t\t\tout = hf_model(input_ids=input_ids, attention_mask=attn_mask, return_dict=True)\n\t\t\t\tlogits = out[\"logits\"]\n\t\t\t\tvocab = logits.size(-1)\n\t\t\t\tlabels = input_ids[..., 1:].contiguous()\n\t\t\t\tlabels = labels.masked_fill(labels == pad_id, -100)\n\t\t\t\tloss = loss_fn(\n\t\t\t\t\tlogits[..., :-1, :].contiguous().view(-1, vocab),\n\t\t\t\t\tlabels.view(-1),\n\t\t\t\t)\n\t\t\t\ttotal_loss += float(loss.detach().item())\n\t\t\t\tnum += 1\n\t\telse:\n\t\t\tfor i in range(len(ds)):\n\t\t\t\tbatch = ds[i]\n\t\t\t\tbatch = {k: v.to(device) for k, v in batch.items()}\n\t\t\t\tout = model(input_ids=batch[\"input_ids\"], attention_mask=batch[\"attn_mask\"], return_dict=True)\n\t\t\t\tlogits = out[\"logits\"] # [B, T, V]\n\t\t\t\tvocab = logits.size(-1)\n\t\t\t\tlabels = batch[\"input_ids\"][..., 1:].contiguous()\n\t\t\t\tlabels = labels.masked_fill(labels == pad_id, -100)\n\t\t\t\tloss = loss_fn(\n\t\t\t\t\tlogits[..., :-1, :].contiguous().view(-1, vocab),\n\t\t\t\t\tlabels.view(-1),\n\t\t\t\t)\n\t\t\t\ttotal_loss += float(loss.detach().item())\n\t\t\t\tnum += 1\n\tavg_loss = total_loss / max(1, num)\n\ttry:\n\t\timport math\n\t\tppl = float(math.exp(avg_loss))\n\texcept Exception:\n\t\tppl = None\n\n\treport = {\n\t\t\"split\": \"test\",\n\t\t\"avg_loss\": avg_loss,\n\t\t\"ppl\": ppl,\n\t\t\"samples\": num,\n\t\t\"device\": str(device),\n\t\t\"ckpt_path\": str(ckpt_path),\n\t}\n\tif eval_model == \"llama_hf\":\n\t\tout_filename = \"MBPPEvalTestReport.llama.json\"\n\telse:\n\t\tout_filename = \"MBPPEvalTestReport.random.json\" if eval_random else \"MBPPEvalTestReport.json\"\n\tout_path = out_dir / out_filename\n\twith open(out_path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps({\"schema_version\": 1, \"EvalReport\": report}, indent=2))\n\tprint(str(out_path))\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","source_hash":"4a8585424d951009c2013785d3f0e83ddf8f75cbd6510db7aad9b48a641a59cc","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_mbpp_test_eval.SimpleLmDataset","uri":"program://Program_Conditioned_Adapter/class/examples.dataset_trainer.run_mbpp_test_eval.SimpleLmDataset#L19-L37","kind":"class","name":"SimpleLmDataset","path":"examples/dataset_trainer/run_mbpp_test_eval.py","language":"python","start_line":19,"end_line":37,"context_start_line":1,"context_end_line":57,"code":"from __future__ import annotations\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport torch\nfrom torch.utils.data import Dataset\n\nROOT = EX_DIR\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len 
= int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids.unsqueeze(0), \"attn_mask\": attn.unsqueeze(0)}\n\n\ndef main() -> None:\n\t# Prefer evaluating the \"complete\" run by default\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tif not artifacts_root.exists():\n\t\t# Fallback to smoke artifacts if complete doesn't exist\n\t\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tckpt_path = adapters_dir / \"trained_ckpt\" / \"model.pt\"\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\teval_random = os.getenv(\"EVAL_RANDOM\", \"0\") == \"1\"\n\teval_model = os.getenv(\"EVAL_MODEL\", \"local\") # \"local\" (toy LM) or \"llama_hf\"\n\n\tif str(repo_root) not in sys.path:\n\t\tsys.path.insert(0, str(repo_root))\n\tpy_path = os.environ.get(\"PYTHONPATH\") or \"\"\n\tos.environ[\"PYTHONPATH\"] = f\"{str(repo_root)}{(':' + py_path) if py_path else ''}\"","source_hash":"4a8585424d951009c2013785d3f0e83ddf8f75cbd6510db7aad9b48a641a59cc","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_mbpp_test_eval.main","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_mbpp_test_eval.main#L40-L218","kind":"function","name":"main","path":"examples/dataset_trainer/run_mbpp_test_eval.py","language":"python","start_line":40,"end_line":218,"context_start_line":20,"context_end_line":224,"code":"\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids.unsqueeze(0), \"attn_mask\": attn.unsqueeze(0)}\n\n\ndef main() -> None:\n\t# Prefer evaluating the \"complete\" run by default\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tif not artifacts_root.exists():\n\t\t# Fallback to smoke artifacts if complete doesn't exist\n\t\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tckpt_path = adapters_dir / \"trained_ckpt\" / \"model.pt\"\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\teval_random = os.getenv(\"EVAL_RANDOM\", \"0\") == \"1\"\n\teval_model = os.getenv(\"EVAL_MODEL\", 
\"local\") # \"local\" (toy LM) or \"llama_hf\"\n\n\tif str(repo_root) not in sys.path:\n\t\tsys.path.insert(0, str(repo_root))\n\tpy_path = os.environ.get(\"PYTHONPATH\") or \"\"\n\tos.environ[\"PYTHONPATH\"] = f\"{str(repo_root)}{(':' + py_path) if py_path else ''}\"\n\n\t# Tokenizer(s)\n\tif eval_model == \"llama_hf\":\n\t\t# Hugging Face Llama tokenizer for baseline (no adapters)\n\t\ttry:\n\t\t\tfrom transformers import AutoTokenizer # type: ignore\n\t\t\thf_tok = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\n\t\t\t# Ensure pad token is set for batching; use eos as pad\n\t\t\tif hf_tok.pad_token_id is None and hf_tok.eos_token_id is not None:\n\t\t\t\thf_tok.pad_token = hf_tok.eos_token\n\t\texcept Exception as e:\n\t\t\tprint(f\"[eval] Failed to load HF tokenizer: {e}\")\n\t\t\treturn\n\telse:\n\t\tfrom data.tokenizer import LocalLlamaTokenizer # type: ignore\n\t\ttry:\n\t\t\tsnap_default = os.environ.get(\"LLAMA_SNAPSHOT_DIR\", str(repo_root / \"checkpoints\" / \"llama\"))\n\t\t\ttok = LocalLlamaTokenizer(snap_default)\n\t\texcept Exception:\n\t\t\tfrom data.tokenizer import WhitespaceTokenizer # type: ignore\n\t\t\ttok = WhitespaceTokenizer()\n\n\t# Load MBPP test split\n\ttexts: List[str] = []\n\ttotal = 0\n\ttry:\n\t\tfrom examples.scripts.mbpp_loader import load_mbpp_texts # type: ignore\n\t\ttexts, total = load_mbpp_texts(max_n=1_000_000, split=\"test\")\n\t\tprint(f\"[mbpp:test] loaded {len(texts)} / {total} samples\")\n\texcept Exception as e:\n\t\tprint(f\"[mbpp:test] failed to load via HF datasets: {e}\")\n\tif not texts:\n\t\tprint(\"[eval] No test texts available; exiting with empty report.\")\n\t\treport = {\n\t\t\t\"split\": \"test\",\n\t\t\t\"avg_loss\": None,\n\t\t\t\"ppl\": None,\n\t\t\t\"samples\": 0,\n\t\t\t\"device\": \"cpu\",\n\t\t\t\"ckpt_path\": str(ckpt_path),\n\t\t}\n\t\tout_path = out_dir / \"MBPPEvalTestReport.json\"\n\t\twith open(out_path, \"w\", encoding=\"utf-8\") as fh:\n\t\t\tfh.write(json.dumps({\"schema_version\": 1, \"EvalReport\": report}, indent=2))\n\t\tprint(str(out_path))\n\t\treturn\n\n\t# Build dataset or leave as raw texts depending on eval model\n\tif eval_model == \"llama_hf\":\n\t\tds = None # we will use hf_tok directly on texts per-sample\n\telse:\n\t\tds = SimpleLmDataset(tok, texts, max_len=128)\n\n\t# Model selection\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tif eval_model == \"llama_hf\":\n\t\ttry:\n\t\t\tfrom transformers import AutoModelForCausalLM # type: ignore\n\t\t\thf_model = AutoModelForCausalLM.from_pretrained(\n\t\t\t\t\"meta-llama/Llama-3.1-8B-Instruct\",\n\t\t\t\tdevice_map=\"auto\" if torch.cuda.is_available() else None,\n\t\t\t\ttorch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,\n\t\t\t)\n\t\t\t# Optional: load PEFT/LoRA adapter when provided\n\t\t\tadapter_path = os.getenv(\"ADAPTER_PATH\", \"\").strip()\n\t\t\tif adapter_path:\n\t\t\t\ttry:\n\t\t\t\t\tfrom peft import PeftModel # type: ignore\n\t\t\t\t\thf_model = PeftModel.from_pretrained(hf_model, adapter_path)\n\t\t\t\t\tprint(f\"[eval] loaded adapter from {adapter_path}\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(f\"[eval] Failed to load adapter at {adapter_path}: {e}\")\n\t\t\thf_model.eval()\n\t\texcept Exception as e:\n\t\t\tprint(f\"[eval] Failed to load HF model: {e}\")\n\t\t\treturn\n\telse:\n\t\t# Local tiny LM (trained vs random)\n\t\tfrom specs.config import ModelConfig # type: ignore\n\t\tfrom model.factory import build_causal_lm # type: ignore\n\t\tcfg_m = 
ModelConfig(\n\t\t\td_model=256, n_heads=8, n_layers=2, d_ff=1024, vocab_size=int(getattr(tok, \"vocab_size\", 32000) or 32000),\n\t\t\tdtype=\"float32\", kv_cache_paged=False, attn_impl=\"eager\",\n\t\t)\n\t\tmodel = build_causal_lm(cfg_m, block=\"llama\", compress=None)\n\t\tmodel = model.to(device)\n\t\tif eval_random:\n\t\t\tprint(\"[eval] EVAL_RANDOM=1 set, skipping checkpoint load (random-initialized model).\")\n\t\telse:\n\t\t\tif ckpt_path.exists():\n\t\t\t\tstate = torch.load(ckpt_path, map_location=device)\n\t\t\t\tmodel.load_state_dict(state, strict=False)\n\t\t\telse:\n\t\t\t\tprint(f\"[eval] Warning: checkpoint not found at {ckpt_path}, evaluating random-initialized model.\")\n\t\tmodel.eval()\n\n\t# Evaluate\n\t# Use pad-aware loss\n\tif eval_model == \"llama_hf\":\n\t\tpad_id = int(getattr(hf_tok, \"pad_token_id\", 0) or 0)\n\telse:\n\t\tpad_id = int(getattr(tok, \"pad_token_id\", 0) or 0)\n\tloss_fn = torch.nn.CrossEntropyLoss(ignore_index=-100)\n\ttotal_loss = 0.0\n\tnum = 0\n\twith torch.no_grad():\n\t\tif eval_model == \"llama_hf\":\n\t\t\tmax_len = 128\n\t\t\tfor t in texts:\n\t\t\t\tenc = hf_tok(t, return_tensors=\"pt\", truncation=True, max_length=max_len, padding=\"max_length\")\n\t\t\t\tinput_ids = enc[\"input_ids\"].to(device if torch.cuda.is_available() else \"cpu\")\n\t\t\t\tattn_mask = enc[\"attention_mask\"].to(device if torch.cuda.is_available() else \"cpu\")\n\t\t\t\tout = hf_model(input_ids=input_ids, attention_mask=attn_mask, return_dict=True)\n\t\t\t\tlogits = out[\"logits\"]\n\t\t\t\tvocab = logits.size(-1)\n\t\t\t\tlabels = input_ids[..., 1:].contiguous()\n\t\t\t\tlabels = labels.masked_fill(labels == pad_id, -100)\n\t\t\t\tloss = loss_fn(\n\t\t\t\t\tlogits[..., :-1, :].contiguous().view(-1, vocab),\n\t\t\t\t\tlabels.view(-1),\n\t\t\t\t)\n\t\t\t\ttotal_loss += float(loss.detach().item())\n\t\t\t\tnum += 1\n\t\telse:\n\t\t\tfor i in range(len(ds)):\n\t\t\t\tbatch = ds[i]\n\t\t\t\tbatch = {k: v.to(device) for k, v in batch.items()}\n\t\t\t\tout = model(input_ids=batch[\"input_ids\"], attention_mask=batch[\"attn_mask\"], return_dict=True)\n\t\t\t\tlogits = out[\"logits\"] # [B, T, V]\n\t\t\t\tvocab = logits.size(-1)\n\t\t\t\tlabels = batch[\"input_ids\"][..., 1:].contiguous()\n\t\t\t\tlabels = labels.masked_fill(labels == pad_id, -100)\n\t\t\t\tloss = loss_fn(\n\t\t\t\t\tlogits[..., :-1, :].contiguous().view(-1, vocab),\n\t\t\t\t\tlabels.view(-1),\n\t\t\t\t)\n\t\t\t\ttotal_loss += float(loss.detach().item())\n\t\t\t\tnum += 1\n\tavg_loss = total_loss / max(1, num)\n\ttry:\n\t\timport math\n\t\tppl = float(math.exp(avg_loss))\n\texcept Exception:\n\t\tppl = None\n\n\treport = {\n\t\t\"split\": \"test\",\n\t\t\"avg_loss\": avg_loss,\n\t\t\"ppl\": ppl,\n\t\t\"samples\": num,\n\t\t\"device\": str(device),\n\t\t\"ckpt_path\": str(ckpt_path),\n\t}\n\tif eval_model == \"llama_hf\":\n\t\tout_filename = \"MBPPEvalTestReport.llama.json\"\n\telse:\n\t\tout_filename = \"MBPPEvalTestReport.random.json\" if eval_random else \"MBPPEvalTestReport.json\"\n\tout_path = out_dir / out_filename\n\twith open(out_path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps({\"schema_version\": 1, \"EvalReport\": report}, indent=2))\n\tprint(str(out_path))\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","source_hash":"4a8585424d951009c2013785d3f0e83ddf8f75cbd6510db7aad9b48a641a59cc","truncated":false} 
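The evaluation loop above averages per-sample mean losses, so `avg_loss` (and hence `ppl`) weighs a short sample the same as a long one. If token-weighted perplexity is wanted instead, a minimal sketch (my names; it assumes the same `[B, T]` long tensors and dict-style model output used above):

import math
import torch
import torch.nn.functional as F

@torch.no_grad()
def eval_ppl(model, batches, pad_id: int) -> float:
    # Sum NLL over all non-pad next-token targets, then normalize by
    # the token count rather than the sample count.
    total_nll, total_tok = 0.0, 0
    for input_ids, attn_mask in batches:  # each [B, T]
        out = model(input_ids=input_ids, attention_mask=attn_mask, return_dict=True)
        logits = out["logits"]
        labels = input_ids[..., 1:].contiguous()
        labels = labels.masked_fill(labels == pad_id, -100)
        nll = F.cross_entropy(
            logits[..., :-1, :].contiguous().view(-1, logits.size(-1)),
            labels.view(-1),
            ignore_index=-100,
            reduction="sum",
        )
        total_nll += float(nll)
        total_tok += int((labels != -100).sum())
    return math.exp(total_nll / max(1, total_tok))

The two estimates agree only when every sample contributes the same number of non-pad targets; with `padding="max_length"` and varying text lengths, they generally diverge.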
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_mbpp_test_eval.__init__","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_mbpp_test_eval.__init__#L20-L26","kind":"function","name":"__init__","path":"examples/dataset_trainer/run_mbpp_test_eval.py","language":"python","start_line":20,"end_line":26,"context_start_line":1,"context_end_line":46,"code":"from __future__ import annotations\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport torch\nfrom torch.utils.data import Dataset\n\nROOT = EX_DIR\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids.unsqueeze(0), \"attn_mask\": attn.unsqueeze(0)}\n\n\ndef main() -> None:\n\t# Prefer evaluating the \"complete\" run by default\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tif not artifacts_root.exists():\n\t\t# Fallback to smoke artifacts if complete doesn't exist\n\t\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"","source_hash":"4a8585424d951009c2013785d3f0e83ddf8f75cbd6510db7aad9b48a641a59cc","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_mbpp_test_eval.__len__","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_mbpp_test_eval.__len__#L28-L29","kind":"function","name":"__len__","path":"examples/dataset_trainer/run_mbpp_test_eval.py","language":"python","start_line":28,"end_line":29,"context_start_line":8,"context_end_line":49,"code":"\nimport torch\nfrom torch.utils.data import Dataset\n\nROOT = EX_DIR\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": 
input_ids.unsqueeze(0), \"attn_mask\": attn.unsqueeze(0)}\n\n\ndef main() -> None:\n\t# Prefer evaluating the \"complete\" run by default\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tif not artifacts_root.exists():\n\t\t# Fallback to smoke artifacts if complete doesn't exist\n\t\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tckpt_path = adapters_dir / \"trained_ckpt\" / \"model.pt\"","source_hash":"4a8585424d951009c2013785d3f0e83ddf8f75cbd6510db7aad9b48a641a59cc","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.dataset_trainer.run_mbpp_test_eval.__getitem__","uri":"program://Program_Conditioned_Adapter/function/examples.dataset_trainer.run_mbpp_test_eval.__getitem__#L31-L37","kind":"function","name":"__getitem__","path":"examples/dataset_trainer/run_mbpp_test_eval.py","language":"python","start_line":31,"end_line":37,"context_start_line":11,"context_end_line":57,"code":"\nROOT = EX_DIR\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer\n\t\tself.max_len = int(max_len)\n\t\tself.samples: List[List[int]] = []\n\t\tfor t in texts:\n\t\t\tids = [int(i) for i in self.tok.encode(t)]\n\t\t\tself.samples.append(ids[: self.max_len])\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n\t\tids = self.samples[idx]\n\t\tpad_id = int(getattr(self.tok, \"pad_token_id\", 0) or 0)\n\t\tseq = ids + [pad_id] * max(0, self.max_len - len(ids))\n\t\tinput_ids = torch.tensor(seq[: self.max_len], dtype=torch.long)\n\t\tattn = (input_ids != pad_id).to(torch.long)\n\t\treturn {\"input_ids\": input_ids.unsqueeze(0), \"attn_mask\": attn.unsqueeze(0)}\n\n\ndef main() -> None:\n\t# Prefer evaluating the \"complete\" run by default\n\texample_dir = Path(__file__).resolve().parent\n\tartifacts_root = example_dir / \"artifacts\" / \"complete_dataset_trainer\"\n\tif not artifacts_root.exists():\n\t\t# Fallback to smoke artifacts if complete doesn't exist\n\t\tartifacts_root = example_dir / \"artifacts\" / \"smoke_dataset_trainer\"\n\tadapters_dir = artifacts_root\n\tout_dir = artifacts_root / \"outputs\"\n\tckpt_path = adapters_dir / \"trained_ckpt\" / \"model.pt\"\n\tout_dir.mkdir(parents=True, exist_ok=True)\n\teval_random = os.getenv(\"EVAL_RANDOM\", \"0\") == \"1\"\n\teval_model = os.getenv(\"EVAL_MODEL\", \"local\") # \"local\" (toy LM) or \"llama_hf\"\n\n\tif str(repo_root) not in sys.path:\n\t\tsys.path.insert(0, str(repo_root))\n\tpy_path = os.environ.get(\"PYTHONPATH\") or \"\"\n\tos.environ[\"PYTHONPATH\"] = f\"{str(repo_root)}{(':' + py_path) if py_path else ''}\"","source_hash":"4a8585424d951009c2013785d3f0e83ddf8f75cbd6510db7aad9b48a641a59cc","truncated":false} 
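Note that this eval-side `SimpleLmDataset.__getitem__` returns `unsqueeze(0)`-ed `[1, T]` tensors, baking in the batch dimension because `main` indexes the dataset directly instead of going through a DataLoader as the trainer does. For reference, the three report variants are selected purely by the environment variables read in `run_mbpp_test_eval.main`; a hypothetical Python driver (the script path and sequencing are illustrative, `check=True` propagates a non-zero exit):

import os
import subprocess
import sys

SCRIPT = "examples/dataset_trainer/run_mbpp_test_eval.py"  # illustrative path

for extra_env in (
    {},                          # trained local checkpoint -> MBPPEvalTestReport.json
    {"EVAL_RANDOM": "1"},        # random-init baseline     -> MBPPEvalTestReport.random.json
    {"EVAL_MODEL": "llama_hf"},  # HF Llama baseline        -> MBPPEvalTestReport.llama.json
):
    subprocess.run([sys.executable, SCRIPT], env={**os.environ, **extra_env}, check=True)

Setting `ADAPTER_PATH` alongside `EVAL_MODEL=llama_hf` additionally loads a PEFT adapter, per the branch shown above.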
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.run_smoke_example","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_planning.run_smoke_example#L1-L103","kind":"module","name":"examples.python_repo_grounded_planning.run_smoke_example","path":"examples/python_repo_grounded_planning/run_smoke_example.py","language":"python","start_line":1,"end_line":103,"context_start_line":1,"context_end_line":103,"code":"from __future__ import annotations\n\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n# Resolve program_conditioned_adapter directory directly from this file\nEX_DIR = Path(__file__).resolve().parents[2]\nSMOKE_REPO = EX_DIR / \"smoke_repo\"\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n# Program configuration\nfrom .program_config import load_program_config # noqa: E402\n\n\ndef _run(cmd: list[str]) -> int:\n print(\"[run]\", \" \".join(cmd))\n return subprocess.call(cmd)\n\n\ndef main() -> None:\n cfg = load_program_config(str(SMOKE_REPO))\n adapters_dir = cfg.paths.adapters_dir\n adapters_dir.mkdir(parents=True, exist_ok=True)\n # Emit planning knowledge artifacts (entities/edges/artifacts-derived plans)\n rc = _run([\n sys.executable,\n str(Path(__file__).resolve().parent / \"emit_planning_knowledge.py\"),\n \"--program\", str(SMOKE_REPO),\n \"--pg-backend\", (cfg.pg_backend if \":\" in cfg.pg_backend else PG_BACKEND),\n \"--out-dir\", str(cfg.paths.knowledge_dir),\n \"--max-modules\", \"200\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n\n # Build adapters and caches for the smoke repo (PCA-agnostic; PG via --pg-backend)\n rc = _run([\n sys.executable,\n str(EX_DIR / \"build.py\"),\n \"--sources\", str(SMOKE_REPO),\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(adapters_dir),\n \"--embed-dim\", \"256\",\n \"--include-text\",\n \"--text-max-bytes\", \"20000\",\n \"--pg-backend\", cfg.pg_backend if \":\" in cfg.pg_backend else PG_BACKEND,\n \"--graph-prop-hops\", \"2\",\n \"--graph-prop-damp\", \"0.85\",\n \"--contracts-require-citations\",\n \"--contracts-retrieval-policy\", str(cfg.contracts.retrieval_policy),\n \"--contracts-retrieval-temp\", str(cfg.contracts.retrieval_temp),\n \"--kbann-priors\",\n \"--knowledge-preset\",\n \"--auto-rank\",\n \"--rank-min\", \"8\",\n \"--rank-max\", \"16\",\n \"--init-program-state\",\n \"--program-state-path\", str(cfg.paths.program_state_path),\n \"--seed\", \"0\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n\n # Grounded planning prompt (facts-seeded steps + citations)\n prompt = (\n \"Produce a grounded step-by-step plan to add a new CLI subcommand \"\n \"that lists all modules and their public functions. 
Include specific paths and \"\n \"line ranges to modify or create, referencing code entities with citations.\"\n )\n\n # Run grounded planning (structured mode; citations enforced)\n rc = _run([\n sys.executable,\n str(EX_DIR / \"run.py\"),\n \"--sources\", str(SMOKE_REPO),\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(adapters_dir),\n \"--prompt\", prompt,\n \"--of-sources\", \"question\",\n \"--pack-context\",\n \"--pack-mode\", \"windows\",\n \"--context-tokens\", \"1200\",\n \"--require-citations\",\n \"--structured\",\n \"--citations-enforce\",\n \"--pg-backend\", cfg.pg_backend if \":\" in cfg.pg_backend else PG_BACKEND,\n \"--retrieval-policy\", cfg.contracts.retrieval_policy,\n \"--retrieval-temp\", str(cfg.contracts.retrieval_temp),\n \"--alpha-warmup\",\n \"--adapter-aware-decoding\",\n \"--program-state\", str(cfg.paths.program_state_path),\n \"--delta-cap\", \"0.05\",\n \"--verbose\",\n ])\n sys.exit(rc)\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"90a757abf38e6a5c96d8296e40920d18910bfd0e26b9576dd1ffe81cc7fb8786","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.run_smoke_example._run","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_planning.run_smoke_example._run#L15-L17","kind":"function","name":"_run","path":"examples/python_repo_grounded_planning/run_smoke_example.py","language":"python","start_line":15,"end_line":17,"context_start_line":1,"context_end_line":37,"code":"from __future__ import annotations\n\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n# Resolve program_conditioned_adapter directory directly from this file\nEX_DIR = Path(__file__).resolve().parents[2]\nSMOKE_REPO = EX_DIR / \"smoke_repo\"\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n# Program configuration\nfrom .program_config import load_program_config # noqa: E402\n\n\ndef _run(cmd: list[str]) -> int:\n print(\"[run]\", \" \".join(cmd))\n return subprocess.call(cmd)\n\n\ndef main() -> None:\n cfg = load_program_config(str(SMOKE_REPO))\n adapters_dir = cfg.paths.adapters_dir\n adapters_dir.mkdir(parents=True, exist_ok=True)\n # Emit planning knowledge artifacts (entities/edges/artifacts-derived plans)\n rc = _run([\n sys.executable,\n str(Path(__file__).resolve().parent / \"emit_planning_knowledge.py\"),\n \"--program\", str(SMOKE_REPO),\n \"--pg-backend\", (cfg.pg_backend if \":\" in cfg.pg_backend else PG_BACKEND),\n \"--out-dir\", str(cfg.paths.knowledge_dir),\n \"--max-modules\", \"200\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n\n # Build adapters and caches for the smoke repo (PCA-agnostic; PG via --pg-backend)","source_hash":"90a757abf38e6a5c96d8296e40920d18910bfd0e26b9576dd1ffe81cc7fb8786","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.run_smoke_example.main","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_planning.run_smoke_example.main#L20-L97","kind":"function","name":"main","path":"examples/python_repo_grounded_planning/run_smoke_example.py","language":"python","start_line":20,"end_line":97,"context_start_line":1,"context_end_line":103,"code":"from __future__ import annotations\n\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n# Resolve program_conditioned_adapter directory directly from this file\nEX_DIR = Path(__file__).resolve().parents[2]\nSMOKE_REPO = EX_DIR / 
\"smoke_repo\"\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n# Program configuration\nfrom .program_config import load_program_config # noqa: E402\n\n\ndef _run(cmd: list[str]) -> int:\n print(\"[run]\", \" \".join(cmd))\n return subprocess.call(cmd)\n\n\ndef main() -> None:\n cfg = load_program_config(str(SMOKE_REPO))\n adapters_dir = cfg.paths.adapters_dir\n adapters_dir.mkdir(parents=True, exist_ok=True)\n # Emit planning knowledge artifacts (entities/edges/artifacts-derived plans)\n rc = _run([\n sys.executable,\n str(Path(__file__).resolve().parent / \"emit_planning_knowledge.py\"),\n \"--program\", str(SMOKE_REPO),\n \"--pg-backend\", (cfg.pg_backend if \":\" in cfg.pg_backend else PG_BACKEND),\n \"--out-dir\", str(cfg.paths.knowledge_dir),\n \"--max-modules\", \"200\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n\n # Build adapters and caches for the smoke repo (PCA-agnostic; PG via --pg-backend)\n rc = _run([\n sys.executable,\n str(EX_DIR / \"build.py\"),\n \"--sources\", str(SMOKE_REPO),\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(adapters_dir),\n \"--embed-dim\", \"256\",\n \"--include-text\",\n \"--text-max-bytes\", \"20000\",\n \"--pg-backend\", cfg.pg_backend if \":\" in cfg.pg_backend else PG_BACKEND,\n \"--graph-prop-hops\", \"2\",\n \"--graph-prop-damp\", \"0.85\",\n \"--contracts-require-citations\",\n \"--contracts-retrieval-policy\", str(cfg.contracts.retrieval_policy),\n \"--contracts-retrieval-temp\", str(cfg.contracts.retrieval_temp),\n \"--kbann-priors\",\n \"--knowledge-preset\",\n \"--auto-rank\",\n \"--rank-min\", \"8\",\n \"--rank-max\", \"16\",\n \"--init-program-state\",\n \"--program-state-path\", str(cfg.paths.program_state_path),\n \"--seed\", \"0\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n\n # Grounded planning prompt (facts-seeded steps + citations)\n prompt = (\n \"Produce a grounded step-by-step plan to add a new CLI subcommand \"\n \"that lists all modules and their public functions. 
Include specific paths and \"\n \"line ranges to modify or create, referencing code entities with citations.\"\n )\n\n # Run grounded planning (structured mode; citations enforced)\n rc = _run([\n sys.executable,\n str(EX_DIR / \"run.py\"),\n \"--sources\", str(SMOKE_REPO),\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(adapters_dir),\n \"--prompt\", prompt,\n \"--of-sources\", \"question\",\n \"--pack-context\",\n \"--pack-mode\", \"windows\",\n \"--context-tokens\", \"1200\",\n \"--require-citations\",\n \"--structured\",\n \"--citations-enforce\",\n \"--pg-backend\", cfg.pg_backend if \":\" in cfg.pg_backend else PG_BACKEND,\n \"--retrieval-policy\", cfg.contracts.retrieval_policy,\n \"--retrieval-temp\", str(cfg.contracts.retrieval_temp),\n \"--alpha-warmup\",\n \"--adapter-aware-decoding\",\n \"--program-state\", str(cfg.paths.program_state_path),\n \"--delta-cap\", \"0.05\",\n \"--verbose\",\n ])\n sys.exit(rc)\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"90a757abf38e6a5c96d8296e40920d18910bfd0e26b9576dd1ffe81cc7fb8786","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.program_config","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_planning.program_config#L1-L54","kind":"module","name":"examples.python_repo_grounded_planning.program_config","path":"examples/python_repo_grounded_planning/program_config.py","language":"python","start_line":1,"end_line":54,"context_start_line":1,"context_end_line":54,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4,plan:0.2\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_dir: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n # Reuse the QA PythonRepoGraph backend by default\n return \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n\n\ndef load_program_config(repo_root: str) -> ProgramConfig:\n repo = Path(repo_root).resolve()\n example_dir = Path(__file__).resolve().parent\n adapters_dir = example_dir / \"artifacts\" / \"smoke_planning\"\n knowledge_dir = adapters_dir # colocate planning knowledge\n pg_backend = _detect_pg_backend(example_dir)\n program_id = repo.name or \"repo\"\n return ProgramConfig(\n program_id=program_id,\n pg_backend=pg_backend,\n paths=ProgramPaths(\n adapters_dir=adapters_dir,\n knowledge_dir=knowledge_dir,\n program_state_path=adapters_dir / \".program_state.json\",\n ),\n contracts=ProgramContracts(),\n )\n\n","source_hash":"b27f3c01fb9e7472f3641d69d5ee00db578c62f3ab0f3ef5b59f063082bf7085","truncated":false} 
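The two `_run` helpers visible in this section are not equivalent: `run_smoke_example._run` delegates to `subprocess.call(cmd)`, which hands the argv list directly to the child, while the dataset_trainer variant shells out via `os.system(" ".join(cmd))`, which mis-handles paths containing spaces and returns a platform-encoded wait status rather than a plain exit code. A sketch of one safe helper both could share (the name `run_step` is mine); it also folds in the `if rc != 0: sys.exit(rc)` check repeated after every stage above:

import subprocess
import sys

def run_step(cmd: list[str]) -> None:
    print("[run]", " ".join(cmd))
    try:
        # No shell, no re-quoting; raises CalledProcessError on a non-zero exit.
        subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError as e:
        sys.exit(e.returncode)

With this helper, the emit -> build -> run pipeline in `run_smoke_example.main` reduces to three `run_step([...])` calls with no manual `rc` bookkeeping.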
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.program_config.ProgramContracts","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_planning.program_config.ProgramContracts#L9-L13","kind":"class","name":"ProgramContracts","path":"examples/python_repo_grounded_planning/program_config.py","language":"python","start_line":9,"end_line":13,"context_start_line":1,"context_end_line":33,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4,plan:0.2\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_dir: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n # Reuse the QA PythonRepoGraph backend by default\n return \"examples.scripts.python_repo_graph:PythonRepoGraph\"","source_hash":"b27f3c01fb9e7472f3641d69d5ee00db578c62f3ab0f3ef5b59f063082bf7085","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.program_config.ProgramPaths","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_planning.program_config.ProgramPaths#L17-L20","kind":"class","name":"ProgramPaths","path":"examples/python_repo_grounded_planning/program_config.py","language":"python","start_line":17,"end_line":20,"context_start_line":1,"context_end_line":40,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4,plan:0.2\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_dir: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n # Reuse the QA PythonRepoGraph backend by default\n return \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n\n\ndef load_program_config(repo_root: str) -> ProgramConfig:\n repo = Path(repo_root).resolve()\n example_dir = Path(__file__).resolve().parent\n adapters_dir = example_dir / \"artifacts\" / \"smoke_planning\"\n knowledge_dir = adapters_dir # colocate planning knowledge","source_hash":"b27f3c01fb9e7472f3641d69d5ee00db578c62f3ab0f3ef5b59f063082bf7085","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.program_config.ProgramConfig","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_planning.program_config.ProgramConfig#L24-L28","kind":"class","name":"ProgramConfig","path":"examples/python_repo_grounded_planning/program_config.py","language":"python","start_line":24,"end_line":28,"context_start_line":4,"context_end_line":48,"code":"from pathlib import Path\nfrom typing import 
Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4,plan:0.2\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_dir: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n # Reuse the QA PythonRepoGraph backend by default\n return \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n\n\ndef load_program_config(repo_root: str) -> ProgramConfig:\n repo = Path(repo_root).resolve()\n example_dir = Path(__file__).resolve().parent\n adapters_dir = example_dir / \"artifacts\" / \"smoke_planning\"\n knowledge_dir = adapters_dir # colocate planning knowledge\n pg_backend = _detect_pg_backend(example_dir)\n program_id = repo.name or \"repo\"\n return ProgramConfig(\n program_id=program_id,\n pg_backend=pg_backend,\n paths=ProgramPaths(\n adapters_dir=adapters_dir,\n knowledge_dir=knowledge_dir,","source_hash":"b27f3c01fb9e7472f3641d69d5ee00db578c62f3ab0f3ef5b59f063082bf7085","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.program_config._detect_pg_backend","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_planning.program_config._detect_pg_backend#L31-L33","kind":"function","name":"_detect_pg_backend","path":"examples/python_repo_grounded_planning/program_config.py","language":"python","start_line":31,"end_line":33,"context_start_line":11,"context_end_line":53,"code":" citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4,plan:0.2\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_dir: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n # Reuse the QA PythonRepoGraph backend by default\n return \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n\n\ndef load_program_config(repo_root: str) -> ProgramConfig:\n repo = Path(repo_root).resolve()\n example_dir = Path(__file__).resolve().parent\n adapters_dir = example_dir / \"artifacts\" / \"smoke_planning\"\n knowledge_dir = adapters_dir # colocate planning knowledge\n pg_backend = _detect_pg_backend(example_dir)\n program_id = repo.name or \"repo\"\n return ProgramConfig(\n program_id=program_id,\n pg_backend=pg_backend,\n paths=ProgramPaths(\n adapters_dir=adapters_dir,\n knowledge_dir=knowledge_dir,\n program_state_path=adapters_dir / \".program_state.json\",\n ),\n contracts=ProgramContracts(),\n )\n","source_hash":"b27f3c01fb9e7472f3641d69d5ee00db578c62f3ab0f3ef5b59f063082bf7085","truncated":false} 
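Note: _detect_pg_backend above returns a "module:Class" dotted path, the same convention that _load_symbol in emit_planning_knowledge.py (later in this dump) resolves. A self-contained sketch of that resolution step; the validation and error message are additions for illustration, not part of the original helper.

import importlib
from typing import Any

def load_backend(dotted: str) -> Any:
    """Resolve 'pkg.module:Attr' to the attribute it names."""
    mod_name, sep, attr = dotted.partition(":")
    if not sep or not attr:
        raise ValueError(f"expected 'module:attr', got {dotted!r}")
    module = importlib.import_module(mod_name)
    return getattr(module, attr)

# Usage, mirroring emit_planning_knowledge.main (requires the package on sys.path):
# PythonRepoGraph = load_backend("examples.scripts.python_repo_graph:PythonRepoGraph")
# pg = PythonRepoGraph("/path/to/repo", ignore=None)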
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.program_config.load_program_config","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_planning.program_config.load_program_config#L36-L52","kind":"function","name":"load_program_config","path":"examples/python_repo_grounded_planning/program_config.py","language":"python","start_line":36,"end_line":52,"context_start_line":16,"context_end_line":54,"code":"@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_dir: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(example_dir: Path) -> str:\n # Reuse the QA PythonRepoGraph backend by default\n return \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n\n\ndef load_program_config(repo_root: str) -> ProgramConfig:\n repo = Path(repo_root).resolve()\n example_dir = Path(__file__).resolve().parent\n adapters_dir = example_dir / \"artifacts\" / \"smoke_planning\"\n knowledge_dir = adapters_dir # colocate planning knowledge\n pg_backend = _detect_pg_backend(example_dir)\n program_id = repo.name or \"repo\"\n return ProgramConfig(\n program_id=program_id,\n pg_backend=pg_backend,\n paths=ProgramPaths(\n adapters_dir=adapters_dir,\n knowledge_dir=knowledge_dir,\n program_state_path=adapters_dir / \".program_state.json\",\n ),\n contracts=ProgramContracts(),\n )\n\n","source_hash":"b27f3c01fb9e7472f3641d69d5ee00db578c62f3ab0f3ef5b59f063082bf7085","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.smoke_plan","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_planning.smoke_plan#L1-L87","kind":"module","name":"examples.python_repo_grounded_planning.smoke_plan","path":"examples/python_repo_grounded_planning/smoke_plan.py","language":"python","start_line":1,"end_line":87,"context_start_line":1,"context_end_line":87,"code":"from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nimport subprocess\n\nEX_DIR = Path(\"/data/transformer_10/examples/program_conditioned_adapter\")\nSMOKE_PROG = Path(\"/data/transformer_10/examples/program_conditioned_adapter/smoke_repo\")\nART_DIR = EX_DIR / \"examples\" / \"python_repo_grounded_planning\" / \"artifacts\" / \"smoke_planning\"\n\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\nEMIT_MOD = \"examples.python_repo_grounded_planning.emit_planning_knowledge\"\n\n\ndef _run(argv: list[str]) -> int:\n print(\"[run]\", \" \".join(argv))\n return subprocess.call(argv)\n\n\ndef main() -> None:\n ART_DIR.mkdir(parents=True, exist_ok=True)\n\n rc = _run([\n sys.executable, \"-m\", \"build\",\n \"--program\", str(SMOKE_PROG),\n \"--pg-backend\", PG_BACKEND,\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(ART_DIR),\n \"--embed-dim\", \"256\",\n \"--include-text\",\n \"--text-max-bytes\", \"20000\",\n \"--graph-prop-hops\", \"1\",\n \"--graph-prop-damp\", \"0.85\",\n \"--code-recall-preset\",\n \"--auto-rank\",\n \"--rank-min\", \"4\",\n \"--rank-max\", \"16\",\n \"--seed\", \"0\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n\n rc = _run([\n sys.executable, \"-m\", EMIT_MOD,\n \"--program\", str(SMOKE_PROG),\n \"--pg-backend\", PG_BACKEND,\n \"--out-dir\", str(ART_DIR),\n \"--max-modules\", \"200\",\n 
\"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n\n prompt = (\n \"Produce a grounded step-by-step plan to add a new CLI subcommand that lists all modules \"\n \"and their public functions. Include exact files or artifact URIs and line ranges to modify/create, \"\n \"and cite entities/windows.\"\n )\n\n rc = _run([\n sys.executable, \"-m\", \"run\",\n \"--program\", str(SMOKE_PROG),\n \"--pg-backend\", PG_BACKEND,\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(ART_DIR),\n \"--prompt\", prompt,\n \"--of-sources\", \"question\",\n \"--pack-context\",\n \"--pack-mode\", \"windows\",\n \"--context-tokens\", \"1200\",\n \"--require-citations\",\n \"--structured\",\n \"--citations-enforce\",\n \"--retrieval-policy\", \"sim:0.45,struct:0.35,plan:0.20\",\n \"--retrieval-temp\", \"0.7\",\n \"--alpha-warmup\",\n \"--adapter-aware-decoding\",\n \"--verbose\",\n ])\n sys.exit(rc)\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"f4465ac41da44f719cfa99a4831661f80813ad48f5e2f4e6c1b806c7a6f018e4","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.smoke_plan._run","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_planning.smoke_plan._run#L15-L17","kind":"function","name":"_run","path":"examples/python_repo_grounded_planning/smoke_plan.py","language":"python","start_line":15,"end_line":17,"context_start_line":1,"context_end_line":37,"code":"from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nimport subprocess\n\nEX_DIR = Path(\"/data/transformer_10/examples/program_conditioned_adapter\")\nSMOKE_PROG = Path(\"/data/transformer_10/examples/program_conditioned_adapter/smoke_repo\")\nART_DIR = EX_DIR / \"examples\" / \"python_repo_grounded_planning\" / \"artifacts\" / \"smoke_planning\"\n\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\nEMIT_MOD = \"examples.python_repo_grounded_planning.emit_planning_knowledge\"\n\n\ndef _run(argv: list[str]) -> int:\n print(\"[run]\", \" \".join(argv))\n return subprocess.call(argv)\n\n\ndef main() -> None:\n ART_DIR.mkdir(parents=True, exist_ok=True)\n\n rc = _run([\n sys.executable, \"-m\", \"build\",\n \"--program\", str(SMOKE_PROG),\n \"--pg-backend\", PG_BACKEND,\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(ART_DIR),\n \"--embed-dim\", \"256\",\n \"--include-text\",\n \"--text-max-bytes\", \"20000\",\n \"--graph-prop-hops\", \"1\",\n \"--graph-prop-damp\", \"0.85\",\n \"--code-recall-preset\",\n \"--auto-rank\",\n \"--rank-min\", \"4\",\n \"--rank-max\", \"16\",","source_hash":"f4465ac41da44f719cfa99a4831661f80813ad48f5e2f4e6c1b806c7a6f018e4","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.smoke_plan.main","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_planning.smoke_plan.main#L20-L81","kind":"function","name":"main","path":"examples/python_repo_grounded_planning/smoke_plan.py","language":"python","start_line":20,"end_line":81,"context_start_line":1,"context_end_line":87,"code":"from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nimport subprocess\n\nEX_DIR = Path(\"/data/transformer_10/examples/program_conditioned_adapter\")\nSMOKE_PROG = Path(\"/data/transformer_10/examples/program_conditioned_adapter/smoke_repo\")\nART_DIR = EX_DIR / \"examples\" / \"python_repo_grounded_planning\" / \"artifacts\" / 
\"smoke_planning\"\n\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\nEMIT_MOD = \"examples.python_repo_grounded_planning.emit_planning_knowledge\"\n\n\ndef _run(argv: list[str]) -> int:\n print(\"[run]\", \" \".join(argv))\n return subprocess.call(argv)\n\n\ndef main() -> None:\n ART_DIR.mkdir(parents=True, exist_ok=True)\n\n rc = _run([\n sys.executable, \"-m\", \"build\",\n \"--program\", str(SMOKE_PROG),\n \"--pg-backend\", PG_BACKEND,\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(ART_DIR),\n \"--embed-dim\", \"256\",\n \"--include-text\",\n \"--text-max-bytes\", \"20000\",\n \"--graph-prop-hops\", \"1\",\n \"--graph-prop-damp\", \"0.85\",\n \"--code-recall-preset\",\n \"--auto-rank\",\n \"--rank-min\", \"4\",\n \"--rank-max\", \"16\",\n \"--seed\", \"0\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n\n rc = _run([\n sys.executable, \"-m\", EMIT_MOD,\n \"--program\", str(SMOKE_PROG),\n \"--pg-backend\", PG_BACKEND,\n \"--out-dir\", str(ART_DIR),\n \"--max-modules\", \"200\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n\n prompt = (\n \"Produce a grounded step-by-step plan to add a new CLI subcommand that lists all modules \"\n \"and their public functions. Include exact files or artifact URIs and line ranges to modify/create, \"\n \"and cite entities/windows.\"\n )\n\n rc = _run([\n sys.executable, \"-m\", \"run\",\n \"--program\", str(SMOKE_PROG),\n \"--pg-backend\", PG_BACKEND,\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(ART_DIR),\n \"--prompt\", prompt,\n \"--of-sources\", \"question\",\n \"--pack-context\",\n \"--pack-mode\", \"windows\",\n \"--context-tokens\", \"1200\",\n \"--require-citations\",\n \"--structured\",\n \"--citations-enforce\",\n \"--retrieval-policy\", \"sim:0.45,struct:0.35,plan:0.20\",\n \"--retrieval-temp\", \"0.7\",\n \"--alpha-warmup\",\n \"--adapter-aware-decoding\",\n \"--verbose\",\n ])\n sys.exit(rc)\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"f4465ac41da44f719cfa99a4831661f80813ad48f5e2f4e6c1b806c7a6f018e4","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.emit_planning_knowledge","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_planning.emit_planning_knowledge#L1-L176","kind":"module","name":"examples.python_repo_grounded_planning.emit_planning_knowledge","path":"examples/python_repo_grounded_planning/emit_planning_knowledge.py","language":"python","start_line":1,"end_line":176,"context_start_line":1,"context_end_line":176,"code":"from __future__ import annotations\n\nimport argparse\nimport json\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Optional, Set\nimport importlib\nimport numpy as np\n\n\ndef _load_symbol(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n\n\ndef _write_jsonl(path: Path, rows: List[Dict]) -> None:\n with path.open(\"w\", encoding=\"utf-8\") as f:\n for r in rows:\n f.write(json.dumps(r) + \"\\n\")\n\n\ndef main() -> None:\n ap = argparse.ArgumentParser()\n ap.add_argument(\"--program\", required=True)\n ap.add_argument(\"--pg-backend\", required=True)\n ap.add_argument(\"--out-dir\", required=True)\n ap.add_argument(\"--max-modules\", type=int, default=200)\n ap.add_argument(\"--verbose\", action=\"store_true\")\n args = ap.parse_args()\n\n out_dir = Path(args.out_dir)\n out_dir.mkdir(parents=True, exist_ok=True)\n\n 
pg_ctor = _load_symbol(args.pg_backend)\n pg = pg_ctor(args.program, ignore=None)\n\n # Collect entities and a simple owner map\n entities = list(pg.entities())\n modules = [e for e in entities if e.kind == \"module\"]\n functions = [e for e in entities if e.kind == \"function\"]\n classes = [e for e in entities if e.kind == \"class\"]\n\n # 1) planning_components.jsonl (owners/modules with primary artifact spans)\n comps: List[Dict] = []\n for m in modules[: max(1, int(args.max_modules))]:\n try:\n ra = pg.resolve(m.uri)\n comps.append({\n \"owner\": (m.name or m.id),\n \"kind\": \"module\",\n \"artifact_uri\": ra.artifact_uri,\n \"span\": {\"start\": int(ra.span.start_line), \"end\": int(ra.span.end_line)},\n })\n except Exception:\n continue\n _write_jsonl(out_dir / \"planning_components.jsonl\", comps)\n\n # 2) planning_entrypoints.jsonl (very light heuristic: functions named main or having owner 'cli'/'__main__')\n entries: List[Dict] = []\n for fn in functions:\n name = (fn.name or \"\").lower()\n own = (fn.owner or \"\").lower()\n if name == \"main\" or \"__main__\" in own or \"cli\" in own:\n try:\n ra = pg.resolve(fn.uri)\n entries.append({\n \"type\": \"cli\",\n \"name\": fn.name,\n \"handler\": f\"{own}:{fn.name}\" if own and fn.name else fn.id,\n \"decl_uri\": ra.artifact_uri,\n \"span\": {\"start\": int(ra.span.start_line), \"end\": int(ra.span.end_line)},\n })\n except Exception:\n continue\n _write_jsonl(out_dir / \"planning_entrypoints.jsonl\", entries)\n\n # 3) planning_mutations.jsonl (modules as candidate edit sites with affordances)\n muts: List[Dict] = []\n for m in modules[: max(1, int(args.max_modules))]:\n try:\n ra = pg.resolve(m.uri)\n muts.append({\n \"target\": (m.name or m.id),\n \"kind\": \"module\",\n \"artifact_uri\": ra.artifact_uri,\n \"affordances\": [\"add_function\", \"edit_imports\", \"edit_exports\"],\n })\n except Exception:\n continue\n _write_jsonl(out_dir / \"planning_mutations.jsonl\", muts)\n\n # 4) planning_tests_map.jsonl (map owners to test files by simple heuristic)\n tests_map: List[Dict] = []\n # harvest artifacts(kind=\"source\") and flag ones with \"test\" in path\n test_arts: Set[str] = set()\n for art in pg.artifacts(\"source\"):\n p = art.uri.lower()\n if (\"test\" in p) or (\"/tests/\" in p):\n test_arts.add(art.uri)\n for m in modules:\n # naive mapping: module owner name in test path\n owner = (m.name or m.id)\n for ta in test_arts:\n if owner.split(\".\")[-1] in ta:\n tests_map.append({\n \"owner\": owner,\n \"test_file\": ta,\n \"span\": {\"start\": 1, \"end\": 1},\n })\n break\n _write_jsonl(out_dir / \"planning_tests_map.jsonl\", tests_map)\n\n # 5) planning_dependencies.jsonl (import/call edges at entity granularity)\n deps: List[Dict] = []\n try:\n for e in pg.edges():\n deps.append({\"src\": e.src, \"dst\": e.dst, \"edge\": e.type})\n except Exception:\n deps = []\n _write_jsonl(out_dir / \"planning_dependencies.jsonl\", deps)\n\n # 6) planning_rerank_features.npz (very light owner graph centrality and placeholders)\n owners = sorted(list({(m.owner or m.name or m.id) for m in modules}))\n owner_index: Dict[str, int] = {o: i for i, o in enumerate(owners)}\n centrality = np.zeros((len(owners),), dtype=np.float32)\n try:\n owner_edges: Dict[str, Set[str]] = {}\n for e in pg.edges():\n # map entity ids to owners if available\n srco = None\n dsto = None\n # build entity -> owner map lazily\n ent_owner: Dict[str, Optional[str]] = {}\n for ent in entities:\n ent_owner[ent.id] = ent.owner\n for e in pg.edges():\n so = 
ent_owner.get(e.src)\n do = ent_owner.get(e.dst)\n if so and do:\n owner_edges.setdefault(so, set()).add(do)\n owner_edges.setdefault(do, set()).add(so)\n for o, nbs in owner_edges.items():\n idx = owner_index.get(o)\n if idx is not None:\n centrality[idx] = float(len(nbs))\n if centrality.max() > 0:\n centrality = centrality / centrality.max()\n except Exception:\n centrality = np.zeros((len(owners),), dtype=np.float32)\n registry_score = np.zeros_like(centrality, dtype=np.float32)\n test_coverage = np.zeros_like(centrality, dtype=np.float32)\n np.savez_compressed(\n out_dir / \"planning_rerank_features.npz\",\n owners=np.array(owners, dtype=object),\n centrality=centrality,\n registry_score=registry_score,\n test_coverage=test_coverage,\n )\n\n if args.verbose:\n print(json.dumps({\n \"components\": len(comps),\n \"entrypoints\": len(entries),\n \"mutations\": len(muts),\n \"tests_map\": len(tests_map),\n \"dependencies\": len(deps),\n \"owners\": len(owners),\n }, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"1577ce0ce444b6f02d027e228a131a0b738b5ee812804494ea9dad0b712240f0","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.emit_planning_knowledge._load_symbol","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_planning.emit_planning_knowledge._load_symbol#L12-L15","kind":"function","name":"_load_symbol","path":"examples/python_repo_grounded_planning/emit_planning_knowledge.py","language":"python","start_line":12,"end_line":15,"context_start_line":1,"context_end_line":35,"code":"from __future__ import annotations\n\nimport argparse\nimport json\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Optional, Set\nimport importlib\nimport numpy as np\n\n\ndef _load_symbol(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n\n\ndef _write_jsonl(path: Path, rows: List[Dict]) -> None:\n with path.open(\"w\", encoding=\"utf-8\") as f:\n for r in rows:\n f.write(json.dumps(r) + \"\\n\")\n\n\ndef main() -> None:\n ap = argparse.ArgumentParser()\n ap.add_argument(\"--program\", required=True)\n ap.add_argument(\"--pg-backend\", required=True)\n ap.add_argument(\"--out-dir\", required=True)\n ap.add_argument(\"--max-modules\", type=int, default=200)\n ap.add_argument(\"--verbose\", action=\"store_true\")\n args = ap.parse_args()\n\n out_dir = Path(args.out_dir)\n out_dir.mkdir(parents=True, exist_ok=True)\n","source_hash":"1577ce0ce444b6f02d027e228a131a0b738b5ee812804494ea9dad0b712240f0","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.emit_planning_knowledge._write_jsonl","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_planning.emit_planning_knowledge._write_jsonl#L18-L21","kind":"function","name":"_write_jsonl","path":"examples/python_repo_grounded_planning/emit_planning_knowledge.py","language":"python","start_line":18,"end_line":21,"context_start_line":1,"context_end_line":41,"code":"from __future__ import annotations\n\nimport argparse\nimport json\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Optional, Set\nimport importlib\nimport numpy as np\n\n\ndef _load_symbol(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n\n\ndef _write_jsonl(path: Path, rows: List[Dict]) -> None:\n with 
path.open(\"w\", encoding=\"utf-8\") as f:\n for r in rows:\n f.write(json.dumps(r) + \"\\n\")\n\n\ndef main() -> None:\n ap = argparse.ArgumentParser()\n ap.add_argument(\"--program\", required=True)\n ap.add_argument(\"--pg-backend\", required=True)\n ap.add_argument(\"--out-dir\", required=True)\n ap.add_argument(\"--max-modules\", type=int, default=200)\n ap.add_argument(\"--verbose\", action=\"store_true\")\n args = ap.parse_args()\n\n out_dir = Path(args.out_dir)\n out_dir.mkdir(parents=True, exist_ok=True)\n\n pg_ctor = _load_symbol(args.pg_backend)\n pg = pg_ctor(args.program, ignore=None)\n\n # Collect entities and a simple owner map\n entities = list(pg.entities())\n modules = [e for e in entities if e.kind == \"module\"]","source_hash":"1577ce0ce444b6f02d027e228a131a0b738b5ee812804494ea9dad0b712240f0","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_planning.emit_planning_knowledge.main","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_planning.emit_planning_knowledge.main#L24-L170","kind":"function","name":"main","path":"examples/python_repo_grounded_planning/emit_planning_knowledge.py","language":"python","start_line":24,"end_line":170,"context_start_line":4,"context_end_line":176,"code":"import json\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Optional, Set\nimport importlib\nimport numpy as np\n\n\ndef _load_symbol(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n\n\ndef _write_jsonl(path: Path, rows: List[Dict]) -> None:\n with path.open(\"w\", encoding=\"utf-8\") as f:\n for r in rows:\n f.write(json.dumps(r) + \"\\n\")\n\n\ndef main() -> None:\n ap = argparse.ArgumentParser()\n ap.add_argument(\"--program\", required=True)\n ap.add_argument(\"--pg-backend\", required=True)\n ap.add_argument(\"--out-dir\", required=True)\n ap.add_argument(\"--max-modules\", type=int, default=200)\n ap.add_argument(\"--verbose\", action=\"store_true\")\n args = ap.parse_args()\n\n out_dir = Path(args.out_dir)\n out_dir.mkdir(parents=True, exist_ok=True)\n\n pg_ctor = _load_symbol(args.pg_backend)\n pg = pg_ctor(args.program, ignore=None)\n\n # Collect entities and a simple owner map\n entities = list(pg.entities())\n modules = [e for e in entities if e.kind == \"module\"]\n functions = [e for e in entities if e.kind == \"function\"]\n classes = [e for e in entities if e.kind == \"class\"]\n\n # 1) planning_components.jsonl (owners/modules with primary artifact spans)\n comps: List[Dict] = []\n for m in modules[: max(1, int(args.max_modules))]:\n try:\n ra = pg.resolve(m.uri)\n comps.append({\n \"owner\": (m.name or m.id),\n \"kind\": \"module\",\n \"artifact_uri\": ra.artifact_uri,\n \"span\": {\"start\": int(ra.span.start_line), \"end\": int(ra.span.end_line)},\n })\n except Exception:\n continue\n _write_jsonl(out_dir / \"planning_components.jsonl\", comps)\n\n # 2) planning_entrypoints.jsonl (very light heuristic: functions named main or having owner 'cli'/'__main__')\n entries: List[Dict] = []\n for fn in functions:\n name = (fn.name or \"\").lower()\n own = (fn.owner or \"\").lower()\n if name == \"main\" or \"__main__\" in own or \"cli\" in own:\n try:\n ra = pg.resolve(fn.uri)\n entries.append({\n \"type\": \"cli\",\n \"name\": fn.name,\n \"handler\": f\"{own}:{fn.name}\" if own and fn.name else fn.id,\n \"decl_uri\": ra.artifact_uri,\n \"span\": {\"start\": int(ra.span.start_line), \"end\": 
int(ra.span.end_line)},\n })\n except Exception:\n continue\n _write_jsonl(out_dir / \"planning_entrypoints.jsonl\", entries)\n\n # 3) planning_mutations.jsonl (modules as candidate edit sites with affordances)\n muts: List[Dict] = []\n for m in modules[: max(1, int(args.max_modules))]:\n try:\n ra = pg.resolve(m.uri)\n muts.append({\n \"target\": (m.name or m.id),\n \"kind\": \"module\",\n \"artifact_uri\": ra.artifact_uri,\n \"affordances\": [\"add_function\", \"edit_imports\", \"edit_exports\"],\n })\n except Exception:\n continue\n _write_jsonl(out_dir / \"planning_mutations.jsonl\", muts)\n\n # 4) planning_tests_map.jsonl (map owners to test files by simple heuristic)\n tests_map: List[Dict] = []\n # harvest artifacts(kind=\"source\") and flag ones with \"test\" in path\n test_arts: Set[str] = set()\n for art in pg.artifacts(\"source\"):\n p = art.uri.lower()\n if (\"test\" in p) or (\"/tests/\" in p):\n test_arts.add(art.uri)\n for m in modules:\n # naive mapping: module owner name in test path\n owner = (m.name or m.id)\n for ta in test_arts:\n if owner.split(\".\")[-1] in ta:\n tests_map.append({\n \"owner\": owner,\n \"test_file\": ta,\n \"span\": {\"start\": 1, \"end\": 1},\n })\n break\n _write_jsonl(out_dir / \"planning_tests_map.jsonl\", tests_map)\n\n # 5) planning_dependencies.jsonl (import/call edges at entity granularity)\n deps: List[Dict] = []\n try:\n for e in pg.edges():\n deps.append({\"src\": e.src, \"dst\": e.dst, \"edge\": e.type})\n except Exception:\n deps = []\n _write_jsonl(out_dir / \"planning_dependencies.jsonl\", deps)\n\n # 6) planning_rerank_features.npz (very light owner graph centrality and placeholders)\n owners = sorted(list({(m.owner or m.name or m.id) for m in modules}))\n owner_index: Dict[str, int] = {o: i for i, o in enumerate(owners)}\n centrality = np.zeros((len(owners),), dtype=np.float32)\n try:\n owner_edges: Dict[str, Set[str]] = {}\n for e in pg.edges():\n # map entity ids to owners if available\n srco = None\n dsto = None\n # build entity -> owner map lazily\n ent_owner: Dict[str, Optional[str]] = {}\n for ent in entities:\n ent_owner[ent.id] = ent.owner\n for e in pg.edges():\n so = ent_owner.get(e.src)\n do = ent_owner.get(e.dst)\n if so and do:\n owner_edges.setdefault(so, set()).add(do)\n owner_edges.setdefault(do, set()).add(so)\n for o, nbs in owner_edges.items():\n idx = owner_index.get(o)\n if idx is not None:\n centrality[idx] = float(len(nbs))\n if centrality.max() > 0:\n centrality = centrality / centrality.max()\n except Exception:\n centrality = np.zeros((len(owners),), dtype=np.float32)\n registry_score = np.zeros_like(centrality, dtype=np.float32)\n test_coverage = np.zeros_like(centrality, dtype=np.float32)\n np.savez_compressed(\n out_dir / \"planning_rerank_features.npz\",\n owners=np.array(owners, dtype=object),\n centrality=centrality,\n registry_score=registry_score,\n test_coverage=test_coverage,\n )\n\n if args.verbose:\n print(json.dumps({\n \"components\": len(comps),\n \"entrypoints\": len(entries),\n \"mutations\": len(muts),\n \"tests_map\": len(tests_map),\n \"dependencies\": len(deps),\n \"owners\": len(owners),\n }, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"1577ce0ce444b6f02d027e228a131a0b738b5ee812804494ea9dad0b712240f0","truncated":false} 
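Note: in the centrality block of emit_planning_knowledge.main above, the entity-to-owner map is rebuilt and pg.edges() is re-iterated inside an outer `for e in pg.edges():` loop, so the work grows quadratically with edge count and the srco/dsto locals are never used. A single-pass sketch of the same owner-degree computation, assuming only the ProgramGraph interface the record already uses (entities with .id/.owner, edges with .src/.dst):

import numpy as np
from typing import Dict, List, Optional, Set

def owner_degree_centrality(pg, owners: List[str]) -> np.ndarray:
    """Max-normalized degree centrality over the undirected owner graph."""
    owner_index: Dict[str, int] = {o: i for i, o in enumerate(owners)}
    # Build the entity -> owner map once, outside the edge loop.
    ent_owner: Dict[str, Optional[str]] = {e.id: e.owner for e in pg.entities()}
    owner_edges: Dict[str, Set[str]] = {}
    for e in pg.edges():  # single pass over edges
        so, do = ent_owner.get(e.src), ent_owner.get(e.dst)
        if so and do:
            owner_edges.setdefault(so, set()).add(do)
            owner_edges.setdefault(do, set()).add(so)
    centrality = np.zeros((len(owners),), dtype=np.float32)
    for o, nbs in owner_edges.items():
        idx = owner_index.get(o)
        if idx is not None:
            centrality[idx] = float(len(nbs))
    if centrality.size and centrality.max() > 0:
        centrality = centrality / centrality.max()
    return centrality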
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.embedding","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.embedding#L1-L302","kind":"module","name":"examples.scripts.embedding","path":"examples/scripts/embedding.py","language":"python","start_line":1,"end_line":302,"context_start_line":1,"context_end_line":302,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport os\nimport re\nimport numpy as np\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\ntry:\n import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\n\nfrom examples.scripts.code_graph import CodeGraph\n\ndef build_repo_embedding(\n repo_root: str,\n *,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_text: bool = False,\n text_max_bytes: int = 0,\n max_text_tokens: int = 0,\n text_weight: float = 0.25,\n calls_weight: float = 0.25,\n types_weight: float = 0.20,\n tests_weight: float = 0.15,\n graph_prop_hops: int = 0,\n graph_prop_damp: float = 0.85,\n ignore: Optional[List[str]] = None,\n) -> Dict[str, np.ndarray]:\n g = CodeGraph.load_or_build(repo_root, ignore=ignore)\n\n # Collect lightweight features\n sym_feats: List[Tuple[str, float]] = []\n doc_feats: List[Tuple[str, float]] = []\n mod_feats: List[Tuple[str, float]] = []\n type_feats: List[Tuple[str, float]] = []\n test_counts: Dict[str, int] = {}\n\n for mod, nodes in g.pytest_nodes_by_module.items():\n test_counts[mod] = len(nodes or [])\n\n for _fqn, s in g.symbols_by_fqn.items():\n base = f\"{s.kind}:{s.name}\"\n sym_feats.append((base, 1.0))\n if s.signature:\n # keep in sym for backward-compat\n sym_feats.append((f\"sig:{s.signature}\", 0.5))\n type_feats.append((f\"sig:{s.signature}\", 1.0))\n if s.returns:\n sym_feats.append((f\"ret:{s.returns}\", 0.5))\n type_feats.append((f\"ret:{s.returns}\", 1.0))\n if s.doc:\n # downweight docs by length\n d = (s.doc or \"\").strip()\n if d:\n head = d.splitlines()[0][:160]\n doc_feats.append((f\"doc:{head}\", 0.25))\n mod_feats.append((f\"mod:{s.module}\", 0.2))\n\n # Call-graph features (multi-view)\n call_feats: List[Tuple[str, float]] = []\n try:\n for caller, callee in g.calls:\n call_feats.append((f\"call:{caller}->{callee}\", 1.0))\n except Exception:\n call_feats = []\n\n # Module import topology (indegree/outdegree proxies) with optional graph propagation\n indeg: Dict[str, int] = {}\n outdeg: Dict[str, int] = {m: len(deps) for m, deps in g.module_imports.items()}\n modules = list(g.modules.keys())\n mod_idx: Dict[str, int] = {m: i for i, m in enumerate(modules)}\n for m, deps in g.module_imports.items():\n for d in deps:\n indeg[d] = indeg.get(d, 0) + 1\n base_vec = np.zeros((len(modules),), dtype=np.float32)\n for m in modules:\n i = mod_idx[m]\n base_vec[i] = float(indeg.get(m, 0) + outdeg.get(m, 0) + test_counts.get(m, 0))\n prop_vec = base_vec.copy()\n if int(graph_prop_hops) > 0:\n # Build sparse adjacency (imports treated as undirected for smoothing)\n neigh: List[List[int]] = [[] for _ in modules]\n for m, deps in g.module_imports.items():\n i = mod_idx[m]\n for d in deps:\n if d in mod_idx:\n j = mod_idx[d]\n neigh[i].append(j)\n neigh[j].append(i)\n vec = prop_vec\n damp = float(graph_prop_damp)\n for _ in range(max(0, int(graph_prop_hops))):\n nxt = np.zeros_like(vec)\n for i, ns in enumerate(neigh):\n if not ns:\n continue\n s = 0.0\n for j in ns:\n s += float(vec[j])\n nxt[i] = (1.0 - damp) * float(base_vec[i]) + damp * (s / float(len(ns)))\n vec = nxt\n prop_vec = vec\n # 
normalize\n nrm = float(np.linalg.norm(prop_vec))\n if nrm > 0:\n prop_vec = prop_vec / nrm\n topo_feats: List[Tuple[str, float]] = []\n for m in modules:\n topo_feats.append((f\"indeg:{m}\", float(indeg.get(m, 0))))\n topo_feats.append((f\"outdeg:{m}\", float(outdeg.get(m, 0))))\n if test_counts.get(m):\n topo_feats.append((f\"tests:{m}\", float(test_counts[m])))\n if int(graph_prop_hops) > 0:\n topo_feats.append((f\"prop:{m}\", float(prop_vec[mod_idx[m]])))\n\n z_sym = _feature_hash(sym_feats, dim, seed + HASH_SEEDS[0])\n z_doc = _feature_hash(doc_feats, dim, seed + HASH_SEEDS[1])\n z_mod = _feature_hash(mod_feats, dim, seed + HASH_SEEDS[2])\n z_top = _feature_hash(topo_feats, dim, seed + HASH_SEEDS[3])\n z_types = _feature_hash(type_feats, dim, seed + HASH_SEEDS[5])\n z_calls = _feature_hash(call_feats, dim, seed + HASH_SEEDS[6])\n\n # Optional: include raw repository text/code hashed into the embedding\n z_text = np.zeros((dim,), dtype=np.float32)\n if include_text and text_max_bytes and text_max_bytes > 0:\n # Streamed sparse accumulator for large repos\n text_acc: Dict[int, float] = {}\n # Prefer CodeGraph's indexed files if available; otherwise walk the repo\n files: List[str] = []\n try:\n files = list(getattr(g, \"indexed_files\", []) or [])\n except Exception:\n files = []\n if not files:\n # Fallback: collect common source files\n exts = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n # Normalize ignore list + .gitignore pathspec\n ignore_list = [os.path.normpath(p) for p in (ignore or [])]\n pspec = None\n try:\n gi = os.path.join(repo_root, \".gitignore\")\n if pathspec is not None and os.path.exists(gi):\n with open(gi, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n lines = [ln.rstrip(\"\\n\") for ln in fh]\n pspec = pathspec.PathSpec.from_lines(\"gitwildmatch\", lines)\n except Exception:\n pspec = None\n def _is_ignored(rel: str) -> bool:\n r = os.path.normpath(rel)\n if pspec is not None:\n if pspec.match_file(r.replace(os.sep, \"/\")):\n return True\n for pat in ignore_list:\n if r == pat or r.startswith(pat + os.sep):\n return True\n return False\n for root, dirs, fnames in os.walk(repo_root):\n rel_root = os.path.relpath(root, repo_root)\n if _is_ignored(rel_root):\n dirs[:] = []\n continue\n # prune ignored subdirs\n dirs[:] = [d for d in dirs if not _is_ignored(os.path.join(rel_root, d))]\n for fn in fnames:\n if os.path.splitext(fn)[1].lower() in exts:\n fp = os.path.join(root, fn)\n rel_fp = os.path.relpath(fp, repo_root)\n if _is_ignored(rel_fp):\n continue\n files.append(fp)\n # Normalize file paths and pre-filter by extension\n exts_all = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n norm_files: List[str] = []\n for f in files:\n p = f if os.path.isabs(f) else os.path.join(repo_root, f)\n if os.path.splitext(p)[1].lower() in exts_all:\n norm_files.append(p)\n\n # bytes_budget: None means unlimited; token_cap 0 means unlimited\n bytes_budget: Optional[int] = int(text_max_bytes) if int(text_max_bytes) > 0 else None\n token_cap = int(max(0, int(max_text_tokens))) if max_text_tokens is not None else 0\n tokens_emitted = 0\n\n def _process_file(fp: str) -> Tuple[int, int, Dict[int, float]]:\n try:\n # Per-file cap: avoid reading giant files fully when many are available\n per_cap = int(text_max_bytes)\n with open(fp, \"rb\") as fh:\n raw = fh.read(per_cap)\n if b\"\\x00\" in raw:\n return (0, 0, {})\n n_bytes = len(raw)\n text = raw.decode(\"utf-8\", 
errors=\"ignore\").lower()\n toks = re.findall(r\"[a-zA-Z0-9_]+\", text)\n if not toks:\n return (n_bytes, 0, {})\n n = 3\n stride = 2\n acc_local: Dict[int, float] = {}\n ng_count = 0\n for i in range(0, max(0, len(toks) - n + 1), stride):\n key = f\"text:{' '.join(toks[i:i+n])}\"\n _accumulate_sparse(acc_local, key, float(text_weight), dim, seed + HASH_SEEDS[4])\n ng_count += 1\n return (n_bytes, ng_count, acc_local)\n except Exception:\n return (0, 0, {})\n\n # Concurrent read/tokenize with early short-circuit\n with ThreadPoolExecutor(max_workers=min(8, max(2, os.cpu_count() or 8))) as ex:\n futures = [ex.submit(_process_file, fp) for fp in norm_files]\n for fut in as_completed(futures):\n if bytes_budget is not None and bytes_budget <= 0:\n break\n n_bytes, ng_count, acc_local = fut.result()\n if bytes_budget is not None:\n bytes_budget -= n_bytes\n if bytes_budget is not None and bytes_budget <= 0:\n # still add what we got, then stop\n pass\n if not acc_local:\n continue\n tokens_emitted += int(ng_count)\n for k, v in acc_local.items():\n text_acc[k] = float(text_acc.get(k, 0.0)) + float(v)\n if token_cap and tokens_emitted >= token_cap:\n break\n\n if text_acc:\n z_text = _dense_from_sparse(text_acc, dim)\n\n # Per-family normalize (layer-norm style), then weighted sum\n def _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n\n sym_w = 1.0\n doc_w = 1.0\n mod_w = 1.0\n top_w = 1.0\n txt_w = float(text_weight)\n\n z = (\n sym_w * _unit(z_sym)\n + doc_w * _unit(z_doc)\n + mod_w * _unit(z_mod)\n + top_w * _unit(z_top)\n + txt_w * _unit(z_text)\n + float(types_weight) * _unit(z_types)\n + float(calls_weight) * _unit(z_calls)\n + float(tests_weight) * _unit(np.zeros_like(z_sym)) # tests view added below\n )\n norm = float(np.linalg.norm(z))\n if norm > 0:\n z = z / norm\n\n # Tests view (counts already influence topology; add dedicated view from test nodes)\n z_tests = np.zeros((dim,), dtype=np.float32)\n try:\n test_feats: List[Tuple[str, float]] = []\n for mod, nodes in g.pytest_nodes_by_module.items():\n if nodes:\n test_feats.append((f\"tests:{mod}:{len(nodes)}\", 1.0))\n if test_feats:\n z_tests = _feature_hash(test_feats, dim, seed + HASH_SEEDS[7])\n except Exception:\n z_tests = np.zeros((dim,), dtype=np.float32)\n\n # Re-add tests view (unit) to z\n z = z + float(tests_weight) * _unit(z_tests)\n\n # Sparsity diagnostics (fraction non-zero)\n def _sparse_frac(x: np.ndarray) -> float:\n if x.size == 0:\n return 0.0\n return float((np.count_nonzero(x) / float(x.size)))\n\n result = {\n \"z\": z.astype(np.float32),\n \"z_sym\": z_sym.astype(np.float32),\n \"z_doc\": z_doc.astype(np.float32),\n \"z_mod\": z_mod.astype(np.float32),\n \"z_top\": z_top.astype(np.float32),\n \"z_types\": z_types.astype(np.float32),\n \"z_calls\": z_calls.astype(np.float32),\n \"z_tests\": z_tests.astype(np.float32),\n \"sparsity\": {\n \"z_sym\": _sparse_frac(z_sym),\n \"z_doc\": _sparse_frac(z_doc),\n \"z_mod\": _sparse_frac(z_mod),\n \"z_top\": _sparse_frac(z_top),\n \"z_text\": _sparse_frac(z_text),\n \"z_types\": _sparse_frac(z_types),\n \"z_calls\": _sparse_frac(z_calls),\n \"z_tests\": _sparse_frac(z_tests),\n },\n }\n if include_text:\n result[\"z_text\"] = z_text.astype(np.float32)\n return result\n\n","source_hash":"70abd33382a7f11c15e60730fe6f0eadf3675016fd09e534bc3927fa63f91b54","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.embedding.build_repo_embedding","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.embedding.build_repo_embedding#L13-L300","kind":"function","name":"build_repo_embedding","path":"examples/scripts/embedding.py","language":"python","start_line":13,"end_line":300,"context_start_line":1,"context_end_line":302,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport os\nimport re\nimport numpy as np\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\ntry:\n import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\n\nfrom examples.scripts.code_graph import CodeGraph\n\ndef build_repo_embedding(\n repo_root: str,\n *,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_text: bool = False,\n text_max_bytes: int = 0,\n max_text_tokens: int = 0,\n text_weight: float = 0.25,\n calls_weight: float = 0.25,\n types_weight: float = 0.20,\n tests_weight: float = 0.15,\n graph_prop_hops: int = 0,\n graph_prop_damp: float = 0.85,\n ignore: Optional[List[str]] = None,\n) -> Dict[str, np.ndarray]:\n g = CodeGraph.load_or_build(repo_root, ignore=ignore)\n\n # Collect lightweight features\n sym_feats: List[Tuple[str, float]] = []\n doc_feats: List[Tuple[str, float]] = []\n mod_feats: List[Tuple[str, float]] = []\n type_feats: List[Tuple[str, float]] = []\n test_counts: Dict[str, int] = {}\n\n for mod, nodes in g.pytest_nodes_by_module.items():\n test_counts[mod] = len(nodes or [])\n\n for _fqn, s in g.symbols_by_fqn.items():\n base = f\"{s.kind}:{s.name}\"\n sym_feats.append((base, 1.0))\n if s.signature:\n # keep in sym for backward-compat\n sym_feats.append((f\"sig:{s.signature}\", 0.5))\n type_feats.append((f\"sig:{s.signature}\", 1.0))\n if s.returns:\n sym_feats.append((f\"ret:{s.returns}\", 0.5))\n type_feats.append((f\"ret:{s.returns}\", 1.0))\n if s.doc:\n # downweight docs by length\n d = (s.doc or \"\").strip()\n if d:\n head = d.splitlines()[0][:160]\n doc_feats.append((f\"doc:{head}\", 0.25))\n mod_feats.append((f\"mod:{s.module}\", 0.2))\n\n # Call-graph features (multi-view)\n call_feats: List[Tuple[str, float]] = []\n try:\n for caller, callee in g.calls:\n call_feats.append((f\"call:{caller}->{callee}\", 1.0))\n except Exception:\n call_feats = []\n\n # Module import topology (indegree/outdegree proxies) with optional graph propagation\n indeg: Dict[str, int] = {}\n outdeg: Dict[str, int] = {m: len(deps) for m, deps in g.module_imports.items()}\n modules = list(g.modules.keys())\n mod_idx: Dict[str, int] = {m: i for i, m in enumerate(modules)}\n for m, deps in g.module_imports.items():\n for d in deps:\n indeg[d] = indeg.get(d, 0) + 1\n base_vec = np.zeros((len(modules),), dtype=np.float32)\n for m in modules:\n i = mod_idx[m]\n base_vec[i] = float(indeg.get(m, 0) + outdeg.get(m, 0) + test_counts.get(m, 0))\n prop_vec = base_vec.copy()\n if int(graph_prop_hops) > 0:\n # Build sparse adjacency (imports treated as undirected for smoothing)\n neigh: List[List[int]] = [[] for _ in modules]\n for m, deps in g.module_imports.items():\n i = mod_idx[m]\n for d in deps:\n if d in mod_idx:\n j = mod_idx[d]\n neigh[i].append(j)\n neigh[j].append(i)\n vec = prop_vec\n damp = float(graph_prop_damp)\n for _ in range(max(0, int(graph_prop_hops))):\n nxt = np.zeros_like(vec)\n for i, ns in enumerate(neigh):\n if not ns:\n continue\n s = 0.0\n for j in ns:\n s += float(vec[j])\n nxt[i] = (1.0 - damp) * float(base_vec[i]) + damp * (s / 
float(len(ns)))\n vec = nxt\n prop_vec = vec\n # normalize\n nrm = float(np.linalg.norm(prop_vec))\n if nrm > 0:\n prop_vec = prop_vec / nrm\n topo_feats: List[Tuple[str, float]] = []\n for m in modules:\n topo_feats.append((f\"indeg:{m}\", float(indeg.get(m, 0))))\n topo_feats.append((f\"outdeg:{m}\", float(outdeg.get(m, 0))))\n if test_counts.get(m):\n topo_feats.append((f\"tests:{m}\", float(test_counts[m])))\n if int(graph_prop_hops) > 0:\n topo_feats.append((f\"prop:{m}\", float(prop_vec[mod_idx[m]])))\n\n z_sym = _feature_hash(sym_feats, dim, seed + HASH_SEEDS[0])\n z_doc = _feature_hash(doc_feats, dim, seed + HASH_SEEDS[1])\n z_mod = _feature_hash(mod_feats, dim, seed + HASH_SEEDS[2])\n z_top = _feature_hash(topo_feats, dim, seed + HASH_SEEDS[3])\n z_types = _feature_hash(type_feats, dim, seed + HASH_SEEDS[5])\n z_calls = _feature_hash(call_feats, dim, seed + HASH_SEEDS[6])\n\n # Optional: include raw repository text/code hashed into the embedding\n z_text = np.zeros((dim,), dtype=np.float32)\n if include_text and text_max_bytes and text_max_bytes > 0:\n # Streamed sparse accumulator for large repos\n text_acc: Dict[int, float] = {}\n # Prefer CodeGraph's indexed files if available; otherwise walk the repo\n files: List[str] = []\n try:\n files = list(getattr(g, \"indexed_files\", []) or [])\n except Exception:\n files = []\n if not files:\n # Fallback: collect common source files\n exts = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n # Normalize ignore list + .gitignore pathspec\n ignore_list = [os.path.normpath(p) for p in (ignore or [])]\n pspec = None\n try:\n gi = os.path.join(repo_root, \".gitignore\")\n if pathspec is not None and os.path.exists(gi):\n with open(gi, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n lines = [ln.rstrip(\"\\n\") for ln in fh]\n pspec = pathspec.PathSpec.from_lines(\"gitwildmatch\", lines)\n except Exception:\n pspec = None\n def _is_ignored(rel: str) -> bool:\n r = os.path.normpath(rel)\n if pspec is not None:\n if pspec.match_file(r.replace(os.sep, \"/\")):\n return True\n for pat in ignore_list:\n if r == pat or r.startswith(pat + os.sep):\n return True\n return False\n for root, dirs, fnames in os.walk(repo_root):\n rel_root = os.path.relpath(root, repo_root)\n if _is_ignored(rel_root):\n dirs[:] = []\n continue\n # prune ignored subdirs\n dirs[:] = [d for d in dirs if not _is_ignored(os.path.join(rel_root, d))]\n for fn in fnames:\n if os.path.splitext(fn)[1].lower() in exts:\n fp = os.path.join(root, fn)\n rel_fp = os.path.relpath(fp, repo_root)\n if _is_ignored(rel_fp):\n continue\n files.append(fp)\n # Normalize file paths and pre-filter by extension\n exts_all = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n norm_files: List[str] = []\n for f in files:\n p = f if os.path.isabs(f) else os.path.join(repo_root, f)\n if os.path.splitext(p)[1].lower() in exts_all:\n norm_files.append(p)\n\n # bytes_budget: None means unlimited; token_cap 0 means unlimited\n bytes_budget: Optional[int] = int(text_max_bytes) if int(text_max_bytes) > 0 else None\n token_cap = int(max(0, int(max_text_tokens))) if max_text_tokens is not None else 0\n tokens_emitted = 0\n\n def _process_file(fp: str) -> Tuple[int, int, Dict[int, float]]:\n try:\n # Per-file cap: avoid reading giant files fully when many are available\n per_cap = int(text_max_bytes)\n with open(fp, \"rb\") as fh:\n raw = fh.read(per_cap)\n if b\"\\x00\" in raw:\n return (0, 0, {})\n n_bytes = 
len(raw)\n text = raw.decode(\"utf-8\", errors=\"ignore\").lower()\n toks = re.findall(r\"[a-zA-Z0-9_]+\", text)\n if not toks:\n return (n_bytes, 0, {})\n n = 3\n stride = 2\n acc_local: Dict[int, float] = {}\n ng_count = 0\n for i in range(0, max(0, len(toks) - n + 1), stride):\n key = f\"text:{' '.join(toks[i:i+n])}\"\n _accumulate_sparse(acc_local, key, float(text_weight), dim, seed + HASH_SEEDS[4])\n ng_count += 1\n return (n_bytes, ng_count, acc_local)\n except Exception:\n return (0, 0, {})\n\n # Concurrent read/tokenize with early short-circuit\n with ThreadPoolExecutor(max_workers=min(8, max(2, os.cpu_count() or 8))) as ex:\n futures = [ex.submit(_process_file, fp) for fp in norm_files]\n for fut in as_completed(futures):\n if bytes_budget is not None and bytes_budget <= 0:\n break\n n_bytes, ng_count, acc_local = fut.result()\n if bytes_budget is not None:\n bytes_budget -= n_bytes\n if bytes_budget is not None and bytes_budget <= 0:\n # still add what we got, then stop\n pass\n if not acc_local:\n continue\n tokens_emitted += int(ng_count)\n for k, v in acc_local.items():\n text_acc[k] = float(text_acc.get(k, 0.0)) + float(v)\n if token_cap and tokens_emitted >= token_cap:\n break\n\n if text_acc:\n z_text = _dense_from_sparse(text_acc, dim)\n\n # Per-family normalize (layer-norm style), then weighted sum\n def _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n\n sym_w = 1.0\n doc_w = 1.0\n mod_w = 1.0\n top_w = 1.0\n txt_w = float(text_weight)\n\n z = (\n sym_w * _unit(z_sym)\n + doc_w * _unit(z_doc)\n + mod_w * _unit(z_mod)\n + top_w * _unit(z_top)\n + txt_w * _unit(z_text)\n + float(types_weight) * _unit(z_types)\n + float(calls_weight) * _unit(z_calls)\n + float(tests_weight) * _unit(np.zeros_like(z_sym)) # tests view added below\n )\n norm = float(np.linalg.norm(z))\n if norm > 0:\n z = z / norm\n\n # Tests view (counts already influence topology; add dedicated view from test nodes)\n z_tests = np.zeros((dim,), dtype=np.float32)\n try:\n test_feats: List[Tuple[str, float]] = []\n for mod, nodes in g.pytest_nodes_by_module.items():\n if nodes:\n test_feats.append((f\"tests:{mod}:{len(nodes)}\", 1.0))\n if test_feats:\n z_tests = _feature_hash(test_feats, dim, seed + HASH_SEEDS[7])\n except Exception:\n z_tests = np.zeros((dim,), dtype=np.float32)\n\n # Re-add tests view (unit) to z\n z = z + float(tests_weight) * _unit(z_tests)\n\n # Sparsity diagnostics (fraction non-zero)\n def _sparse_frac(x: np.ndarray) -> float:\n if x.size == 0:\n return 0.0\n return float((np.count_nonzero(x) / float(x.size)))\n\n result = {\n \"z\": z.astype(np.float32),\n \"z_sym\": z_sym.astype(np.float32),\n \"z_doc\": z_doc.astype(np.float32),\n \"z_mod\": z_mod.astype(np.float32),\n \"z_top\": z_top.astype(np.float32),\n \"z_types\": z_types.astype(np.float32),\n \"z_calls\": z_calls.astype(np.float32),\n \"z_tests\": z_tests.astype(np.float32),\n \"sparsity\": {\n \"z_sym\": _sparse_frac(z_sym),\n \"z_doc\": _sparse_frac(z_doc),\n \"z_mod\": _sparse_frac(z_mod),\n \"z_top\": _sparse_frac(z_top),\n \"z_text\": _sparse_frac(z_text),\n \"z_types\": _sparse_frac(z_types),\n \"z_calls\": _sparse_frac(z_calls),\n \"z_tests\": _sparse_frac(z_tests),\n },\n }\n if include_text:\n result[\"z_text\"] = z_text.astype(np.float32)\n return result\n\n","source_hash":"70abd33382a7f11c15e60730fe6f0eadf3675016fd09e534bc3927fa63f91b54","truncated":false} 
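Note: a usage sketch for build_repo_embedding as declared above, assuming the examples package is importable; the repository path is a placeholder, and the knob values echo the smoke_plan.py build step (dim 256, 20000-byte text cap, one propagation hop).

from examples.scripts.embedding import build_repo_embedding

views = build_repo_embedding(
    "/path/to/repo",        # placeholder repository root
    dim=256,
    seed=0,
    include_text=True,
    text_max_bytes=20_000,
    graph_prop_hops=1,
    graph_prop_damp=0.85,
)
z = views["z"]              # fused multi-view embedding (tests view is added after the final normalize)
print(z.shape)
print(views["sparsity"])    # per-view fraction of non-zero components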
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.embedding._unit","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.embedding._unit#L233-L235","kind":"function","name":"_unit","path":"examples/scripts/embedding.py","language":"python","start_line":233,"end_line":235,"context_start_line":213,"context_end_line":255,"code":" if bytes_budget is not None and bytes_budget <= 0:\n break\n n_bytes, ng_count, acc_local = fut.result()\n if bytes_budget is not None:\n bytes_budget -= n_bytes\n if bytes_budget is not None and bytes_budget <= 0:\n # still add what we got, then stop\n pass\n if not acc_local:\n continue\n tokens_emitted += int(ng_count)\n for k, v in acc_local.items():\n text_acc[k] = float(text_acc.get(k, 0.0)) + float(v)\n if token_cap and tokens_emitted >= token_cap:\n break\n\n if text_acc:\n z_text = _dense_from_sparse(text_acc, dim)\n\n # Per-family normalize (layer-norm style), then weighted sum\n def _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n\n sym_w = 1.0\n doc_w = 1.0\n mod_w = 1.0\n top_w = 1.0\n txt_w = float(text_weight)\n\n z = (\n sym_w * _unit(z_sym)\n + doc_w * _unit(z_doc)\n + mod_w * _unit(z_mod)\n + top_w * _unit(z_top)\n + txt_w * _unit(z_text)\n + float(types_weight) * _unit(z_types)\n + float(calls_weight) * _unit(z_calls)\n + float(tests_weight) * _unit(np.zeros_like(z_sym)) # tests view added below\n )\n norm = float(np.linalg.norm(z))\n if norm > 0:\n z = z / norm","source_hash":"70abd33382a7f11c15e60730fe6f0eadf3675016fd09e534bc3927fa63f91b54","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.embedding._sparse_frac","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.embedding._sparse_frac#L273-L276","kind":"function","name":"_sparse_frac","path":"examples/scripts/embedding.py","language":"python","start_line":273,"end_line":276,"context_start_line":253,"context_end_line":296,"code":" norm = float(np.linalg.norm(z))\n if norm > 0:\n z = z / norm\n\n # Tests view (counts already influence topology; add dedicated view from test nodes)\n z_tests = np.zeros((dim,), dtype=np.float32)\n try:\n test_feats: List[Tuple[str, float]] = []\n for mod, nodes in g.pytest_nodes_by_module.items():\n if nodes:\n test_feats.append((f\"tests:{mod}:{len(nodes)}\", 1.0))\n if test_feats:\n z_tests = _feature_hash(test_feats, dim, seed + HASH_SEEDS[7])\n except Exception:\n z_tests = np.zeros((dim,), dtype=np.float32)\n\n # Re-add tests view (unit) to z\n z = z + float(tests_weight) * _unit(z_tests)\n\n # Sparsity diagnostics (fraction non-zero)\n def _sparse_frac(x: np.ndarray) -> float:\n if x.size == 0:\n return 0.0\n return float((np.count_nonzero(x) / float(x.size)))\n\n result = {\n \"z\": z.astype(np.float32),\n \"z_sym\": z_sym.astype(np.float32),\n \"z_doc\": z_doc.astype(np.float32),\n \"z_mod\": z_mod.astype(np.float32),\n \"z_top\": z_top.astype(np.float32),\n \"z_types\": z_types.astype(np.float32),\n \"z_calls\": z_calls.astype(np.float32),\n \"z_tests\": z_tests.astype(np.float32),\n \"sparsity\": {\n \"z_sym\": _sparse_frac(z_sym),\n \"z_doc\": _sparse_frac(z_doc),\n \"z_mod\": _sparse_frac(z_mod),\n \"z_top\": _sparse_frac(z_top),\n \"z_text\": _sparse_frac(z_text),\n \"z_types\": _sparse_frac(z_types),\n \"z_calls\": _sparse_frac(z_calls),\n \"z_tests\": _sparse_frac(z_tests),\n },","source_hash":"70abd33382a7f11c15e60730fe6f0eadf3675016fd09e534bc3927fa63f91b54","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.embedding._process_file","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.embedding._process_file#L184-L207","kind":"function","name":"_process_file","path":"examples/scripts/embedding.py","language":"python","start_line":184,"end_line":207,"context_start_line":164,"context_end_line":227,"code":" for fn in fnames:\n if os.path.splitext(fn)[1].lower() in exts:\n fp = os.path.join(root, fn)\n rel_fp = os.path.relpath(fp, repo_root)\n if _is_ignored(rel_fp):\n continue\n files.append(fp)\n # Normalize file paths and pre-filter by extension\n exts_all = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n norm_files: List[str] = []\n for f in files:\n p = f if os.path.isabs(f) else os.path.join(repo_root, f)\n if os.path.splitext(p)[1].lower() in exts_all:\n norm_files.append(p)\n\n # bytes_budget: None means unlimited; token_cap 0 means unlimited\n bytes_budget: Optional[int] = int(text_max_bytes) if int(text_max_bytes) > 0 else None\n token_cap = int(max(0, int(max_text_tokens))) if max_text_tokens is not None else 0\n tokens_emitted = 0\n\n def _process_file(fp: str) -> Tuple[int, int, Dict[int, float]]:\n try:\n # Per-file cap: avoid reading giant files fully when many are available\n per_cap = int(text_max_bytes)\n with open(fp, \"rb\") as fh:\n raw = fh.read(per_cap)\n if b\"\\x00\" in raw:\n return (0, 0, {})\n n_bytes = len(raw)\n text = raw.decode(\"utf-8\", errors=\"ignore\").lower()\n toks = re.findall(r\"[a-zA-Z0-9_]+\", text)\n if not toks:\n return (n_bytes, 0, {})\n n = 3\n stride = 2\n acc_local: Dict[int, float] = {}\n ng_count = 0\n for i in range(0, max(0, len(toks) - n + 1), stride):\n key = f\"text:{' '.join(toks[i:i+n])}\"\n _accumulate_sparse(acc_local, key, float(text_weight), dim, seed + HASH_SEEDS[4])\n ng_count += 1\n return (n_bytes, ng_count, acc_local)\n except Exception:\n return (0, 0, {})\n\n # Concurrent read/tokenize with early short-circuit\n with ThreadPoolExecutor(max_workers=min(8, max(2, os.cpu_count() or 8))) as ex:\n futures = [ex.submit(_process_file, fp) for fp in norm_files]\n for fut in as_completed(futures):\n if bytes_budget is not None and bytes_budget <= 0:\n break\n n_bytes, ng_count, acc_local = fut.result()\n if bytes_budget is not None:\n bytes_budget -= n_bytes\n if bytes_budget is not None and bytes_budget <= 0:\n # still add what we got, then stop\n pass\n if not acc_local:\n continue\n tokens_emitted += int(ng_count)\n for k, v in acc_local.items():\n text_acc[k] = float(text_acc.get(k, 0.0)) + float(v)\n if token_cap and tokens_emitted >= token_cap:\n break","source_hash":"70abd33382a7f11c15e60730fe6f0eadf3675016fd09e534bc3927fa63f91b54","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.embedding._is_ignored","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.embedding._is_ignored#L148-L156","kind":"function","name":"_is_ignored","path":"examples/scripts/embedding.py","language":"python","start_line":148,"end_line":156,"context_start_line":128,"context_end_line":176,"code":" # Prefer CodeGraph's indexed files if available; otherwise walk the repo\n files: List[str] = []\n try:\n files = list(getattr(g, \"indexed_files\", []) or [])\n except Exception:\n files = []\n if not files:\n # Fallback: collect common source files\n exts = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n # Normalize 
ignore list + .gitignore pathspec\n ignore_list = [os.path.normpath(p) for p in (ignore or [])]\n pspec = None\n try:\n gi = os.path.join(repo_root, \".gitignore\")\n if pathspec is not None and os.path.exists(gi):\n with open(gi, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n lines = [ln.rstrip(\"\\n\") for ln in fh]\n pspec = pathspec.PathSpec.from_lines(\"gitwildmatch\", lines)\n except Exception:\n pspec = None\n def _is_ignored(rel: str) -> bool:\n r = os.path.normpath(rel)\n if pspec is not None:\n if pspec.match_file(r.replace(os.sep, \"/\")):\n return True\n for pat in ignore_list:\n if r == pat or r.startswith(pat + os.sep):\n return True\n return False\n for root, dirs, fnames in os.walk(repo_root):\n rel_root = os.path.relpath(root, repo_root)\n if _is_ignored(rel_root):\n dirs[:] = []\n continue\n # prune ignored subdirs\n dirs[:] = [d for d in dirs if not _is_ignored(os.path.join(rel_root, d))]\n for fn in fnames:\n if os.path.splitext(fn)[1].lower() in exts:\n fp = os.path.join(root, fn)\n rel_fp = os.path.relpath(fp, repo_root)\n if _is_ignored(rel_fp):\n continue\n files.append(fp)\n # Normalize file paths and pre-filter by extension\n exts_all = {\".py\", \".md\", \".rst\", \".txt\", \".json\", \".toml\", \".yaml\", \".yml\", \".ini\"}\n norm_files: List[str] = []\n for f in files:\n p = f if os.path.isabs(f) else os.path.join(repo_root, f)\n if os.path.splitext(p)[1].lower() in exts_all:","source_hash":"70abd33382a7f11c15e60730fe6f0eadf3675016fd09e534bc3927fa63f91b54","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.mbpp_loader","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.mbpp_loader#L1-L96","kind":"module","name":"examples.scripts.mbpp_loader","path":"examples/scripts/mbpp_loader.py","language":"python","start_line":1,"end_line":96,"context_start_line":1,"context_end_line":96,"code":"from __future__ import annotations\n\nfrom typing import List, Tuple\n\n\ndef load_mbpp_texts(max_n: int = 128, split: str = \"train\") -> Tuple[List[str], int]:\n\t\"\"\"\n\tLoad MBPP via Hugging Face datasets and return a list of training texts.\n\tEach text is prompt + solution combined to form a code-aware LM sample.\n\tReturns (texts, total_available).\n\t\"\"\"\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\texcept Exception:\n\t\treturn [], 0\n\t# Try common configs in order of availability\n\tds = None\n\tfor name in (\"mbpp\",):\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tif subset is None:\n\t\t\t\t\tds = load_dataset(name, split=split)\n\t\t\t\telse:\n\t\t\t\t\tds = load_dataset(name, subset, split=split)\n\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\tif ds is not None:\n\t\t\tbreak\n\tif ds is None:\n\t\treturn [], 0\n\ttexts: List[str] = []\n\ttotal = len(ds)\n\tfor i in range(total):\n\t\trow = ds[i]\n\t\t# MBPP variants have fields like: \"text\" or \"prompt\"/\"question\", \"code\" (solution), \"test_list\"\n\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\tif not prompt and not code:\n\t\t\tcontinue\n\t\t# Simple concatenation; training LM on both description and solution\n\t\tcombined = (prompt + \"\\n\" + code).strip()\n\t\tif combined:\n\t\t\ttexts.append(combined)\n\t\tif len(texts) >= int(max_n):\n\t\t\tbreak\n\treturn texts, total\n\n\ndef load_mbpp_texts_all_splits(max_n: int = 1_000_000) -> Tuple[List[str], 
int]:\n\t\"\"\"\n\tLoad MBPP across all available splits (e.g., train/validation/test) and concatenate.\n\tReturns (texts, total_available_across_splits).\n\t\"\"\"\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\texcept Exception:\n\t\treturn [], 0\n\tds_dict = None\n\tfor name in (\"mbpp\",):\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tif subset is None:\n\t\t\t\t\tds_dict = load_dataset(name) # DatasetDict\n\t\t\t\telse:\n\t\t\t\t\tds_dict = load_dataset(name, subset) # DatasetDict\n\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\tif ds_dict is not None:\n\t\t\tbreak\n\tif ds_dict is None:\n\t\treturn [], 0\n\ttexts: List[str] = []\n\ttotal = 0\n\t# Iterate splits in a stable order\n\tfor split_name in (\"train\", \"validation\", \"test\"):\n\t\tif split_name not in ds_dict:\n\t\t\tcontinue\n\t\tds = ds_dict[split_name]\n\t\tsz = len(ds)\n\t\ttotal += sz\n\t\tfor i in range(sz):\n\t\t\trow = ds[i]\n\t\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\t\tif not prompt and not code:\n\t\t\t\tcontinue\n\t\t\tcombined = (prompt + \"\\n\" + code).strip()\n\t\t\tif combined:\n\t\t\t\ttexts.append(combined)\n\t\t\tif len(texts) >= int(max_n):\n\t\t\t\treturn texts, total\n\treturn texts, total\n\n","source_hash":"e4cb070c07272a433640e767ea3100b730d7c7d7a487bdd94c82694672add025","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.mbpp_loader.load_mbpp_texts","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.mbpp_loader.load_mbpp_texts#L6-L47","kind":"function","name":"load_mbpp_texts","path":"examples/scripts/mbpp_loader.py","language":"python","start_line":6,"end_line":47,"context_start_line":1,"context_end_line":67,"code":"from __future__ import annotations\n\nfrom typing import List, Tuple\n\n\ndef load_mbpp_texts(max_n: int = 128, split: str = \"train\") -> Tuple[List[str], int]:\n\t\"\"\"\n\tLoad MBPP via Hugging Face datasets and return a list of training texts.\n\tEach text is prompt + solution combined to form a code-aware LM sample.\n\tReturns (texts, total_available).\n\t\"\"\"\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\texcept Exception:\n\t\treturn [], 0\n\t# Try common configs in order of availability\n\tds = None\n\tfor name in (\"mbpp\",):\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tif subset is None:\n\t\t\t\t\tds = load_dataset(name, split=split)\n\t\t\t\telse:\n\t\t\t\t\tds = load_dataset(name, subset, split=split)\n\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\tif ds is not None:\n\t\t\tbreak\n\tif ds is None:\n\t\treturn [], 0\n\ttexts: List[str] = []\n\ttotal = len(ds)\n\tfor i in range(total):\n\t\trow = ds[i]\n\t\t# MBPP variants have fields like: \"text\" or \"prompt\"/\"question\", \"code\" (solution), \"test_list\"\n\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\tif not prompt and not code:\n\t\t\tcontinue\n\t\t# Simple concatenation; training LM on both description and solution\n\t\tcombined = (prompt + \"\\n\" + code).strip()\n\t\tif combined:\n\t\t\ttexts.append(combined)\n\t\tif len(texts) >= int(max_n):\n\t\t\tbreak\n\treturn texts, total\n\n\ndef load_mbpp_texts_all_splits(max_n: int = 1_000_000) -> Tuple[List[str], int]:\n\t\"\"\"\n\tLoad 
MBPP across all available splits (e.g., train/validation/test) and concatenate.\n\tReturns (texts, total_available_across_splits).\n\t\"\"\"\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\texcept Exception:\n\t\treturn [], 0\n\tds_dict = None\n\tfor name in (\"mbpp\",):\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tif subset is None:\n\t\t\t\t\tds_dict = load_dataset(name) # DatasetDict\n\t\t\t\telse:\n\t\t\t\t\tds_dict = load_dataset(name, subset) # DatasetDict\n\t\t\t\tbreak","source_hash":"e4cb070c07272a433640e767ea3100b730d7c7d7a487bdd94c82694672add025","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.mbpp_loader.load_mbpp_texts_all_splits","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.mbpp_loader.load_mbpp_texts_all_splits#L50-L94","kind":"function","name":"load_mbpp_texts_all_splits","path":"examples/scripts/mbpp_loader.py","language":"python","start_line":50,"end_line":94,"context_start_line":30,"context_end_line":96,"code":"\tif ds is None:\n\t\treturn [], 0\n\ttexts: List[str] = []\n\ttotal = len(ds)\n\tfor i in range(total):\n\t\trow = ds[i]\n\t\t# MBPP variants have fields like: \"text\" or \"prompt\"/\"question\", \"code\" (solution), \"test_list\"\n\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\tif not prompt and not code:\n\t\t\tcontinue\n\t\t# Simple concatenation; training LM on both description and solution\n\t\tcombined = (prompt + \"\\n\" + code).strip()\n\t\tif combined:\n\t\t\ttexts.append(combined)\n\t\tif len(texts) >= int(max_n):\n\t\t\tbreak\n\treturn texts, total\n\n\ndef load_mbpp_texts_all_splits(max_n: int = 1_000_000) -> Tuple[List[str], int]:\n\t\"\"\"\n\tLoad MBPP across all available splits (e.g., train/validation/test) and concatenate.\n\tReturns (texts, total_available_across_splits).\n\t\"\"\"\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\texcept Exception:\n\t\treturn [], 0\n\tds_dict = None\n\tfor name in (\"mbpp\",):\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tif subset is None:\n\t\t\t\t\tds_dict = load_dataset(name) # DatasetDict\n\t\t\t\telse:\n\t\t\t\t\tds_dict = load_dataset(name, subset) # DatasetDict\n\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\tif ds_dict is not None:\n\t\t\tbreak\n\tif ds_dict is None:\n\t\treturn [], 0\n\ttexts: List[str] = []\n\ttotal = 0\n\t# Iterate splits in a stable order\n\tfor split_name in (\"train\", \"validation\", \"test\"):\n\t\tif split_name not in ds_dict:\n\t\t\tcontinue\n\t\tds = ds_dict[split_name]\n\t\tsz = len(ds)\n\t\ttotal += sz\n\t\tfor i in range(sz):\n\t\t\trow = ds[i]\n\t\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\t\tif not prompt and not code:\n\t\t\t\tcontinue\n\t\t\tcombined = (prompt + \"\\n\" + code).strip()\n\t\t\tif combined:\n\t\t\t\ttexts.append(combined)\n\t\t\tif len(texts) >= int(max_n):\n\t\t\t\treturn texts, total\n\treturn texts, total\n\n","source_hash":"e4cb070c07272a433640e767ea3100b730d7c7d7a487bdd94c82694672add025","truncated":false} 
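The two mbpp_loader records above share a soft-failure contract: `load_mbpp_texts` and `load_mbpp_texts_all_splits` return `([], 0)` instead of raising when the `datasets` package is missing or the download fails, and both try the `sanitized` MBPP config before falling back to the default one, so the reported totals may reflect the sanitized subset. The sketch below is a hypothetical smoke check of that contract, not part of the indexed sources; it assumes the repository root is on `sys.path` so that `examples.scripts.mbpp_loader` is importable and that `datasets` is installed.

```python
# Hypothetical smoke check for the mbpp_loader records above (not indexed repo
# code). Assumes `pip install datasets` and the repo root on sys.path.
from examples.scripts.mbpp_loader import load_mbpp_texts, load_mbpp_texts_all_splits


def smoke_check(max_n: int = 8) -> None:
    texts, total = load_mbpp_texts(max_n=max_n, split="train")
    if not texts:
        # The loaders signal failure with ([], 0) rather than an exception.
        print("MBPP unavailable: `datasets` not installed or download failed")
        return
    # Each sample is the task description and the reference solution joined
    # by a newline, ready to use as a plain LM training text.
    print(f"train: kept {len(texts)} of {total} rows; first sample:")
    print(texts[0][:200])

    all_texts, all_total = load_mbpp_texts_all_splits(max_n=4 * max_n)
    print(f"all splits: kept {len(all_texts)} of {all_total} rows")


if __name__ == "__main__":
    smoke_check()
```

One caveat visible in the record for `load_mbpp_texts_all_splits`: it returns as soon as `max_n` texts are collected, so the second element of its result counts only the rows of the splits visited up to that point, not of all available splits.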
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.codegraph_core#L1-L198","kind":"module","name":"examples.scripts.codegraph_core","path":"examples/scripts/codegraph_core.py","language":"python","start_line":1,"end_line":198,"context_start_line":1,"context_end_line":198,"code":"from __future__ import annotations\n\nimport os\nimport ast\nimport hashlib\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple, Iterable, Set\n\n\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid\n ent = CGEntity(\n id=mid, kind=\"module\", name=mod, file=abs_fp, owner=None,\n start_line=1, end_line=self._safe_count_lines(abs_fp),\n )\n self.entities_by_id[mid] = ent\n self._ids_by_file.setdefault(abs_fp, []).append(mid)\n self._index_identifiers.setdefault(mod.lower(), []).append(mid)\n # Parse AST for defs/imports/calls\n try:\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n src = fh.read()\n tree = ast.parse(src)\n except Exception:\n tree = None\n if tree is None:\n continue\n # functions/classes\n for node in ast.walk(tree):\n if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n fid = f\"py:{mod}.{name}\"\n self.entities_by_id[fid] = CGEntity(\n id=fid, kind=\"function\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(fid)\n self.edges_list.append(CGEdge(src=mid, dst=fid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(fid)\n elif isinstance(node, ast.ClassDef):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n cid = f\"py:{mod}.{name}\"\n self.entities_by_id[cid] = CGEntity(\n id=cid, kind=\"class\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(cid)\n self.edges_list.append(CGEdge(src=mid, dst=cid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(cid)\n # imports (module-level)\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for alias in node.names:\n im = alias.name\n if not im:\n continue\n tgt_mod = im\n tid = 
f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n elif isinstance(node, ast.ImportFrom):\n im = node.module or \"\"\n if not im:\n continue\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n except Exception:\n pass\n # calls (best-effort): record identifiers used in Call nodes\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Call):\n fn = getattr(node, \"func\", None)\n name = None\n if isinstance(fn, ast.Attribute):\n name = getattr(fn, \"attr\", None)\n elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = str(name).lower()\n for cand in self._index_identifiers.get(lid, []):\n self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.FileSpan","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.codegraph_core.FileSpan#L11-L14","kind":"class","name":"FileSpan","path":"examples/scripts/codegraph_core.py","language":"python","start_line":11,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"from __future__ import annotations\n\nimport os\nimport ast\nimport hashlib\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, 
Tuple, Iterable, Set\n\n\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.CGEntity","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.codegraph_core.CGEntity#L18-L25","kind":"class","name":"CGEntity","path":"examples/scripts/codegraph_core.py","language":"python","start_line":18,"end_line":25,"context_start_line":1,"context_end_line":45,"code":"from __future__ import annotations\n\nimport os\nimport ast\nimport hashlib\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple, Iterable, Set\n\n\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.CGEdge","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.codegraph_core.CGEdge#L29-L32","kind":"class","name":"CGEdge","path":"examples/scripts/codegraph_core.py","language":"python","start_line":29,"end_line":32,"context_start_line":9,"context_end_line":52,"code":"\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = 
self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.CodeGraph","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.codegraph_core.CodeGraph#L35-L196","kind":"class","name":"CodeGraph","path":"examples/scripts/codegraph_core.py","language":"python","start_line":35,"end_line":196,"context_start_line":15,"context_end_line":198,"code":"\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid\n ent = CGEntity(\n id=mid, kind=\"module\", name=mod, file=abs_fp, owner=None,\n start_line=1, end_line=self._safe_count_lines(abs_fp),\n )\n self.entities_by_id[mid] = ent\n self._ids_by_file.setdefault(abs_fp, []).append(mid)\n self._index_identifiers.setdefault(mod.lower(), []).append(mid)\n # Parse AST for defs/imports/calls\n try:\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n src = fh.read()\n tree = ast.parse(src)\n except Exception:\n tree = None\n if tree is None:\n continue\n # functions/classes\n for node in ast.walk(tree):\n if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n fid = f\"py:{mod}.{name}\"\n self.entities_by_id[fid] = CGEntity(\n id=fid, kind=\"function\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(fid)\n self.edges_list.append(CGEdge(src=mid, dst=fid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(fid)\n elif isinstance(node, ast.ClassDef):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n cid = f\"py:{mod}.{name}\"\n self.entities_by_id[cid] = CGEntity(\n id=cid, kind=\"class\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(cid)\n self.edges_list.append(CGEdge(src=mid, dst=cid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(cid)\n # imports (module-level)\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for alias in node.names:\n im = alias.name\n if not im:\n continue\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, 
type=\"imports\"))\n elif isinstance(node, ast.ImportFrom):\n im = node.module or \"\"\n if not im:\n continue\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n except Exception:\n pass\n # calls (best-effort): record identifiers used in Call nodes\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Call):\n fn = getattr(node, \"func\", None)\n name = None\n if isinstance(fn, ast.Attribute):\n name = getattr(fn, \"attr\", None)\n elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = str(name).lower()\n for cand in self._index_identifiers.get(lid, []):\n self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.__init__","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core.__init__#L36-L44","kind":"function","name":"__init__","path":"examples/scripts/codegraph_core.py","language":"python","start_line":36,"end_line":44,"context_start_line":16,"context_end_line":64,"code":"\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: 
int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid\n ent = CGEntity(\n id=mid, kind=\"module\", name=mod, file=abs_fp, owner=None,\n start_line=1, end_line=self._safe_count_lines(abs_fp),\n )\n self.entities_by_id[mid] = ent\n self._ids_by_file.setdefault(abs_fp, []).append(mid)\n self._index_identifiers.setdefault(mod.lower(), []).append(mid)\n # Parse AST for defs/imports/calls\n try:\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n src = fh.read()\n tree = ast.parse(src)","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.build","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core.build#L47-L137","kind":"function","name":"build","path":"examples/scripts/codegraph_core.py","language":"python","start_line":47,"end_line":137,"context_start_line":27,"context_end_line":157,"code":"\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid\n ent = CGEntity(\n id=mid, kind=\"module\", name=mod, file=abs_fp, owner=None,\n start_line=1, end_line=self._safe_count_lines(abs_fp),\n )\n self.entities_by_id[mid] = ent\n self._ids_by_file.setdefault(abs_fp, []).append(mid)\n self._index_identifiers.setdefault(mod.lower(), []).append(mid)\n # Parse AST for defs/imports/calls\n try:\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n src = fh.read()\n tree = ast.parse(src)\n except Exception:\n tree = None\n if tree is None:\n continue\n # functions/classes\n for node in ast.walk(tree):\n if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n fid = f\"py:{mod}.{name}\"\n self.entities_by_id[fid] = CGEntity(\n id=fid, kind=\"function\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(fid)\n 
self.edges_list.append(CGEdge(src=mid, dst=fid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(fid)\n elif isinstance(node, ast.ClassDef):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n cid = f\"py:{mod}.{name}\"\n self.entities_by_id[cid] = CGEntity(\n id=cid, kind=\"class\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(cid)\n self.edges_list.append(CGEdge(src=mid, dst=cid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(cid)\n # imports (module-level)\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for alias in node.names:\n im = alias.name\n if not im:\n continue\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n elif isinstance(node, ast.ImportFrom):\n im = node.module or \"\"\n if not im:\n continue\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n except Exception:\n pass\n # calls (best-effort): record identifiers used in Call nodes\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Call):\n fn = getattr(node, \"func\", None)\n name = None\n if isinstance(fn, ast.Attribute):\n name = getattr(fn, \"attr\", None)\n elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = str(name).lower()\n for cand in self._index_identifiers.get(lid, []):\n self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.entities","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core.entities#L140-L141","kind":"function","name":"entities","path":"examples/scripts/codegraph_core.py","language":"python","start_line":140,"end_line":141,"context_start_line":120,"context_end_line":161,"code":" name = None\n if isinstance(fn, ast.Attribute):\n name = getattr(fn, \"attr\", None)\n elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = str(name).lower()\n for cand in self._index_identifiers.get(lid, []):\n self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n 
self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.edges","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core.edges#L143-L148","kind":"function","name":"edges","path":"examples/scripts/codegraph_core.py","language":"python","start_line":143,"end_line":148,"context_start_line":123,"context_end_line":168,"code":" elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = str(name).lower()\n for cand in self._index_identifiers.get(lid, []):\n self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.file_hash","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core.file_hash#L150-L151","kind":"function","name":"file_hash","path":"examples/scripts/codegraph_core.py","language":"python","start_line":150,"end_line":151,"context_start_line":130,"context_end_line":171,"code":" pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> 
Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.ids_for_file","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core.ids_for_file#L153-L154","kind":"function","name":"ids_for_file","path":"examples/scripts/codegraph_core.py","language":"python","start_line":153,"end_line":154,"context_start_line":133,"context_end_line":174,"code":" if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core.find_identifier_ids","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core.find_identifier_ids#L156-L157","kind":"function","name":"find_identifier_ids","path":"examples/scripts/codegraph_core.py","language":"python","start_line":156,"end_line":157,"context_start_line":136,"context_end_line":177,"code":" self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n 
return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core._discover_py_files","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core._discover_py_files#L160-L171","kind":"function","name":"_discover_py_files","path":"examples/scripts/codegraph_core.py","language":"python","start_line":160,"end_line":171,"context_start_line":140,"context_end_line":191,"code":" def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as 
fh:","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core._module_name_for","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core._module_name_for#L173-L179","kind":"function","name":"_module_name_for","path":"examples/scripts/codegraph_core.py","language":"python","start_line":173,"end_line":179,"context_start_line":153,"context_end_line":198,"code":" def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core._safe_count_lines","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core._safe_count_lines#L181-L186","kind":"function","name":"_safe_count_lines","path":"examples/scripts/codegraph_core.py","language":"python","start_line":181,"end_line":186,"context_start_line":161,"context_end_line":198,"code":" out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = 
hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.codegraph_core._precompute_hashes","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.codegraph_core._precompute_hashes#L188-L196","kind":"function","name":"_precompute_hashes","path":"examples/scripts/codegraph_core.py","language":"python","start_line":188,"end_line":196,"context_start_line":168,"context_end_line":198,"code":" continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.safety_policy_guard","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.safety_policy_guard#L1-L21","kind":"module","name":"examples.scripts.safety_policy_guard","path":"examples/scripts/safety_policy_guard.py","language":"python","start_line":1,"end_line":21,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom typing import Any, Dict, List\n\n\ndef enforce(policy: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tApply simple safety gates like 'apply_patch requires tests_green' etc.\n\tReturns {'ok': bool, 'missing': [..]}.\n\t\"\"\"\n\treqs: List[str] = list(policy.get(\"requires\", []))\n\tpassed = []\n\tmissing = []\n\tfor r in reqs:\n\t\tif context.get(r, False):\n\t\t\tpassed.append(r)\n\t\telse:\n\t\t\tmissing.append(r)\n\treturn {\"ok\": len(missing) == 0, \"missing\": missing}\n\n","source_hash":"039ac5716a10a9122527dbe0e9a4365c91d500b4760d2c2b2574a20718c91588","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.safety_policy_guard.enforce","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.safety_policy_guard.enforce#L6-L19","kind":"function","name":"enforce","path":"examples/scripts/safety_policy_guard.py","language":"python","start_line":6,"end_line":19,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom typing import Any, Dict, List\n\n\ndef enforce(policy: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tApply simple safety gates like 'apply_patch requires tests_green' etc.\n\tReturns {'ok': bool, 'missing': [..]}.\n\t\"\"\"\n\treqs: List[str] = list(policy.get(\"requires\", []))\n\tpassed = []\n\tmissing = []\n\tfor r in reqs:\n\t\tif context.get(r, 
False):\n\t\t\tpassed.append(r)\n\t\telse:\n\t\t\tmissing.append(r)\n\treturn {\"ok\": len(missing) == 0, \"missing\": missing}\n\n","source_hash":"039ac5716a10a9122527dbe0e9a4365c91d500b4760d2c2b2574a20718c91588","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.benchmark_verifier","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.benchmark_verifier#L1-L21","kind":"module","name":"examples.scripts.benchmark_verifier","path":"examples/scripts/benchmark_verifier.py","language":"python","start_line":1,"end_line":21,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport subprocess\nfrom typing import Any, Dict, List, Optional\n\n\ndef run_benchmark_verifier(cmd: Optional[List[str]], env: Optional[Dict[str, str]] = None, timeout_sec: int = 120) -> bool:\n\t\"\"\"\n\tRuns an external verifier command (official checker) and returns True on success.\n\tWhen cmd is None, returns True (no-op) for smoke runs.\n\t\"\"\"\n\tif not cmd:\n\t\treturn True\n\ttry:\n\t\trc = subprocess.call(cmd, env=env, timeout=timeout_sec) # type: ignore\n\texcept Exception:\n\t\treturn False\n\treturn rc == 0\n\n\n","source_hash":"08cf695592dc82fbb11190555a8692e4f08a45c3b3bca0da4b74e9261aa57dbd","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.benchmark_verifier.run_benchmark_verifier","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.benchmark_verifier.run_benchmark_verifier#L7-L18","kind":"function","name":"run_benchmark_verifier","path":"examples/scripts/benchmark_verifier.py","language":"python","start_line":7,"end_line":18,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport subprocess\nfrom typing import Any, Dict, List, Optional\n\n\ndef run_benchmark_verifier(cmd: Optional[List[str]], env: Optional[Dict[str, str]] = None, timeout_sec: int = 120) -> bool:\n\t\"\"\"\n\tRuns an external verifier command (official checker) and returns True on success.\n\tWhen cmd is None, returns True (no-op) for smoke runs.\n\t\"\"\"\n\tif not cmd:\n\t\treturn True\n\ttry:\n\t\trc = subprocess.call(cmd, env=env, timeout=timeout_sec) # type: ignore\n\texcept Exception:\n\t\treturn False\n\treturn rc == 0\n\n\n","source_hash":"08cf695592dc82fbb11190555a8692e4f08a45c3b3bca0da4b74e9261aa57dbd","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.benchmark_adapter_synth","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.benchmark_adapter_synth#L1-L49","kind":"module","name":"examples.scripts.benchmark_adapter_synth","path":"examples/scripts/benchmark_adapter_synth.py","language":"python","start_line":1,"end_line":49,"context_start_line":1,"context_end_line":49,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict, Any\n\n\ndef synth_benchmark_heads(adapters_dir: str, datasets: List[str]) -> List[str]:\n\t\"\"\"\n\tSmoke-level synthesis of benchmark-aware adapter heads.\n\tCreates tiny shard stubs under adapters/shards/* so run.py can pick them up.\n\t\"\"\"\n\tadir = Path(adapters_dir)\n\tshards_dir = adir / \"shards\"\n\tshards_dir.mkdir(parents=True, exist_ok=True)\n\tcreated: List[str] = []\n\tfor spec in datasets:\n\t\tds_name = spec.replace(\":\", \"_\").replace(\"/\", \"_\")\n\t\tpath = shards_dir / f\"benchmark_{ds_name}_head.json\"\n\t\tobj: Dict[str, Any] = 
{\n\t\t\t\"schema_version\": 1,\n\t\t\t\"type\": \"benchmark_head\",\n\t\t\t\"dataset\": spec,\n\t\t\t\"rank\": 8,\n\t\t\t\"prior\": {\"recent_pass_boost\": 0.2},\n\t\t}\n\t\tpath.write_text(json.dumps(obj, indent=2), encoding=\"utf-8\")\n\t\tcreated.append(str(path))\n\treturn created\n\n\ndef main() -> None:\n\tif len(sys.argv) < 3:\n\t\tprint(\"usage: python benchmark_adapter_synth.py \", file=sys.stderr)\n\t\tsys.exit(2)\n\tadapters_dir = sys.argv[1]\n\tdatasets_csv = sys.argv[2]\n\tdatasets = [s.strip() for s in datasets_csv.split(\",\") if s.strip()]\n\tpaths = synth_benchmark_heads(adapters_dir, datasets)\n\tprint(\"\\n\".join(paths))\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","source_hash":"8342d0ac5598022ffad9e25e34a7c50ec930bd55f84a0db9d5495d3eefb74d9e","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.benchmark_adapter_synth.synth_benchmark_heads","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.benchmark_adapter_synth.synth_benchmark_heads#L10-L31","kind":"function","name":"synth_benchmark_heads","path":"examples/scripts/benchmark_adapter_synth.py","language":"python","start_line":10,"end_line":31,"context_start_line":1,"context_end_line":49,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict, Any\n\n\ndef synth_benchmark_heads(adapters_dir: str, datasets: List[str]) -> List[str]:\n\t\"\"\"\n\tSmoke-level synthesis of benchmark-aware adapter heads.\n\tCreates tiny shard stubs under adapters/shards/* so run.py can pick them up.\n\t\"\"\"\n\tadir = Path(adapters_dir)\n\tshards_dir = adir / \"shards\"\n\tshards_dir.mkdir(parents=True, exist_ok=True)\n\tcreated: List[str] = []\n\tfor spec in datasets:\n\t\tds_name = spec.replace(\":\", \"_\").replace(\"/\", \"_\")\n\t\tpath = shards_dir / f\"benchmark_{ds_name}_head.json\"\n\t\tobj: Dict[str, Any] = {\n\t\t\t\"schema_version\": 1,\n\t\t\t\"type\": \"benchmark_head\",\n\t\t\t\"dataset\": spec,\n\t\t\t\"rank\": 8,\n\t\t\t\"prior\": {\"recent_pass_boost\": 0.2},\n\t\t}\n\t\tpath.write_text(json.dumps(obj, indent=2), encoding=\"utf-8\")\n\t\tcreated.append(str(path))\n\treturn created\n\n\ndef main() -> None:\n\tif len(sys.argv) < 3:\n\t\tprint(\"usage: python benchmark_adapter_synth.py \", file=sys.stderr)\n\t\tsys.exit(2)\n\tadapters_dir = sys.argv[1]\n\tdatasets_csv = sys.argv[2]\n\tdatasets = [s.strip() for s in datasets_csv.split(\",\") if s.strip()]\n\tpaths = synth_benchmark_heads(adapters_dir, datasets)\n\tprint(\"\\n\".join(paths))\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","source_hash":"8342d0ac5598022ffad9e25e34a7c50ec930bd55f84a0db9d5495d3eefb74d9e","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.benchmark_adapter_synth.main","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.benchmark_adapter_synth.main#L34-L42","kind":"function","name":"main","path":"examples/scripts/benchmark_adapter_synth.py","language":"python","start_line":34,"end_line":42,"context_start_line":14,"context_end_line":49,"code":"\t\"\"\"\n\tadir = Path(adapters_dir)\n\tshards_dir = adir / \"shards\"\n\tshards_dir.mkdir(parents=True, exist_ok=True)\n\tcreated: List[str] = []\n\tfor spec in datasets:\n\t\tds_name = spec.replace(\":\", \"_\").replace(\"/\", \"_\")\n\t\tpath = shards_dir / f\"benchmark_{ds_name}_head.json\"\n\t\tobj: Dict[str, Any] = {\n\t\t\t\"schema_version\": 1,\n\t\t\t\"type\": 
\"benchmark_head\",\n\t\t\t\"dataset\": spec,\n\t\t\t\"rank\": 8,\n\t\t\t\"prior\": {\"recent_pass_boost\": 0.2},\n\t\t}\n\t\tpath.write_text(json.dumps(obj, indent=2), encoding=\"utf-8\")\n\t\tcreated.append(str(path))\n\treturn created\n\n\ndef main() -> None:\n\tif len(sys.argv) < 3:\n\t\tprint(\"usage: python benchmark_adapter_synth.py \", file=sys.stderr)\n\t\tsys.exit(2)\n\tadapters_dir = sys.argv[1]\n\tdatasets_csv = sys.argv[2]\n\tdatasets = [s.strip() for s in datasets_csv.split(\",\") if s.strip()]\n\tpaths = synth_benchmark_heads(adapters_dir, datasets)\n\tprint(\"\\n\".join(paths))\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","source_hash":"8342d0ac5598022ffad9e25e34a7c50ec930bd55f84a0db9d5495d3eefb74d9e","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.python_repo_graph","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.python_repo_graph#L1-L155","kind":"module","name":"examples.scripts.python_repo_graph","path":"examples/scripts/python_repo_graph.py","language":"python","start_line":1,"end_line":155,"context_start_line":1,"context_end_line":155,"code":"from __future__ import annotations\n\nimport os\nfrom typing import Iterable, List, Tuple, Dict, Optional, Set\n\nfrom modules.program_graph import (\n Entity,\n Edge,\n Artifact,\n Span,\n ResolvedAnchor,\n EntityId,\n)\nfrom .codegraph_core import CodeGraph, CGEntity, CGEdge\nfrom .repo_graph import RepoGraph, artifact_uri, program_id_for_repo, parse_program_uri\n\n\ndef _entity_uri(program_id: str, cg: CGEntity) -> str:\n if cg.kind == \"module\" or cg.kind == \"test_module\":\n resource = cg.name\n kind = \"module\"\n else:\n resource = cg.id.split(\"py:\", 1)[-1]\n kind = cg.kind\n return f\"program://{program_id}/{kind}/{resource}#L{cg.start_line}-L{cg.end_line}\"\n\n\nclass PythonRepoGraph(RepoGraph):\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n super().__init__(repo_root, ignore=ignore)\n self._cg = CodeGraph(self.repo_root, ignore=ignore).build()\n self._ent_cache: Optional[List[Entity]] = None\n self._edge_cache: Optional[List[Edge]] = None\n self._ent_by_id: Dict[str, CGEntity] = {e.id: e for e in self._cg.entities_by_id.values()}\n self._ids_by_name: Dict[str, List[str]] = {}\n for e in self._cg.entities():\n self._ids_by_name.setdefault(e.name.lower(), []).append(e.id)\n\n def entities(self) -> Iterable[Entity]:\n if self._ent_cache is None:\n out: List[Entity] = []\n for cg in self._cg.entities():\n uri = _entity_uri(self.program_id, cg)\n out.append(Entity(uri=uri, id=cg.id, kind=cg.kind if cg.kind != \"test_module\" else \"module\", name=cg.name, owner=cg.owner or None, labels=None))\n self._ent_cache = out\n return list(self._ent_cache)\n\n def edges(self) -> Iterable[Edge]:\n if self._edge_cache is None:\n out: List[Edge] = []\n seen: Set[Tuple[str, str, str]] = set()\n for ce in self._cg.edges():\n key = (ce.src, ce.dst, ce.type)\n if key in seen:\n continue\n seen.add(key)\n out.append(Edge(src=ce.src, dst=ce.dst, type=ce.type, meta=None))\n self._edge_cache = out\n return list(self._edge_cache)\n\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]:\n t = (token or \"\").strip()\n if not t:\n return []\n ids = self._cg.find_identifier_ids(t) or []\n out: List[Tuple[EntityId, Span]] = []\n for eid in ids:\n cg = self._ent_by_id.get(eid)\n if not cg:\n continue\n out.append((eid, Span(start_line=int(cg.start_line), end_line=int(cg.end_line))))\n return out\n\n def artifacts(self, kind: str) -> 
Iterable[Artifact]:\n if kind not in (\"source\", \"artifact\"):\n return []\n out: List[Artifact] = []\n seen: Set[str] = set()\n for e in self._cg.entities():\n fp = e.file\n if fp in seen:\n continue\n seen.add(fp)\n rel = os.path.relpath(fp, self.repo_root).replace(\"\\\\\", \"/\")\n art_uri = artifact_uri(self.program_id, rel)\n out.append(Artifact(uri=art_uri, type=\"source\", hash=self._cg.file_hash(fp), span=None))\n return out\n\n def _resolve_entity_uri(self, kind: str, resource: str, span: Optional[Tuple[int, int]]) -> ResolvedAnchor:\n if kind in (\"module\", \"function\", \"class\"):\n ent_id = f\"py:{resource}\"\n else:\n ent_id = f\"py:{resource}\"\n base = self._ent_by_id.get(ent_id)\n if not base:\n raise KeyError(f\"entity not found for uri resource: {resource}\")\n abs_fp = base.file\n a = int(span[0]) if span else int(base.start_line)\n b = int(span[1]) if span else int(base.end_line)\n rel = os.path.relpath(abs_fp, self.repo_root).replace(\"\\\\\", \"/\")\n art_uri = artifact_uri(self.program_id, rel)\n return ResolvedAnchor(artifact_uri=art_uri, span=Span(start_line=a, end_line=b), hash=self._cg.file_hash(abs_fp))\n\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"PythonRepoGraph\":\n if not seeds or radius <= 0:\n return self\n # Build adjacency over current edge view\n adj: Dict[str, List[str]] = {}\n for e in self.edges():\n adj.setdefault(e.src, []).append(e.dst)\n adj.setdefault(e.dst, []).append(e.src)\n cur = set(seeds)\n seen = set(cur)\n for _ in range(max(1, radius)):\n nxt: Set[str] = set()\n for s in list(cur):\n for nb in adj.get(s, []):\n if nb not in seen:\n seen.add(nb)\n nxt.add(nb)\n cur = nxt\n return _FilteredPythonRepoGraph(self, include_ids=seen)\n\n\nclass _FilteredPythonRepoGraph(PythonRepoGraph):\n def __init__(self, base: PythonRepoGraph, include_ids: Set[str]):\n # Shallow copy of references; restrict to include_ids\n self.repo_root = base.repo_root\n self.program_id = base.program_id\n self._cg = base._cg\n self._ent_by_id = base._ent_by_id\n self._ids_by_name = base._ids_by_name\n self._include_ids = set(include_ids)\n self._ent_cache = None\n self._edge_cache = None\n\n def entities(self) -> Iterable[Entity]:\n out: List[Entity] = []\n for cg in self._cg.entities():\n if cg.id not in self._include_ids:\n continue\n uri = _entity_uri(self.program_id, cg)\n out.append(Entity(uri=uri, id=cg.id, kind=cg.kind if cg.kind != \"test_module\" else \"module\", name=cg.name, owner=cg.owner or None, labels=None))\n return out\n\n def edges(self) -> Iterable[Edge]:\n out: List[Edge] = []\n inc = self._include_ids\n for ce in self._cg.edges():\n if (ce.src in inc) and (ce.dst in inc):\n out.append(Edge(src=ce.src, dst=ce.dst, type=ce.type, meta=None))\n return out\n\n\n","source_hash":"8431b753d44b1a7766f1eec1f83f140a54db66b26eaa0d51a393859e7aabb751","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.python_repo_graph._entity_uri","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.python_repo_graph._entity_uri#L18-L25","kind":"function","name":"_entity_uri","path":"examples/scripts/python_repo_graph.py","language":"python","start_line":18,"end_line":25,"context_start_line":1,"context_end_line":45,"code":"from __future__ import annotations\n\nimport os\nfrom typing import Iterable, List, Tuple, Dict, Optional, Set\n\nfrom modules.program_graph import (\n Entity,\n Edge,\n Artifact,\n Span,\n ResolvedAnchor,\n EntityId,\n)\nfrom .codegraph_core import CodeGraph, CGEntity, 
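A minimal usage sketch for the PythonRepoGraph record above. The repo path, ignore list, and seed choice are illustrative assumptions, not part of the source; the calls themselves (entities, edges, subgraph) mirror the record.

# Sketch only: assumes examples.scripts is importable and ./my_repo holds Python sources.
from examples.scripts.python_repo_graph import PythonRepoGraph

g = PythonRepoGraph("./my_repo", ignore=[".venv", "build"])
ents = list(g.entities())   # Entity records carrying program:// URIs
edges = list(g.edges())     # (src, dst, type) edges, de-duplicated
print(len(ents), "entities,", len(edges), "edges")

# Restrict the view to the 1-hop neighborhood of one entity id.
if ents:
    view = g.subgraph([ents[0].id], radius=1)
    print(sum(1 for _ in view.entities()), "entities in the filtered view")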
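For orientation, the URI scheme emitted by _entity_uri above, restated as a tiny standalone helper; this is a simplification that takes the resource string as precomputed, and the program id and entity values are hypothetical.

def entity_uri(program_id: str, kind: str, resource: str, a: int, b: int) -> str:
    # Mirrors _entity_uri: test modules collapse to kind "module"; others keep their kind.
    k = "module" if kind in ("module", "test_module") else kind
    return f"program://{program_id}/{k}/{resource}#L{a}-L{b}"

assert entity_uri("MyRepo", "function", "pkg.mod.helper", 10, 24) == "program://MyRepo/function/pkg.mod.helper#L10-L24"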
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.python_repo_graph.subgraph","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.python_repo_graph.subgraph#L104-L122","kind":"function","name":"subgraph","path":"examples/scripts/python_repo_graph.py","language":"python","start_line":104,"end_line":122,"context_start_line":84,"context_end_line":142,"code":" rel = os.path.relpath(fp, self.repo_root).replace(\"\\\\\", \"/\")\n art_uri = artifact_uri(self.program_id, rel)\n out.append(Artifact(uri=art_uri, type=\"source\", hash=self._cg.file_hash(fp), span=None))\n return out\n\n def _resolve_entity_uri(self, kind: str, resource: str, span: Optional[Tuple[int, int]]) -> ResolvedAnchor:\n if kind in (\"module\", \"function\", \"class\"):\n ent_id = f\"py:{resource}\"\n else:\n ent_id = f\"py:{resource}\"\n base = self._ent_by_id.get(ent_id)\n if not base:\n raise KeyError(f\"entity not found for uri resource: {resource}\")\n abs_fp = base.file\n a = int(span[0]) if span else int(base.start_line)\n b = int(span[1]) if span else int(base.end_line)\n rel = os.path.relpath(abs_fp, self.repo_root).replace(\"\\\\\", \"/\")\n art_uri = artifact_uri(self.program_id, rel)\n return ResolvedAnchor(artifact_uri=art_uri, span=Span(start_line=a, end_line=b), hash=self._cg.file_hash(abs_fp))\n\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"PythonRepoGraph\":\n if not seeds or radius <= 0:\n return self\n # Build adjacency over current edge view\n adj: Dict[str, List[str]] = {}\n for e in self.edges():\n adj.setdefault(e.src, []).append(e.dst)\n adj.setdefault(e.dst, []).append(e.src)\n cur = set(seeds)\n seen = set(cur)\n for _ in range(max(1, radius)):\n nxt: Set[str] = set()\n for s in list(cur):\n for nb in adj.get(s, []):\n if nb not in seen:\n seen.add(nb)\n nxt.add(nb)\n cur = nxt\n return _FilteredPythonRepoGraph(self, include_ids=seen)\n\n\nclass _FilteredPythonRepoGraph(PythonRepoGraph):\n def __init__(self, base: PythonRepoGraph, include_ids: Set[str]):\n # Shallow copy of references; restrict to include_ids\n self.repo_root = base.repo_root\n self.program_id = base.program_id\n self._cg = base._cg\n self._ent_by_id = base._ent_by_id\n self._ids_by_name = base._ids_by_name\n self._include_ids = set(include_ids)\n self._ent_cache = None\n self._edge_cache = None\n\n def entities(self) -> Iterable[Entity]:\n out: List[Entity] = []\n for cg in self._cg.entities():\n if cg.id not in self._include_ids:\n continue\n uri = _entity_uri(self.program_id, cg)","source_hash":"8431b753d44b1a7766f1eec1f83f140a54db66b26eaa0d51a393859e7aabb751","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.repo_graph","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.repo_graph#L1-L133","kind":"module","name":"examples.scripts.repo_graph","path":"examples/scripts/repo_graph.py","language":"python","start_line":1,"end_line":133,"context_start_line":1,"context_end_line":133,"code":"from __future__ import annotations\n\nimport os\nimport re\nimport hashlib\nfrom typing import Iterable, List, Tuple, Dict, Optional, Set\n\nfrom modules.program_graph import (\n ProgramGraph,\n Entity,\n Edge,\n Artifact,\n Span,\n ResolvedAnchor,\n EntityId,\n)\n\n\ndef program_id_for_repo(repo_root: str) -> str:\n base = os.path.basename(os.path.abspath(repo_root)) or \"repo\"\n return base\n\n\ndef artifact_uri(program_id: str, rel_path: str) -> str:\n rel = rel_path.replace(\"\\\\\", \"/\").lstrip(\"/\")\n return 
f\"program://{program_id}/artifact/{rel}\"\n\n\ndef parse_program_uri(uri: str) -> Tuple[str, str, str, Optional[Tuple[int, int]]]:\n m = re.match(r\"^program://([^/]+)/([^/]+)/(.+?)(?:#L(\\d+)-L(\\d+))?$\", uri)\n if not m:\n raise ValueError(f\"invalid program uri: {uri}\")\n pid, kind, res, a, b = m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)\n span = (int(a), int(b)) if (a and b) else None\n return pid, kind, res, span\n\n\nclass RepoGraph(ProgramGraph):\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.repo_root = os.path.abspath(repo_root)\n self.program_id = program_id_for_repo(self.repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self._file_hash: Dict[str, str] = {}\n\n # ProgramGraph defaults (repo-agnostic)\n def entities(self) -> Iterable[Entity]:\n return []\n\n def edges(self) -> Iterable[Edge]:\n return []\n\n def search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]:\n return []\n\n def subgraph(self, seeds: List[EntityId], radius: int) -> \"ProgramGraph\":\n if not seeds or radius <= 0:\n return self\n # Generic BFS over current edges view\n adj: Dict[str, List[str]] = {}\n for e in self.edges():\n adj.setdefault(e.src, []).append(e.dst)\n adj.setdefault(e.dst, []).append(e.src)\n cur = set(seeds)\n seen = set(cur)\n for _ in range(max(1, radius)):\n nxt: Set[str] = set()\n for s in list(cur):\n for nb in adj.get(s, []):\n if nb not in seen:\n seen.add(nb)\n nxt.add(nb)\n cur = nxt\n return self # default view is whole repo; subclasses may return filtered views\n\n def artifacts(self, kind: str) -> Iterable[Artifact]:\n if kind not in (\"artifact\", \"source\"):\n return []\n out: List[Artifact] = []\n for fp in self._discover_files(self.repo_root, self.ignore_rules):\n rel = os.path.relpath(fp, self.repo_root).replace(\"\\\\\", \"/\")\n out.append(Artifact(uri=artifact_uri(self.program_id, rel), type=\"source\", hash=self._hash_for(fp), span=None))\n return out\n\n def resolve(self, uri: str) -> ResolvedAnchor:\n pid, kind, res, span = parse_program_uri(uri)\n if pid != self.program_id:\n raise ValueError(f\"program id mismatch: {pid} != {self.program_id}\")\n if kind == \"artifact\":\n abs_fp = os.path.abspath(os.path.join(self.repo_root, res))\n if not os.path.isfile(abs_fp):\n raise FileNotFoundError(f\"artifact not found: {abs_fp}\")\n a = int(span[0]) if span else 1\n b = int(span[1]) if span else self._safe_count_lines(abs_fp)\n rel = os.path.relpath(abs_fp, self.repo_root).replace(\"\\\\\", \"/\")\n return ResolvedAnchor(artifact_uri=artifact_uri(self.program_id, rel), span=Span(start_line=a, end_line=b), hash=self._hash_for(abs_fp))\n # Let subclass handle entity URIs\n return self._resolve_entity_uri(kind, res, span)\n\n # Hooks for subclasses\n def _resolve_entity_uri(self, kind: str, resource: str, span: Optional[Tuple[int, int]]) -> ResolvedAnchor:\n raise KeyError(f\"unrecognized entity uri for kind={kind}, resource={resource}\")\n\n # Utilities\n def _discover_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n if any(ig and ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _hash_for(self, abs_file: str) -> 
str:\n if abs_file in self._file_hash:\n return self._file_hash[abs_file]\n try:\n with open(abs_file, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[abs_file] = h\n return h\n\n","source_hash":"910d52dd4d2a09d4d93df32e7ea0dab17810914c5fd1beba6d99bf2add4a0b60","truncated":false}
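A round-trip check for the URI helpers in the repo_graph record above. The parser body is adapted from the record (only the group unpacking is condensed); the sample URIs are hypothetical.

import re
from typing import Optional, Tuple

def parse_program_uri(uri: str) -> Tuple[str, str, str, Optional[Tuple[int, int]]]:
    # Adapted from examples/scripts/repo_graph.py above; the #La-Lb span suffix is optional.
    m = re.match(r"^program://([^/]+)/([^/]+)/(.+?)(?:#L(\d+)-L(\d+))?$", uri)
    if not m:
        raise ValueError(f"invalid program uri: {uri}")
    pid, kind, res, a, b = m.groups()
    return pid, kind, res, (int(a), int(b)) if (a and b) else None

assert parse_program_uri("program://MyRepo/artifact/src/app.py#L3-L9") == ("MyRepo", "artifact", "src/app.py", (3, 9))
assert parse_program_uri("program://MyRepo/artifact/README.md")[3] is None  # bare artifact URI, no span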
return out\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _hash_for(self, abs_file: str) -> str:\n if abs_file in self._file_hash:\n return self._file_hash[abs_file]\n try:\n with open(abs_file, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[abs_file] = h\n return h\n\n","source_hash":"910d52dd4d2a09d4d93df32e7ea0dab17810914c5fd1beba6d99bf2add4a0b60","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.repo_graph._hash_for","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.repo_graph._hash_for#L121-L131","kind":"function","name":"_hash_for","path":"examples/scripts/repo_graph.py","language":"python","start_line":121,"end_line":131,"context_start_line":101,"context_end_line":133,"code":" raise KeyError(f\"unrecognized entity uri for kind={kind}, resource={resource}\")\n\n # Utilities\n def _discover_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n if any(ig and ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _hash_for(self, abs_file: str) -> str:\n if abs_file in self._file_hash:\n return self._file_hash[abs_file]\n try:\n with open(abs_file, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[abs_file] = h\n return h\n\n","source_hash":"910d52dd4d2a09d4d93df32e7ea0dab17810914c5fd1beba6d99bf2add4a0b60","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.tool_budget_allocator","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.tool_budget_allocator#L1-L15","kind":"module","name":"examples.scripts.tool_budget_allocator","path":"examples/scripts/tool_budget_allocator.py","language":"python","start_line":1,"end_line":15,"context_start_line":1,"context_end_line":15,"code":"from __future__ import annotations\n\nfrom typing import Any, Dict\n\n\ndef allocate(budget: Dict[str, Any], expected_gain: float) -> Dict[str, Any]:\n\t\"\"\"\n\tGreedily allocate tokens/time by expected verifier-gain per unit cost (placeholder).\n\t\"\"\"\n\tout = dict(budget)\n\tout[\"allocated\"] = {\"tokens\": int(budget.get(\"tokens\", 64000) * 0.5)}\n\tout[\"expected_gain\"] = float(expected_gain)\n\treturn out\n\n","source_hash":"d16141703bdf7b6026a5c99db975c92fe798868286dc91eaa92586786bf28e71","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.tool_budget_allocator.allocate","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.tool_budget_allocator.allocate#L6-L13","kind":"function","name":"allocate","path":"examples/scripts/tool_budget_allocator.py","language":"python","start_line":6,"end_line":13,"context_start_line":1,"context_end_line":15,"code":"from __future__ import annotations\n\nfrom typing import Any, Dict\n\n\ndef allocate(budget: Dict[str, Any], expected_gain: float) -> Dict[str, Any]:\n\t\"\"\"\n\tGreedily allocate tokens/time by expected verifier-gain per 
unit cost (placeholder).\n\t\"\"\"\n\tout = dict(budget)\n\tout[\"allocated\"] = {\"tokens\": int(budget.get(\"tokens\", 64000) * 0.5)}\n\tout[\"expected_gain\"] = float(expected_gain)\n\treturn out\n\n","source_hash":"d16141703bdf7b6026a5c99db975c92fe798868286dc91eaa92586786bf28e71","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.dataset_graph","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.dataset_graph#L1-L132","kind":"module","name":"examples.scripts.dataset_graph","path":"examples/scripts/dataset_graph.py","language":"python","start_line":1,"end_line":132,"context_start_line":1,"context_end_line":132,"code":"from __future__ import annotations\n\nimport os\nfrom typing import Iterable, List, Tuple, Dict, Optional\n\nfrom modules.program_graph import (\n\tProgramGraph,\n\tEntity,\n\tEdge,\n\tArtifact,\n\tSpan,\n\tResolvedAnchor,\n\tEntityId,\n)\nfrom .repo_graph import artifact_uri, program_id_for_repo, parse_program_uri\n\n\nclass DatasetProgramGraph(ProgramGraph):\n\t\"\"\"\n\tMinimal Dataset→ProgramGraph backend.\n\tRepresents benchmark datasets (MBPP, SWE-bench, etc.) as entities:\n\t- problem://\n\t- reference://\n\t- verifier://\n\tArtifacts point to source JSON or local cache paths when available.\n\t\"\"\"\n\n\tdef __init__(self, root: str, datasets: Optional[List[str]] = None):\n\t\tself.root = os.path.abspath(root)\n\t\tself.program_id = program_id_for_repo(self.root)\n\t\tself._datasets = datasets or [\"mbpp:train\", \"mbpp:test\", \"swe_bench:train\"]\n\t\t# Tiny synthetic index for smoke use\n\t\tself._index: Dict[str, Dict[str, str]] = {}\n\t\tfor spec in self._datasets:\n\t\t\tds, split = (spec.split(\":\", 1) + [\"train\"])[:2]\n\t\t\tkey = f\"{ds}/{split}/0001\"\n\t\t\tself._index[key] = {\n\t\t\t\t\"dataset\": ds,\n\t\t\t\t\"split\": split,\n\t\t\t\t\"problem\": f\"Solve sample problem 0001 from {ds}/{split}\",\n\t\t\t\t\"reference\": \"def solution(x):\\n\\treturn x\\n\",\n\t\t\t\t\"verifier\": \"python verify_0001.py\",\n\t\t\t\t\"artifact_rel\": f\"datasets/{ds}/{split}/0001.json\"\n\t\t\t}\n\n\tdef entities(self) -> Iterable[Entity]:\n\t\tout: List[Entity] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tds = meta[\"dataset\"]\n\t\t\tsplit = meta[\"split\"]\n\t\t\t# problem\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/problem/{key}\",\n\t\t\t\tid=f\"ds:problem:{key}\",\n\t\t\t\tkind=\"problem\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\t\t# reference\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/reference/{key}\",\n\t\t\t\tid=f\"ds:reference:{key}\",\n\t\t\t\tkind=\"reference\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\t\t# verifier\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/verifier/{key}\",\n\t\t\t\tid=f\"ds:verifier:{key}\",\n\t\t\t\tkind=\"verifier\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\treturn out\n\n\tdef edges(self) -> Iterable[Edge]:\n\t\tout: List[Edge] = []\n\t\tfor key in self._index.keys():\n\t\t\tpid = f\"ds:problem:{key}\"\n\t\t\trid = f\"ds:reference:{key}\"\n\t\t\tvid = f\"ds:verifier:{key}\"\n\t\t\tout.append(Edge(src=pid, dst=rid, type=\"has_reference\", meta=None))\n\t\t\tout.append(Edge(src=pid, dst=vid, type=\"has_verifier\", meta=None))\n\t\treturn out\n\n\tdef artifacts(self, kind: 
str) -> Iterable[Artifact]:\n\t\tif kind not in (\"artifact\", \"source\"):\n\t\t\treturn []\n\t\tout: List[Artifact] = []\n\t\tfor key, meta in self._index.items():\n\t\t\trel = meta.get(\"artifact_rel\") or f\"datasets/{key}.json\"\n\t\t\tabs_fp = os.path.join(self.root, rel)\n\t\t\t# We may not have real files; just issue URIs with empty hash\n\t\t\tout.append(Artifact(uri=artifact_uri(self.program_id, rel), type=\"artifact\", hash=\"\", span=None))\n\t\treturn out\n\n\tdef search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]:\n\t\tt = (token or \"\").strip().lower()\n\t\tif not t:\n\t\t\treturn []\n\t\tout: List[Tuple[EntityId, Span]] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tif t in (meta.get(\"problem\") or \"\").lower():\n\t\t\t\tout.append((f\"ds:problem:{key}\", Span(start_line=1, end_line=1)))\n\t\treturn out\n\n\tdef resolve(self, uri: str) -> ResolvedAnchor:\n\t\tpid, kind, res, span = parse_program_uri(uri)\n\t\tif pid != self.program_id:\n\t\t\traise ValueError(f\"program id mismatch: {pid} != {self.program_id}\")\n\t\t# Map entity URIs to synthetic artifact anchors\n\t\tif kind in (\"problem\", \"reference\", \"verifier\"):\n\t\t\tkey = res\n\t\t\tbase = f\"datasets/{key}.json\"\n\t\t\ta = int(span[0]) if span else 1\n\t\t\tb = int(span[1]) if span else a\n\t\t\treturn ResolvedAnchor(artifact_uri=artifact_uri(self.program_id, base), span=Span(start_line=a, end_line=b), hash=\"\")\n\t\t# Fallback to artifact resolution\n\t\tif kind == \"artifact\":\n\t\t\t# Defer to generic artifact handling through RepoGraph helpers (inline)\n\t\t\tabs_fp = os.path.abspath(os.path.join(self.root, res))\n\t\t\ta = int(span[0]) if span else 1\n\t\t\tb = int(span[1]) if span else max(1, a)\n\t\t\trel = res.replace(\"\\\\\", \"/\")\n\t\t\treturn ResolvedAnchor(artifact_uri=artifact_uri(self.program_id, rel), span=Span(start_line=a, end_line=b), hash=\"\")\n\t\traise KeyError(f\"unrecognized entity uri for kind={kind}, resource={res}\")\n\n","source_hash":"a90d4ac27e2b320e678484e0e3095dd2c0fc4466e6bbbda7709487a7a6738471","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.dataset_graph.DatasetProgramGraph","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.dataset_graph.DatasetProgramGraph#L18-L130","kind":"class","name":"DatasetProgramGraph","path":"examples/scripts/dataset_graph.py","language":"python","start_line":18,"end_line":130,"context_start_line":1,"context_end_line":132,"code":"from __future__ import annotations\n\nimport os\nfrom typing import Iterable, List, Tuple, Dict, Optional\n\nfrom modules.program_graph import (\n\tProgramGraph,\n\tEntity,\n\tEdge,\n\tArtifact,\n\tSpan,\n\tResolvedAnchor,\n\tEntityId,\n)\nfrom .repo_graph import artifact_uri, program_id_for_repo, parse_program_uri\n\n\nclass DatasetProgramGraph(ProgramGraph):\n\t\"\"\"\n\tMinimal Dataset→ProgramGraph backend.\n\tRepresents benchmark datasets (MBPP, SWE-bench, etc.) 
as entities:\n\t- problem://\n\t- reference://\n\t- verifier://\n\tArtifacts point to source JSON or local cache paths when available.\n\t\"\"\"\n\n\tdef __init__(self, root: str, datasets: Optional[List[str]] = None):\n\t\tself.root = os.path.abspath(root)\n\t\tself.program_id = program_id_for_repo(self.root)\n\t\tself._datasets = datasets or [\"mbpp:train\", \"mbpp:test\", \"swe_bench:train\"]\n\t\t# Tiny synthetic index for smoke use\n\t\tself._index: Dict[str, Dict[str, str]] = {}\n\t\tfor spec in self._datasets:\n\t\t\tds, split = (spec.split(\":\", 1) + [\"train\"])[:2]\n\t\t\tkey = f\"{ds}/{split}/0001\"\n\t\t\tself._index[key] = {\n\t\t\t\t\"dataset\": ds,\n\t\t\t\t\"split\": split,\n\t\t\t\t\"problem\": f\"Solve sample problem 0001 from {ds}/{split}\",\n\t\t\t\t\"reference\": \"def solution(x):\\n\\treturn x\\n\",\n\t\t\t\t\"verifier\": \"python verify_0001.py\",\n\t\t\t\t\"artifact_rel\": f\"datasets/{ds}/{split}/0001.json\"\n\t\t\t}\n\n\tdef entities(self) -> Iterable[Entity]:\n\t\tout: List[Entity] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tds = meta[\"dataset\"]\n\t\t\tsplit = meta[\"split\"]\n\t\t\t# problem\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/problem/{key}\",\n\t\t\t\tid=f\"ds:problem:{key}\",\n\t\t\t\tkind=\"problem\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\t\t# reference\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/reference/{key}\",\n\t\t\t\tid=f\"ds:reference:{key}\",\n\t\t\t\tkind=\"reference\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\t\t# verifier\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/verifier/{key}\",\n\t\t\t\tid=f\"ds:verifier:{key}\",\n\t\t\t\tkind=\"verifier\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\treturn out\n\n\tdef edges(self) -> Iterable[Edge]:\n\t\tout: List[Edge] = []\n\t\tfor key in self._index.keys():\n\t\t\tpid = f\"ds:problem:{key}\"\n\t\t\trid = f\"ds:reference:{key}\"\n\t\t\tvid = f\"ds:verifier:{key}\"\n\t\t\tout.append(Edge(src=pid, dst=rid, type=\"has_reference\", meta=None))\n\t\t\tout.append(Edge(src=pid, dst=vid, type=\"has_verifier\", meta=None))\n\t\treturn out\n\n\tdef artifacts(self, kind: str) -> Iterable[Artifact]:\n\t\tif kind not in (\"artifact\", \"source\"):\n\t\t\treturn []\n\t\tout: List[Artifact] = []\n\t\tfor key, meta in self._index.items():\n\t\t\trel = meta.get(\"artifact_rel\") or f\"datasets/{key}.json\"\n\t\t\tabs_fp = os.path.join(self.root, rel)\n\t\t\t# We may not have real files; just issue URIs with empty hash\n\t\t\tout.append(Artifact(uri=artifact_uri(self.program_id, rel), type=\"artifact\", hash=\"\", span=None))\n\t\treturn out\n\n\tdef search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]:\n\t\tt = (token or \"\").strip().lower()\n\t\tif not t:\n\t\t\treturn []\n\t\tout: List[Tuple[EntityId, Span]] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tif t in (meta.get(\"problem\") or \"\").lower():\n\t\t\t\tout.append((f\"ds:problem:{key}\", Span(start_line=1, end_line=1)))\n\t\treturn out\n\n\tdef resolve(self, uri: str) -> ResolvedAnchor:\n\t\tpid, kind, res, span = parse_program_uri(uri)\n\t\tif pid != self.program_id:\n\t\t\traise ValueError(f\"program id mismatch: {pid} != {self.program_id}\")\n\t\t# Map entity URIs to synthetic artifact anchors\n\t\tif kind in (\"problem\", 
\"reference\", \"verifier\"):\n\t\t\tkey = res\n\t\t\tbase = f\"datasets/{key}.json\"\n\t\t\ta = int(span[0]) if span else 1\n\t\t\tb = int(span[1]) if span else a\n\t\t\treturn ResolvedAnchor(artifact_uri=artifact_uri(self.program_id, base), span=Span(start_line=a, end_line=b), hash=\"\")\n\t\t# Fallback to artifact resolution\n\t\tif kind == \"artifact\":\n\t\t\t# Defer to generic artifact handling through RepoGraph helpers (inline)\n\t\t\tabs_fp = os.path.abspath(os.path.join(self.root, res))\n\t\t\ta = int(span[0]) if span else 1\n\t\t\tb = int(span[1]) if span else max(1, a)\n\t\t\trel = res.replace(\"\\\\\", \"/\")\n\t\t\treturn ResolvedAnchor(artifact_uri=artifact_uri(self.program_id, rel), span=Span(start_line=a, end_line=b), hash=\"\")\n\t\traise KeyError(f\"unrecognized entity uri for kind={kind}, resource={res}\")\n\n","source_hash":"a90d4ac27e2b320e678484e0e3095dd2c0fc4466e6bbbda7709487a7a6738471","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.dataset_graph.__init__","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.dataset_graph.__init__#L28-L44","kind":"function","name":"__init__","path":"examples/scripts/dataset_graph.py","language":"python","start_line":28,"end_line":44,"context_start_line":8,"context_end_line":64,"code":"\tEntity,\n\tEdge,\n\tArtifact,\n\tSpan,\n\tResolvedAnchor,\n\tEntityId,\n)\nfrom .repo_graph import artifact_uri, program_id_for_repo, parse_program_uri\n\n\nclass DatasetProgramGraph(ProgramGraph):\n\t\"\"\"\n\tMinimal Dataset→ProgramGraph backend.\n\tRepresents benchmark datasets (MBPP, SWE-bench, etc.) as entities:\n\t- problem://\n\t- reference://\n\t- verifier://\n\tArtifacts point to source JSON or local cache paths when available.\n\t\"\"\"\n\n\tdef __init__(self, root: str, datasets: Optional[List[str]] = None):\n\t\tself.root = os.path.abspath(root)\n\t\tself.program_id = program_id_for_repo(self.root)\n\t\tself._datasets = datasets or [\"mbpp:train\", \"mbpp:test\", \"swe_bench:train\"]\n\t\t# Tiny synthetic index for smoke use\n\t\tself._index: Dict[str, Dict[str, str]] = {}\n\t\tfor spec in self._datasets:\n\t\t\tds, split = (spec.split(\":\", 1) + [\"train\"])[:2]\n\t\t\tkey = f\"{ds}/{split}/0001\"\n\t\t\tself._index[key] = {\n\t\t\t\t\"dataset\": ds,\n\t\t\t\t\"split\": split,\n\t\t\t\t\"problem\": f\"Solve sample problem 0001 from {ds}/{split}\",\n\t\t\t\t\"reference\": \"def solution(x):\\n\\treturn x\\n\",\n\t\t\t\t\"verifier\": \"python verify_0001.py\",\n\t\t\t\t\"artifact_rel\": f\"datasets/{ds}/{split}/0001.json\"\n\t\t\t}\n\n\tdef entities(self) -> Iterable[Entity]:\n\t\tout: List[Entity] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tds = meta[\"dataset\"]\n\t\t\tsplit = meta[\"split\"]\n\t\t\t# problem\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/problem/{key}\",\n\t\t\t\tid=f\"ds:problem:{key}\",\n\t\t\t\tkind=\"problem\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\t\t# reference\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/reference/{key}\",\n\t\t\t\tid=f\"ds:reference:{key}\",\n\t\t\t\tkind=\"reference\",","source_hash":"a90d4ac27e2b320e678484e0e3095dd2c0fc4466e6bbbda7709487a7a6738471","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.dataset_graph.entities","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.dataset_graph.entities#L46-L78","kind":"function","name":"entities","path":"examples/scripts/dataset_graph.py","language":"python","start_line":46,"end_line":78,"context_start_line":26,"context_end_line":98,"code":"\t\"\"\"\n\n\tdef __init__(self, root: str, datasets: Optional[List[str]] = None):\n\t\tself.root = os.path.abspath(root)\n\t\tself.program_id = program_id_for_repo(self.root)\n\t\tself._datasets = datasets or [\"mbpp:train\", \"mbpp:test\", \"swe_bench:train\"]\n\t\t# Tiny synthetic index for smoke use\n\t\tself._index: Dict[str, Dict[str, str]] = {}\n\t\tfor spec in self._datasets:\n\t\t\tds, split = (spec.split(\":\", 1) + [\"train\"])[:2]\n\t\t\tkey = f\"{ds}/{split}/0001\"\n\t\t\tself._index[key] = {\n\t\t\t\t\"dataset\": ds,\n\t\t\t\t\"split\": split,\n\t\t\t\t\"problem\": f\"Solve sample problem 0001 from {ds}/{split}\",\n\t\t\t\t\"reference\": \"def solution(x):\\n\\treturn x\\n\",\n\t\t\t\t\"verifier\": \"python verify_0001.py\",\n\t\t\t\t\"artifact_rel\": f\"datasets/{ds}/{split}/0001.json\"\n\t\t\t}\n\n\tdef entities(self) -> Iterable[Entity]:\n\t\tout: List[Entity] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tds = meta[\"dataset\"]\n\t\t\tsplit = meta[\"split\"]\n\t\t\t# problem\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/problem/{key}\",\n\t\t\t\tid=f\"ds:problem:{key}\",\n\t\t\t\tkind=\"problem\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\t\t# reference\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/reference/{key}\",\n\t\t\t\tid=f\"ds:reference:{key}\",\n\t\t\t\tkind=\"reference\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\t\t# verifier\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/verifier/{key}\",\n\t\t\t\tid=f\"ds:verifier:{key}\",\n\t\t\t\tkind=\"verifier\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\treturn out\n\n\tdef edges(self) -> Iterable[Edge]:\n\t\tout: List[Edge] = []\n\t\tfor key in self._index.keys():\n\t\t\tpid = f\"ds:problem:{key}\"\n\t\t\trid = f\"ds:reference:{key}\"\n\t\t\tvid = f\"ds:verifier:{key}\"\n\t\t\tout.append(Edge(src=pid, dst=rid, type=\"has_reference\", meta=None))\n\t\t\tout.append(Edge(src=pid, dst=vid, type=\"has_verifier\", meta=None))\n\t\treturn out\n\n\tdef artifacts(self, kind: str) -> Iterable[Artifact]:\n\t\tif kind not in (\"artifact\", \"source\"):\n\t\t\treturn []\n\t\tout: List[Artifact] = []\n\t\tfor key, meta in self._index.items():\n\t\t\trel = meta.get(\"artifact_rel\") or f\"datasets/{key}.json\"\n\t\t\tabs_fp = os.path.join(self.root, rel)\n\t\t\t# We may not have real files; just issue URIs with empty hash\n\t\t\tout.append(Artifact(uri=artifact_uri(self.program_id, rel), type=\"artifact\", hash=\"\", span=None))","source_hash":"a90d4ac27e2b320e678484e0e3095dd2c0fc4466e6bbbda7709487a7a6738471","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.dataset_graph.edges","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.dataset_graph.edges#L80-L88","kind":"function","name":"edges","path":"examples/scripts/dataset_graph.py","language":"python","start_line":80,"end_line":88,"context_start_line":60,"context_end_line":108,"code":"\t\t\t# reference\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/reference/{key}\",\n\t\t\t\tid=f\"ds:reference:{key}\",\n\t\t\t\tkind=\"reference\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\t\t# verifier\n\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/verifier/{key}\",\n\t\t\t\tid=f\"ds:verifier:{key}\",\n\t\t\t\tkind=\"verifier\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\treturn out\n\n\tdef edges(self) -> Iterable[Edge]:\n\t\tout: List[Edge] = []\n\t\tfor key in self._index.keys():\n\t\t\tpid = f\"ds:problem:{key}\"\n\t\t\trid = f\"ds:reference:{key}\"\n\t\t\tvid = f\"ds:verifier:{key}\"\n\t\t\tout.append(Edge(src=pid, dst=rid, type=\"has_reference\", meta=None))\n\t\t\tout.append(Edge(src=pid, dst=vid, type=\"has_verifier\", meta=None))\n\t\treturn out\n\n\tdef artifacts(self, kind: str) -> Iterable[Artifact]:\n\t\tif kind not in (\"artifact\", \"source\"):\n\t\t\treturn []\n\t\tout: List[Artifact] = []\n\t\tfor key, meta in self._index.items():\n\t\t\trel = meta.get(\"artifact_rel\") or f\"datasets/{key}.json\"\n\t\t\tabs_fp = os.path.join(self.root, rel)\n\t\t\t# We may not have real files; just issue URIs with empty hash\n\t\t\tout.append(Artifact(uri=artifact_uri(self.program_id, rel), type=\"artifact\", hash=\"\", span=None))\n\t\treturn out\n\n\tdef search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]:\n\t\tt = (token or \"\").strip().lower()\n\t\tif not t:\n\t\t\treturn []\n\t\tout: List[Tuple[EntityId, Span]] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tif t in (meta.get(\"problem\") or \"\").lower():\n\t\t\t\tout.append((f\"ds:problem:{key}\", Span(start_line=1, end_line=1)))","source_hash":"a90d4ac27e2b320e678484e0e3095dd2c0fc4466e6bbbda7709487a7a6738471","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.dataset_graph.artifacts","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.dataset_graph.artifacts#L90-L99","kind":"function","name":"artifacts","path":"examples/scripts/dataset_graph.py","language":"python","start_line":90,"end_line":99,"context_start_line":70,"context_end_line":119,"code":"\t\t\tout.append(Entity(\n\t\t\t\turi=f\"program://{self.program_id}/verifier/{key}\",\n\t\t\t\tid=f\"ds:verifier:{key}\",\n\t\t\t\tkind=\"verifier\",\n\t\t\t\tname=f\"{ds}:{split}:{key.split('/')[-1]}\",\n\t\t\t\towner=None,\n\t\t\t\tlabels=None,\n\t\t\t))\n\t\treturn out\n\n\tdef edges(self) -> Iterable[Edge]:\n\t\tout: List[Edge] = []\n\t\tfor key in self._index.keys():\n\t\t\tpid = f\"ds:problem:{key}\"\n\t\t\trid = f\"ds:reference:{key}\"\n\t\t\tvid = f\"ds:verifier:{key}\"\n\t\t\tout.append(Edge(src=pid, dst=rid, type=\"has_reference\", meta=None))\n\t\t\tout.append(Edge(src=pid, dst=vid, type=\"has_verifier\", meta=None))\n\t\treturn out\n\n\tdef artifacts(self, kind: str) -> Iterable[Artifact]:\n\t\tif kind not in (\"artifact\", \"source\"):\n\t\t\treturn []\n\t\tout: List[Artifact] = []\n\t\tfor key, meta in self._index.items():\n\t\t\trel = 
meta.get(\"artifact_rel\") or f\"datasets/{key}.json\"\n\t\t\tabs_fp = os.path.join(self.root, rel)\n\t\t\t# We may not have real files; just issue URIs with empty hash\n\t\t\tout.append(Artifact(uri=artifact_uri(self.program_id, rel), type=\"artifact\", hash=\"\", span=None))\n\t\treturn out\n\n\tdef search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]:\n\t\tt = (token or \"\").strip().lower()\n\t\tif not t:\n\t\t\treturn []\n\t\tout: List[Tuple[EntityId, Span]] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tif t in (meta.get(\"problem\") or \"\").lower():\n\t\t\t\tout.append((f\"ds:problem:{key}\", Span(start_line=1, end_line=1)))\n\t\treturn out\n\n\tdef resolve(self, uri: str) -> ResolvedAnchor:\n\t\tpid, kind, res, span = parse_program_uri(uri)\n\t\tif pid != self.program_id:\n\t\t\traise ValueError(f\"program id mismatch: {pid} != {self.program_id}\")\n\t\t# Map entity URIs to synthetic artifact anchors\n\t\tif kind in (\"problem\", \"reference\", \"verifier\"):\n\t\t\tkey = res\n\t\t\tbase = f\"datasets/{key}.json\"\n\t\t\ta = int(span[0]) if span else 1","source_hash":"a90d4ac27e2b320e678484e0e3095dd2c0fc4466e6bbbda7709487a7a6738471","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.dataset_graph.search_refs","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.dataset_graph.search_refs#L101-L109","kind":"function","name":"search_refs","path":"examples/scripts/dataset_graph.py","language":"python","start_line":101,"end_line":109,"context_start_line":81,"context_end_line":129,"code":"\t\tout: List[Edge] = []\n\t\tfor key in self._index.keys():\n\t\t\tpid = f\"ds:problem:{key}\"\n\t\t\trid = f\"ds:reference:{key}\"\n\t\t\tvid = f\"ds:verifier:{key}\"\n\t\t\tout.append(Edge(src=pid, dst=rid, type=\"has_reference\", meta=None))\n\t\t\tout.append(Edge(src=pid, dst=vid, type=\"has_verifier\", meta=None))\n\t\treturn out\n\n\tdef artifacts(self, kind: str) -> Iterable[Artifact]:\n\t\tif kind not in (\"artifact\", \"source\"):\n\t\t\treturn []\n\t\tout: List[Artifact] = []\n\t\tfor key, meta in self._index.items():\n\t\t\trel = meta.get(\"artifact_rel\") or f\"datasets/{key}.json\"\n\t\t\tabs_fp = os.path.join(self.root, rel)\n\t\t\t# We may not have real files; just issue URIs with empty hash\n\t\t\tout.append(Artifact(uri=artifact_uri(self.program_id, rel), type=\"artifact\", hash=\"\", span=None))\n\t\treturn out\n\n\tdef search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]:\n\t\tt = (token or \"\").strip().lower()\n\t\tif not t:\n\t\t\treturn []\n\t\tout: List[Tuple[EntityId, Span]] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tif t in (meta.get(\"problem\") or \"\").lower():\n\t\t\t\tout.append((f\"ds:problem:{key}\", Span(start_line=1, end_line=1)))\n\t\treturn out\n\n\tdef resolve(self, uri: str) -> ResolvedAnchor:\n\t\tpid, kind, res, span = parse_program_uri(uri)\n\t\tif pid != self.program_id:\n\t\t\traise ValueError(f\"program id mismatch: {pid} != {self.program_id}\")\n\t\t# Map entity URIs to synthetic artifact anchors\n\t\tif kind in (\"problem\", \"reference\", \"verifier\"):\n\t\t\tkey = res\n\t\t\tbase = f\"datasets/{key}.json\"\n\t\t\ta = int(span[0]) if span else 1\n\t\t\tb = int(span[1]) if span else a\n\t\t\treturn ResolvedAnchor(artifact_uri=artifact_uri(self.program_id, base), span=Span(start_line=a, end_line=b), hash=\"\")\n\t\t# Fallback to artifact resolution\n\t\tif kind == \"artifact\":\n\t\t\t# Defer to generic artifact handling through RepoGraph helpers 
(inline)\n\t\t\tabs_fp = os.path.abspath(os.path.join(self.root, res))\n\t\t\ta = int(span[0]) if span else 1\n\t\t\tb = int(span[1]) if span else max(1, a)\n\t\t\trel = res.replace(\"\\\\\", \"/\")\n\t\t\treturn ResolvedAnchor(artifact_uri=artifact_uri(self.program_id, rel), span=Span(start_line=a, end_line=b), hash=\"\")","source_hash":"a90d4ac27e2b320e678484e0e3095dd2c0fc4466e6bbbda7709487a7a6738471","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.dataset_graph.resolve","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.dataset_graph.resolve#L111-L130","kind":"function","name":"resolve","path":"examples/scripts/dataset_graph.py","language":"python","start_line":111,"end_line":130,"context_start_line":91,"context_end_line":132,"code":"\t\tif kind not in (\"artifact\", \"source\"):\n\t\t\treturn []\n\t\tout: List[Artifact] = []\n\t\tfor key, meta in self._index.items():\n\t\t\trel = meta.get(\"artifact_rel\") or f\"datasets/{key}.json\"\n\t\t\tabs_fp = os.path.join(self.root, rel)\n\t\t\t# We may not have real files; just issue URIs with empty hash\n\t\t\tout.append(Artifact(uri=artifact_uri(self.program_id, rel), type=\"artifact\", hash=\"\", span=None))\n\t\treturn out\n\n\tdef search_refs(self, token: str) -> Iterable[Tuple[EntityId, Span]]:\n\t\tt = (token or \"\").strip().lower()\n\t\tif not t:\n\t\t\treturn []\n\t\tout: List[Tuple[EntityId, Span]] = []\n\t\tfor key, meta in self._index.items():\n\t\t\tif t in (meta.get(\"problem\") or \"\").lower():\n\t\t\t\tout.append((f\"ds:problem:{key}\", Span(start_line=1, end_line=1)))\n\t\treturn out\n\n\tdef resolve(self, uri: str) -> ResolvedAnchor:\n\t\tpid, kind, res, span = parse_program_uri(uri)\n\t\tif pid != self.program_id:\n\t\t\traise ValueError(f\"program id mismatch: {pid} != {self.program_id}\")\n\t\t# Map entity URIs to synthetic artifact anchors\n\t\tif kind in (\"problem\", \"reference\", \"verifier\"):\n\t\t\tkey = res\n\t\t\tbase = f\"datasets/{key}.json\"\n\t\t\ta = int(span[0]) if span else 1\n\t\t\tb = int(span[1]) if span else a\n\t\t\treturn ResolvedAnchor(artifact_uri=artifact_uri(self.program_id, base), span=Span(start_line=a, end_line=b), hash=\"\")\n\t\t# Fallback to artifact resolution\n\t\tif kind == \"artifact\":\n\t\t\t# Defer to generic artifact handling through RepoGraph helpers (inline)\n\t\t\tabs_fp = os.path.abspath(os.path.join(self.root, res))\n\t\t\ta = int(span[0]) if span else 1\n\t\t\tb = int(span[1]) if span else max(1, a)\n\t\t\trel = res.replace(\"\\\\\", \"/\")\n\t\t\treturn ResolvedAnchor(artifact_uri=artifact_uri(self.program_id, rel), span=Span(start_line=a, end_line=b), hash=\"\")\n\t\traise KeyError(f\"unrecognized entity uri for kind={kind}, resource={res}\")\n\n","source_hash":"a90d4ac27e2b320e678484e0e3095dd2c0fc4466e6bbbda7709487a7a6738471","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.registry","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.registry#L1-L73","kind":"module","name":"examples.scripts.registry","path":"examples/scripts/registry.py","language":"python","start_line":1,"end_line":73,"context_start_line":1,"context_end_line":73,"code":"from __future__ import annotations\n\nimport json\nimport os\nimport time\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\n\n\ndef _default_registry_path() -> str:\n\tartifacts_dir = examples_dir / \"artifacts\"\n\tartifacts_dir.mkdir(parents=True, exist_ok=True)\n\treturn str(artifacts_dir 
/ \"adapter_registry.json\")\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\tif not os.path.isfile(path):\n\t\treturn {}\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read()) or {}\n\texcept Exception:\n\t\treturn {}\n\n\ndef _write_json(path: str, obj: Dict[str, Any]) -> None:\n\tos.makedirs(os.path.dirname(path), exist_ok=True)\n\twith open(path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\n\ndef load_registry(registry_path: Optional[str] = None) -> Dict[str, Any]:\n\trp = registry_path or _default_registry_path()\n\tdata = _read_json(rp)\n\tif not isinstance(data, dict):\n\t\treturn {}\n\treturn data\n\n\ndef save_registry(data: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\trp = registry_path or _default_registry_path()\n\t_write_json(rp, data)\n\treturn rp\n\n\ndef register_adapter(adapter_id: str, meta: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tnow = int(time.time())\n\trec = dict(meta)\n\trec.setdefault(\"created_ts\", now)\n\trec[\"updated_ts\"] = now\n\tdata[adapter_id] = rec\n\treturn save_registry(data, registry_path)\n\n\ndef remove_adapter(adapter_id: str, registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tif adapter_id in data:\n\t\tdel data[adapter_id]\n\treturn save_registry(data, registry_path)\n\n\ndef list_adapters(registry_path: Optional[str] = None) -> List[Dict[str, Any]]:\n\tdata = load_registry(registry_path)\n\tout: List[Dict[str, Any]] = []\n\tfor aid, rec in data.items():\n\t\tentry = dict(rec)\n\t\tentry[\"adapter_id\"] = aid\n\t\tout.append(entry)\n\treturn out\n\n\n","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.registry._default_registry_path","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.registry._default_registry_path#L10-L13","kind":"function","name":"_default_registry_path","path":"examples/scripts/registry.py","language":"python","start_line":10,"end_line":13,"context_start_line":1,"context_end_line":33,"code":"from __future__ import annotations\n\nimport json\nimport os\nimport time\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\n\n\ndef _default_registry_path() -> str:\n\tartifacts_dir = examples_dir / \"artifacts\"\n\tartifacts_dir.mkdir(parents=True, exist_ok=True)\n\treturn str(artifacts_dir / \"adapter_registry.json\")\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\tif not os.path.isfile(path):\n\t\treturn {}\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read()) or {}\n\texcept Exception:\n\t\treturn {}\n\n\ndef _write_json(path: str, obj: Dict[str, Any]) -> None:\n\tos.makedirs(os.path.dirname(path), exist_ok=True)\n\twith open(path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\n\ndef load_registry(registry_path: Optional[str] = None) -> Dict[str, Any]:\n\trp = registry_path or _default_registry_path()","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.registry._read_json","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.registry._read_json#L16-L23","kind":"function","name":"_read_json","path":"examples/scripts/registry.py","language":"python","start_line":16,"end_line":23,"context_start_line":1,"context_end_line":43,"code":"from __future__ import annotations\n\nimport json\nimport os\nimport time\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\n\n\ndef _default_registry_path() -> str:\n\tartifacts_dir = examples_dir / \"artifacts\"\n\tartifacts_dir.mkdir(parents=True, exist_ok=True)\n\treturn str(artifacts_dir / \"adapter_registry.json\")\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\tif not os.path.isfile(path):\n\t\treturn {}\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read()) or {}\n\texcept Exception:\n\t\treturn {}\n\n\ndef _write_json(path: str, obj: Dict[str, Any]) -> None:\n\tos.makedirs(os.path.dirname(path), exist_ok=True)\n\twith open(path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\n\ndef load_registry(registry_path: Optional[str] = None) -> Dict[str, Any]:\n\trp = registry_path or _default_registry_path()\n\tdata = _read_json(rp)\n\tif not isinstance(data, dict):\n\t\treturn {}\n\treturn data\n\n\ndef save_registry(data: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\trp = registry_path or _default_registry_path()\n\t_write_json(rp, data)\n\treturn rp","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.registry._write_json","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.registry._write_json#L26-L29","kind":"function","name":"_write_json","path":"examples/scripts/registry.py","language":"python","start_line":26,"end_line":29,"context_start_line":6,"context_end_line":49,"code":"from pathlib import Path\nfrom typing import Any, Dict, List, Optional\n\n\ndef _default_registry_path() -> str:\n\tartifacts_dir = examples_dir / \"artifacts\"\n\tartifacts_dir.mkdir(parents=True, exist_ok=True)\n\treturn str(artifacts_dir / \"adapter_registry.json\")\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\tif not os.path.isfile(path):\n\t\treturn {}\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read()) or {}\n\texcept Exception:\n\t\treturn {}\n\n\ndef _write_json(path: str, obj: Dict[str, Any]) -> None:\n\tos.makedirs(os.path.dirname(path), exist_ok=True)\n\twith open(path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\n\ndef load_registry(registry_path: Optional[str] = None) -> Dict[str, Any]:\n\trp = registry_path or _default_registry_path()\n\tdata = _read_json(rp)\n\tif not isinstance(data, dict):\n\t\treturn {}\n\treturn data\n\n\ndef save_registry(data: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\trp = registry_path or _default_registry_path()\n\t_write_json(rp, data)\n\treturn rp\n\n\ndef register_adapter(adapter_id: str, meta: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tnow = int(time.time())\n\trec = dict(meta)","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.registry.load_registry","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.registry.load_registry#L32-L37","kind":"function","name":"load_registry","path":"examples/scripts/registry.py","language":"python","start_line":32,"end_line":37,"context_start_line":12,"context_end_line":57,"code":"\tartifacts_dir.mkdir(parents=True, exist_ok=True)\n\treturn str(artifacts_dir / \"adapter_registry.json\")\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\tif not os.path.isfile(path):\n\t\treturn {}\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read()) or {}\n\texcept Exception:\n\t\treturn {}\n\n\ndef _write_json(path: str, obj: Dict[str, Any]) -> None:\n\tos.makedirs(os.path.dirname(path), exist_ok=True)\n\twith open(path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\n\ndef load_registry(registry_path: Optional[str] = None) -> Dict[str, Any]:\n\trp = registry_path or _default_registry_path()\n\tdata = _read_json(rp)\n\tif not isinstance(data, dict):\n\t\treturn {}\n\treturn data\n\n\ndef save_registry(data: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\trp = registry_path or _default_registry_path()\n\t_write_json(rp, data)\n\treturn rp\n\n\ndef register_adapter(adapter_id: str, meta: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tnow = int(time.time())\n\trec = dict(meta)\n\trec.setdefault(\"created_ts\", now)\n\trec[\"updated_ts\"] = now\n\tdata[adapter_id] = rec\n\treturn save_registry(data, registry_path)\n\n\ndef remove_adapter(adapter_id: str, registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.registry.save_registry","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.registry.save_registry#L40-L43","kind":"function","name":"save_registry","path":"examples/scripts/registry.py","language":"python","start_line":40,"end_line":43,"context_start_line":20,"context_end_line":63,"code":"\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read()) or {}\n\texcept Exception:\n\t\treturn {}\n\n\ndef _write_json(path: str, obj: Dict[str, Any]) -> None:\n\tos.makedirs(os.path.dirname(path), exist_ok=True)\n\twith open(path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\n\ndef load_registry(registry_path: Optional[str] = None) -> Dict[str, Any]:\n\trp = registry_path or _default_registry_path()\n\tdata = _read_json(rp)\n\tif not isinstance(data, dict):\n\t\treturn {}\n\treturn data\n\n\ndef save_registry(data: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\trp = registry_path or _default_registry_path()\n\t_write_json(rp, data)\n\treturn rp\n\n\ndef register_adapter(adapter_id: str, meta: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tnow = int(time.time())\n\trec = dict(meta)\n\trec.setdefault(\"created_ts\", now)\n\trec[\"updated_ts\"] = now\n\tdata[adapter_id] = rec\n\treturn save_registry(data, registry_path)\n\n\ndef remove_adapter(adapter_id: str, registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tif adapter_id in data:\n\t\tdel data[adapter_id]\n\treturn 
save_registry(data, registry_path)\n\n\ndef list_adapters(registry_path: Optional[str] = None) -> List[Dict[str, Any]]:","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.registry.register_adapter","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.registry.register_adapter#L46-L53","kind":"function","name":"register_adapter","path":"examples/scripts/registry.py","language":"python","start_line":46,"end_line":53,"context_start_line":26,"context_end_line":73,"code":"def _write_json(path: str, obj: Dict[str, Any]) -> None:\n\tos.makedirs(os.path.dirname(path), exist_ok=True)\n\twith open(path, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\n\ndef load_registry(registry_path: Optional[str] = None) -> Dict[str, Any]:\n\trp = registry_path or _default_registry_path()\n\tdata = _read_json(rp)\n\tif not isinstance(data, dict):\n\t\treturn {}\n\treturn data\n\n\ndef save_registry(data: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\trp = registry_path or _default_registry_path()\n\t_write_json(rp, data)\n\treturn rp\n\n\ndef register_adapter(adapter_id: str, meta: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tnow = int(time.time())\n\trec = dict(meta)\n\trec.setdefault(\"created_ts\", now)\n\trec[\"updated_ts\"] = now\n\tdata[adapter_id] = rec\n\treturn save_registry(data, registry_path)\n\n\ndef remove_adapter(adapter_id: str, registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tif adapter_id in data:\n\t\tdel data[adapter_id]\n\treturn save_registry(data, registry_path)\n\n\ndef list_adapters(registry_path: Optional[str] = None) -> List[Dict[str, Any]]:\n\tdata = load_registry(registry_path)\n\tout: List[Dict[str, Any]] = []\n\tfor aid, rec in data.items():\n\t\tentry = dict(rec)\n\t\tentry[\"adapter_id\"] = aid\n\t\tout.append(entry)\n\treturn out\n\n\n","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.registry.remove_adapter","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.registry.remove_adapter#L56-L60","kind":"function","name":"remove_adapter","path":"examples/scripts/registry.py","language":"python","start_line":56,"end_line":60,"context_start_line":36,"context_end_line":73,"code":"\t\treturn {}\n\treturn data\n\n\ndef save_registry(data: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\trp = registry_path or _default_registry_path()\n\t_write_json(rp, data)\n\treturn rp\n\n\ndef register_adapter(adapter_id: str, meta: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tnow = int(time.time())\n\trec = dict(meta)\n\trec.setdefault(\"created_ts\", now)\n\trec[\"updated_ts\"] = now\n\tdata[adapter_id] = rec\n\treturn save_registry(data, registry_path)\n\n\ndef remove_adapter(adapter_id: str, registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tif adapter_id in data:\n\t\tdel data[adapter_id]\n\treturn save_registry(data, registry_path)\n\n\ndef list_adapters(registry_path: Optional[str] = None) -> List[Dict[str, Any]]:\n\tdata = load_registry(registry_path)\n\tout: List[Dict[str, Any]] = []\n\tfor aid, rec in data.items():\n\t\tentry = dict(rec)\n\t\tentry[\"adapter_id\"] = 
aid\n\t\tout.append(entry)\n\treturn out\n\n\n","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.registry.list_adapters","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.registry.list_adapters#L63-L70","kind":"function","name":"list_adapters","path":"examples/scripts/registry.py","language":"python","start_line":63,"end_line":70,"context_start_line":43,"context_end_line":73,"code":"\treturn rp\n\n\ndef register_adapter(adapter_id: str, meta: Dict[str, Any], registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tnow = int(time.time())\n\trec = dict(meta)\n\trec.setdefault(\"created_ts\", now)\n\trec[\"updated_ts\"] = now\n\tdata[adapter_id] = rec\n\treturn save_registry(data, registry_path)\n\n\ndef remove_adapter(adapter_id: str, registry_path: Optional[str] = None) -> str:\n\tdata = load_registry(registry_path)\n\tif adapter_id in data:\n\t\tdel data[adapter_id]\n\treturn save_registry(data, registry_path)\n\n\ndef list_adapters(registry_path: Optional[str] = None) -> List[Dict[str, Any]]:\n\tdata = load_registry(registry_path)\n\tout: List[Dict[str, Any]] = []\n\tfor aid, rec in data.items():\n\t\tentry = dict(rec)\n\t\tentry[\"adapter_id\"] = aid\n\t\tout.append(entry)\n\treturn out\n\n\n","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.pca_core","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.pca_core#L1-L78","kind":"module","name":"examples.scripts.pca_core","path":"examples/scripts/pca_core.py","language":"python","start_line":1,"end_line":78,"context_start_line":1,"context_end_line":78,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Tuple, Iterable\n\n\n# Canonical PCA Interface (portable utility helpers)\n\ndef select_subgraph(goal: Dict[str, Any], program_facts: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tSelect(): question-aware selection of subgraph/segments/windows from the program.\n\tHeuristic: prefer recently changed files, failing spans, or symbols mentioned in goal text.\n\t\"\"\"\n\ttext = (goal.get(\"text\") or goal.get(\"prompt\") or \"\").lower()\n\treturn {\"windows\": [], \"symbols\": [], \"goal_terms\": text.split()[:16]}\n\n\ndef pack_with_anchors(sources: Iterable[Path], windows: List[Tuple[str, int, int]]) -> Dict[str, Any]:\n\t\"\"\"\n\tPack(): deterministic context packaging with anchored snippets (path:line windows).\n\t\"\"\"\n\tpacked = {\"anchors\": [], \"tokens_budget\": 0}\n\tfor rel, a, b in windows:\n\t\ttry:\n\t\t\tpacked[\"anchors\"].append({\"path\": rel, \"start_line\": int(a), \"end_line\": int(b)})\n\t\texcept Exception:\n\t\t\tcontinue\n\treturn packed\n\n\ndef embed_multifactor(features: Dict[str, Any]) -> List[float]:\n\t\"\"\"\n\tEmbed(): multi-factor embedding of program-specific features (schemas, contracts, graphs, traces).\n\tPlaceholder returns a small fixed-length vector for compatibility.\n\t\"\"\"\n\treturn [0.0, 0.0, 0.0, 1.0]\n\n\ndef adapt_lora_deltas(targets: List[str], rank: int = 8) -> Dict[str, Any]:\n\t\"\"\"\n\tAdapt(): synthesize/mix LoRA-like deltas for LM layer targets (attention/MLP) with a stable gating schedule.\n\t\"\"\"\n\treturn {\"targets\": list(targets), \"rank\": int(rank), \"gating\": 
\"stable\"}\n\n\ndef verify_outputs(outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tVerify(): run program-native checks (lints/compile/tests/SQL dry-run/schema).\n\tHere we only check structure is non-empty for demo purposes.\n\t\"\"\"\n\tok = isinstance(outputs, dict) and len(outputs) > 0\n\treturn {\"ok\": bool(ok), \"summary\": \"ok\" if ok else \"empty\"}\n\n\ndef cite_outputs(outputs: Dict[str, Any], anchors: List[Dict[str, Any]]) -> Dict[str, Any]:\n\t\"\"\"\n\tCite(): append anchors to every claim; here we attach under a standard field if missing.\n\t\"\"\"\n\twith_cites = dict(outputs)\n\twith_cites.setdefault(\"citations\", anchors)\n\treturn with_cites\n\n\ndef log_minimal(goal: Dict[str, Any], outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tLog(): emit minimal, privacy-respecting telemetry for reproducibility and distillation.\n\t\"\"\"\n\treturn {\"goal_hash\": hash(str(goal)) % (10**9), \"outputs_keys\": list(outputs.keys())}\n\n\n@dataclass(frozen=True)\nclass Budget:\n\ttokens: int = 64000\n\twall_sec: int = 120\n\tci_min: int = 10\n\n","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.pca_core.select_subgraph","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.pca_core.select_subgraph#L10-L16","kind":"function","name":"select_subgraph","path":"examples/scripts/pca_core.py","language":"python","start_line":10,"end_line":16,"context_start_line":1,"context_end_line":36,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Tuple, Iterable\n\n\n# Canonical PCA Interface (portable utility helpers)\n\ndef select_subgraph(goal: Dict[str, Any], program_facts: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tSelect(): question-aware selection of subgraph/segments/windows from the program.\n\tHeuristic: prefer recently changed files, failing spans, or symbols mentioned in goal text.\n\t\"\"\"\n\ttext = (goal.get(\"text\") or goal.get(\"prompt\") or \"\").lower()\n\treturn {\"windows\": [], \"symbols\": [], \"goal_terms\": text.split()[:16]}\n\n\ndef pack_with_anchors(sources: Iterable[Path], windows: List[Tuple[str, int, int]]) -> Dict[str, Any]:\n\t\"\"\"\n\tPack(): deterministic context packaging with anchored snippets (path:line windows).\n\t\"\"\"\n\tpacked = {\"anchors\": [], \"tokens_budget\": 0}\n\tfor rel, a, b in windows:\n\t\ttry:\n\t\t\tpacked[\"anchors\"].append({\"path\": rel, \"start_line\": int(a), \"end_line\": int(b)})\n\t\texcept Exception:\n\t\t\tcontinue\n\treturn packed\n\n\ndef embed_multifactor(features: Dict[str, Any]) -> List[float]:\n\t\"\"\"\n\tEmbed(): multi-factor embedding of program-specific features (schemas, contracts, graphs, traces).\n\tPlaceholder returns a small fixed-length vector for compatibility.\n\t\"\"\"","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.pca_core.pack_with_anchors","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.pca_core.pack_with_anchors#L19-L29","kind":"function","name":"pack_with_anchors","path":"examples/scripts/pca_core.py","language":"python","start_line":19,"end_line":29,"context_start_line":1,"context_end_line":49,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom 
typing import Any, Dict, List, Tuple, Iterable\n\n\n# Canonical PCA Interface (portable utility helpers)\n\ndef select_subgraph(goal: Dict[str, Any], program_facts: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tSelect(): question-aware selection of subgraph/segments/windows from the program.\n\tHeuristic: prefer recently changed files, failing spans, or symbols mentioned in goal text.\n\t\"\"\"\n\ttext = (goal.get(\"text\") or goal.get(\"prompt\") or \"\").lower()\n\treturn {\"windows\": [], \"symbols\": [], \"goal_terms\": text.split()[:16]}\n\n\ndef pack_with_anchors(sources: Iterable[Path], windows: List[Tuple[str, int, int]]) -> Dict[str, Any]:\n\t\"\"\"\n\tPack(): deterministic context packaging with anchored snippets (path:line windows).\n\t\"\"\"\n\tpacked = {\"anchors\": [], \"tokens_budget\": 0}\n\tfor rel, a, b in windows:\n\t\ttry:\n\t\t\tpacked[\"anchors\"].append({\"path\": rel, \"start_line\": int(a), \"end_line\": int(b)})\n\t\texcept Exception:\n\t\t\tcontinue\n\treturn packed\n\n\ndef embed_multifactor(features: Dict[str, Any]) -> List[float]:\n\t\"\"\"\n\tEmbed(): multi-factor embedding of program-specific features (schemas, contracts, graphs, traces).\n\tPlaceholder returns a small fixed-length vector for compatibility.\n\t\"\"\"\n\treturn [0.0, 0.0, 0.0, 1.0]\n\n\ndef adapt_lora_deltas(targets: List[str], rank: int = 8) -> Dict[str, Any]:\n\t\"\"\"\n\tAdapt(): synthesize/mix LoRA-like deltas for LM layer targets (attention/MLP) with a stable gating schedule.\n\t\"\"\"\n\treturn {\"targets\": list(targets), \"rank\": int(rank), \"gating\": \"stable\"}\n\n\ndef verify_outputs(outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tVerify(): run program-native checks (lints/compile/tests/SQL dry-run/schema).","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.pca_core.embed_multifactor","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.pca_core.embed_multifactor#L32-L37","kind":"function","name":"embed_multifactor","path":"examples/scripts/pca_core.py","language":"python","start_line":32,"end_line":37,"context_start_line":12,"context_end_line":57,"code":"\tSelect(): question-aware selection of subgraph/segments/windows from the program.\n\tHeuristic: prefer recently changed files, failing spans, or symbols mentioned in goal text.\n\t\"\"\"\n\ttext = (goal.get(\"text\") or goal.get(\"prompt\") or \"\").lower()\n\treturn {\"windows\": [], \"symbols\": [], \"goal_terms\": text.split()[:16]}\n\n\ndef pack_with_anchors(sources: Iterable[Path], windows: List[Tuple[str, int, int]]) -> Dict[str, Any]:\n\t\"\"\"\n\tPack(): deterministic context packaging with anchored snippets (path:line windows).\n\t\"\"\"\n\tpacked = {\"anchors\": [], \"tokens_budget\": 0}\n\tfor rel, a, b in windows:\n\t\ttry:\n\t\t\tpacked[\"anchors\"].append({\"path\": rel, \"start_line\": int(a), \"end_line\": int(b)})\n\t\texcept Exception:\n\t\t\tcontinue\n\treturn packed\n\n\ndef embed_multifactor(features: Dict[str, Any]) -> List[float]:\n\t\"\"\"\n\tEmbed(): multi-factor embedding of program-specific features (schemas, contracts, graphs, traces).\n\tPlaceholder returns a small fixed-length vector for compatibility.\n\t\"\"\"\n\treturn [0.0, 0.0, 0.0, 1.0]\n\n\ndef adapt_lora_deltas(targets: List[str], rank: int = 8) -> Dict[str, Any]:\n\t\"\"\"\n\tAdapt(): synthesize/mix LoRA-like deltas for LM layer targets (attention/MLP) with a stable gating 
schedule.\n\t\"\"\"\n\treturn {\"targets\": list(targets), \"rank\": int(rank), \"gating\": \"stable\"}\n\n\ndef verify_outputs(outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tVerify(): run program-native checks (lints/compile/tests/SQL dry-run/schema).\n\tHere we only check structure is non-empty for demo purposes.\n\t\"\"\"\n\tok = isinstance(outputs, dict) and len(outputs) > 0\n\treturn {\"ok\": bool(ok), \"summary\": \"ok\" if ok else \"empty\"}\n\n\ndef cite_outputs(outputs: Dict[str, Any], anchors: List[Dict[str, Any]]) -> Dict[str, Any]:\n\t\"\"\"","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.pca_core.adapt_lora_deltas","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.pca_core.adapt_lora_deltas#L40-L44","kind":"function","name":"adapt_lora_deltas","path":"examples/scripts/pca_core.py","language":"python","start_line":40,"end_line":44,"context_start_line":20,"context_end_line":64,"code":"\t\"\"\"\n\tPack(): deterministic context packaging with anchored snippets (path:line windows).\n\t\"\"\"\n\tpacked = {\"anchors\": [], \"tokens_budget\": 0}\n\tfor rel, a, b in windows:\n\t\ttry:\n\t\t\tpacked[\"anchors\"].append({\"path\": rel, \"start_line\": int(a), \"end_line\": int(b)})\n\t\texcept Exception:\n\t\t\tcontinue\n\treturn packed\n\n\ndef embed_multifactor(features: Dict[str, Any]) -> List[float]:\n\t\"\"\"\n\tEmbed(): multi-factor embedding of program-specific features (schemas, contracts, graphs, traces).\n\tPlaceholder returns a small fixed-length vector for compatibility.\n\t\"\"\"\n\treturn [0.0, 0.0, 0.0, 1.0]\n\n\ndef adapt_lora_deltas(targets: List[str], rank: int = 8) -> Dict[str, Any]:\n\t\"\"\"\n\tAdapt(): synthesize/mix LoRA-like deltas for LM layer targets (attention/MLP) with a stable gating schedule.\n\t\"\"\"\n\treturn {\"targets\": list(targets), \"rank\": int(rank), \"gating\": \"stable\"}\n\n\ndef verify_outputs(outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tVerify(): run program-native checks (lints/compile/tests/SQL dry-run/schema).\n\tHere we only check structure is non-empty for demo purposes.\n\t\"\"\"\n\tok = isinstance(outputs, dict) and len(outputs) > 0\n\treturn {\"ok\": bool(ok), \"summary\": \"ok\" if ok else \"empty\"}\n\n\ndef cite_outputs(outputs: Dict[str, Any], anchors: List[Dict[str, Any]]) -> Dict[str, Any]:\n\t\"\"\"\n\tCite(): append anchors to every claim; here we attach under a standard field if missing.\n\t\"\"\"\n\twith_cites = dict(outputs)\n\twith_cites.setdefault(\"citations\", anchors)\n\treturn with_cites\n\n","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.pca_core.verify_outputs","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.pca_core.verify_outputs#L47-L53","kind":"function","name":"verify_outputs","path":"examples/scripts/pca_core.py","language":"python","start_line":47,"end_line":53,"context_start_line":27,"context_end_line":73,"code":"\t\texcept Exception:\n\t\t\tcontinue\n\treturn packed\n\n\ndef embed_multifactor(features: Dict[str, Any]) -> List[float]:\n\t\"\"\"\n\tEmbed(): multi-factor embedding of program-specific features (schemas, contracts, graphs, traces).\n\tPlaceholder returns a small fixed-length vector for compatibility.\n\t\"\"\"\n\treturn [0.0, 0.0, 0.0, 1.0]\n\n\ndef adapt_lora_deltas(targets: 
List[str], rank: int = 8) -> Dict[str, Any]:\n\t\"\"\"\n\tAdapt(): synthesize/mix LoRA-like deltas for LM layer targets (attention/MLP) with a stable gating schedule.\n\t\"\"\"\n\treturn {\"targets\": list(targets), \"rank\": int(rank), \"gating\": \"stable\"}\n\n\ndef verify_outputs(outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tVerify(): run program-native checks (lints/compile/tests/SQL dry-run/schema).\n\tHere we only check structure is non-empty for demo purposes.\n\t\"\"\"\n\tok = isinstance(outputs, dict) and len(outputs) > 0\n\treturn {\"ok\": bool(ok), \"summary\": \"ok\" if ok else \"empty\"}\n\n\ndef cite_outputs(outputs: Dict[str, Any], anchors: List[Dict[str, Any]]) -> Dict[str, Any]:\n\t\"\"\"\n\tCite(): append anchors to every claim; here we attach under a standard field if missing.\n\t\"\"\"\n\twith_cites = dict(outputs)\n\twith_cites.setdefault(\"citations\", anchors)\n\treturn with_cites\n\n\ndef log_minimal(goal: Dict[str, Any], outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tLog(): emit minimal, privacy-respecting telemetry for reproducibility and distillation.\n\t\"\"\"\n\treturn {\"goal_hash\": hash(str(goal)) % (10**9), \"outputs_keys\": list(outputs.keys())}\n\n\n@dataclass(frozen=True)\nclass Budget:","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.pca_core.cite_outputs","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.pca_core.cite_outputs#L56-L62","kind":"function","name":"cite_outputs","path":"examples/scripts/pca_core.py","language":"python","start_line":56,"end_line":62,"context_start_line":36,"context_end_line":78,"code":"\t\"\"\"\n\treturn [0.0, 0.0, 0.0, 1.0]\n\n\ndef adapt_lora_deltas(targets: List[str], rank: int = 8) -> Dict[str, Any]:\n\t\"\"\"\n\tAdapt(): synthesize/mix LoRA-like deltas for LM layer targets (attention/MLP) with a stable gating schedule.\n\t\"\"\"\n\treturn {\"targets\": list(targets), \"rank\": int(rank), \"gating\": \"stable\"}\n\n\ndef verify_outputs(outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tVerify(): run program-native checks (lints/compile/tests/SQL dry-run/schema).\n\tHere we only check structure is non-empty for demo purposes.\n\t\"\"\"\n\tok = isinstance(outputs, dict) and len(outputs) > 0\n\treturn {\"ok\": bool(ok), \"summary\": \"ok\" if ok else \"empty\"}\n\n\ndef cite_outputs(outputs: Dict[str, Any], anchors: List[Dict[str, Any]]) -> Dict[str, Any]:\n\t\"\"\"\n\tCite(): append anchors to every claim; here we attach under a standard field if missing.\n\t\"\"\"\n\twith_cites = dict(outputs)\n\twith_cites.setdefault(\"citations\", anchors)\n\treturn with_cites\n\n\ndef log_minimal(goal: Dict[str, Any], outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tLog(): emit minimal, privacy-respecting telemetry for reproducibility and distillation.\n\t\"\"\"\n\treturn {\"goal_hash\": hash(str(goal)) % (10**9), \"outputs_keys\": list(outputs.keys())}\n\n\n@dataclass(frozen=True)\nclass Budget:\n\ttokens: int = 64000\n\twall_sec: int = 120\n\tci_min: int = 10\n\n","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} 
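Taken together, the pca_core records above define the canonical PCA interface as a pipeline: Select() -> Pack() -> Embed() -> Adapt() -> Verify() -> Cite() -> Log(), bounded by a frozen Budget. A minimal sketch of chaining these helpers end to end, assuming the repository root is on sys.path; the goal text, window tuple, and projection names are hypothetical illustrations, not values taken from the records:

from pathlib import Path
from examples.scripts.pca_core import (
	Budget, adapt_lora_deltas, cite_outputs, embed_multifactor,
	log_minimal, pack_with_anchors, select_subgraph, verify_outputs,
)
from examples.scripts.citation_enforcer import require_citations  # record appears further below in this dump

goal = {"text": "Explain how generation works. Cite path:line."}   # hypothetical goal
sel = select_subgraph(goal, program_facts={})                      # Select(): windows/symbols/goal_terms
packed = pack_with_anchors([Path("run.py")], [("run.py", 1, 40)])  # Pack(): anchored path:line windows
vec = embed_multifactor({"graphs": None})                          # Embed(): fixed 4-dim placeholder vector
outputs = adapt_lora_deltas(["q_proj", "o_proj"], rank=8)          # Adapt(): LoRA-like delta spec
check = verify_outputs(outputs)                                    # Verify(): {"ok": True, "summary": "ok"}
cited = cite_outputs(outputs, packed["anchors"])                   # Cite(): attach anchors when missing
gate = require_citations(cited, min_count=1)                       # passes: one anchor was attached
note = log_minimal(goal, cited)                                    # Log(): goal_hash + output keys
# Caveat: log_minimal hashes a str with the builtin hash(), which is salted
# per process unless PYTHONHASHSEED is pinned, so goal_hash is only stable
# within a single run, not across runs.
budget = Budget()  # frozen defaults: tokens=64000, wall_sec=120, ci_min=10

Each stage here is the placeholder implementation shown in the records; the sketch only demonstrates how the seven-stage interface composes, not production behavior.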
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.pca_core.log_minimal","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.pca_core.log_minimal#L65-L69","kind":"function","name":"log_minimal","path":"examples/scripts/pca_core.py","language":"python","start_line":65,"end_line":69,"context_start_line":45,"context_end_line":78,"code":"\n\ndef verify_outputs(outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tVerify(): run program-native checks (lints/compile/tests/SQL dry-run/schema).\n\tHere we only check structure is non-empty for demo purposes.\n\t\"\"\"\n\tok = isinstance(outputs, dict) and len(outputs) > 0\n\treturn {\"ok\": bool(ok), \"summary\": \"ok\" if ok else \"empty\"}\n\n\ndef cite_outputs(outputs: Dict[str, Any], anchors: List[Dict[str, Any]]) -> Dict[str, Any]:\n\t\"\"\"\n\tCite(): append anchors to every claim; here we attach under a standard field if missing.\n\t\"\"\"\n\twith_cites = dict(outputs)\n\twith_cites.setdefault(\"citations\", anchors)\n\treturn with_cites\n\n\ndef log_minimal(goal: Dict[str, Any], outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tLog(): emit minimal, privacy-respecting telemetry for reproducibility and distillation.\n\t\"\"\"\n\treturn {\"goal_hash\": hash(str(goal)) % (10**9), \"outputs_keys\": list(outputs.keys())}\n\n\n@dataclass(frozen=True)\nclass Budget:\n\ttokens: int = 64000\n\twall_sec: int = 120\n\tci_min: int = 10\n\n","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.pca_core.Budget","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.pca_core.Budget#L73-L76","kind":"class","name":"Budget","path":"examples/scripts/pca_core.py","language":"python","start_line":73,"end_line":76,"context_start_line":53,"context_end_line":78,"code":"\treturn {\"ok\": bool(ok), \"summary\": \"ok\" if ok else \"empty\"}\n\n\ndef cite_outputs(outputs: Dict[str, Any], anchors: List[Dict[str, Any]]) -> Dict[str, Any]:\n\t\"\"\"\n\tCite(): append anchors to every claim; here we attach under a standard field if missing.\n\t\"\"\"\n\twith_cites = dict(outputs)\n\twith_cites.setdefault(\"citations\", anchors)\n\treturn with_cites\n\n\ndef log_minimal(goal: Dict[str, Any], outputs: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tLog(): emit minimal, privacy-respecting telemetry for reproducibility and distillation.\n\t\"\"\"\n\treturn {\"goal_hash\": hash(str(goal)) % (10**9), \"outputs_keys\": list(outputs.keys())}\n\n\n@dataclass(frozen=True)\nclass Budget:\n\ttokens: int = 64000\n\twall_sec: int = 120\n\tci_min: int = 10\n\n","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.program_trainer","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.program_trainer#L1-L60","kind":"module","name":"examples.scripts.program_trainer","path":"examples/scripts/program_trainer.py","language":"python","start_line":1,"end_line":60,"context_start_line":1,"context_end_line":60,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom typing import Any, Dict\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\treturn json.loads(fh.read())\n\n\ndef run_program_training(adapters_dir: str, plan_json: str, out_json: str) -> 
str:\n\t\"\"\"\n\tSmoke-level trainer hook used by examples: simulates a training pass\n\tgrounded to a ProgramGraph-based TrainingPlan by producing a summary\n\tand a trained marker under adapters_dir.\n\t\"\"\"\n\tadapters_dir_abs = os.path.abspath(adapters_dir)\n\tplan = _read_json(plan_json)\n\tplan_obj = plan.get(\"DatasetTrainingPlan\") or {}\n\tdatasets = (plan_obj.get(\"plan\") or {}).get(\"datasets\", [])\n\tschedule = (plan_obj.get(\"plan\") or {}).get(\"schedule\", [])\n\n\t# Simulate \"training\" by writing a marker file\n\tPath(adapters_dir_abs).mkdir(parents=True, exist_ok=True)\n\tmarker = Path(adapters_dir_abs) / \"TRAINED.OK\"\n\tmarker.write_text(\"trained=1\\n\", encoding=\"utf-8\")\n\n\tsummary = {\n\t\t\"trained\": True,\n\t\t\"datasets\": datasets,\n\t\t\"schedule\": schedule,\n\t\t\"artifacts\": {\"marker\": str(marker)},\n\t}\n\tobj = {\"schema_version\": 1, \"TrainingSummary\": summary}\n\tos.makedirs(os.path.dirname(out_json), exist_ok=True)\n\twith open(out_json, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\treturn out_json\n\n\ndef main() -> None:\n\tif len(sys.argv) < 4:\n\t\tprint(\"usage: python program_trainer.py <adapters_dir> <plan_json> <out_json>\", file=sys.stderr)\n\t\tsys.exit(2)\n\tadapters_dir = sys.argv[1]\n\tplan_json = sys.argv[2]\n\tout_json = sys.argv[3]\n\tpath = run_program_training(adapters_dir, plan_json, out_json)\n\tprint(path)\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","source_hash":"6a30524660eb39f1f7c29bbffef13569864df494a86e5879f0a0168777a0540b","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.program_trainer._read_json","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.program_trainer._read_json#L10-L12","kind":"function","name":"_read_json","path":"examples/scripts/program_trainer.py","language":"python","start_line":10,"end_line":12,"context_start_line":1,"context_end_line":32,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom typing import Any, Dict\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\treturn json.loads(fh.read())\n\n\ndef run_program_training(adapters_dir: str, plan_json: str, out_json: str) -> str:\n\t\"\"\"\n\tSmoke-level trainer hook used by examples: simulates a training pass\n\tgrounded to a ProgramGraph-based TrainingPlan by producing a summary\n\tand a trained marker under adapters_dir.\n\t\"\"\"\n\tadapters_dir_abs = os.path.abspath(adapters_dir)\n\tplan = _read_json(plan_json)\n\tplan_obj = plan.get(\"DatasetTrainingPlan\") or {}\n\tdatasets = (plan_obj.get(\"plan\") or {}).get(\"datasets\", [])\n\tschedule = (plan_obj.get(\"plan\") or {}).get(\"schedule\", [])\n\n\t# Simulate \"training\" by writing a marker file\n\tPath(adapters_dir_abs).mkdir(parents=True, exist_ok=True)\n\tmarker = Path(adapters_dir_abs) / \"TRAINED.OK\"\n\tmarker.write_text(\"trained=1\\n\", encoding=\"utf-8\")\n\n\tsummary = {","source_hash":"6a30524660eb39f1f7c29bbffef13569864df494a86e5879f0a0168777a0540b","truncated":false}
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.program_trainer.run_program_training","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.program_trainer.run_program_training#L15-L42","kind":"function","name":"run_program_training","path":"examples/scripts/program_trainer.py","language":"python","start_line":15,"end_line":42,"context_start_line":1,"context_end_line":60,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom typing import Any, Dict\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\treturn json.loads(fh.read())\n\n\ndef run_program_training(adapters_dir: str, plan_json: str, out_json: str) -> str:\n\t\"\"\"\n\tSmoke-level trainer hook used by examples: simulates a training pass\n\tgrounded to a ProgramGraph-based TrainingPlan by producing a summary\n\tand a trained marker under adapters_dir.\n\t\"\"\"\n\tadapters_dir_abs = os.path.abspath(adapters_dir)\n\tplan = _read_json(plan_json)\n\tplan_obj = plan.get(\"DatasetTrainingPlan\") or {}\n\tdatasets = (plan_obj.get(\"plan\") or {}).get(\"datasets\", [])\n\tschedule = (plan_obj.get(\"plan\") or {}).get(\"schedule\", [])\n\n\t# Simulate \"training\" by writing a marker file\n\tPath(adapters_dir_abs).mkdir(parents=True, exist_ok=True)\n\tmarker = Path(adapters_dir_abs) / \"TRAINED.OK\"\n\tmarker.write_text(\"trained=1\\n\", encoding=\"utf-8\")\n\n\tsummary = {\n\t\t\"trained\": True,\n\t\t\"datasets\": datasets,\n\t\t\"schedule\": schedule,\n\t\t\"artifacts\": {\"marker\": str(marker)},\n\t}\n\tobj = {\"schema_version\": 1, \"TrainingSummary\": summary}\n\tos.makedirs(os.path.dirname(out_json), exist_ok=True)\n\twith open(out_json, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\treturn out_json\n\n\ndef main() -> None:\n\tif len(sys.argv) < 4:\n\t\tprint(\"usage: python program_trainer.py <adapters_dir> <plan_json> <out_json>\", file=sys.stderr)\n\t\tsys.exit(2)\n\tadapters_dir = sys.argv[1]\n\tplan_json = sys.argv[2]\n\tout_json = sys.argv[3]\n\tpath = run_program_training(adapters_dir, plan_json, out_json)\n\tprint(path)\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","source_hash":"6a30524660eb39f1f7c29bbffef13569864df494a86e5879f0a0168777a0540b","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.program_trainer.main","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.program_trainer.main#L45-L53","kind":"function","name":"main","path":"examples/scripts/program_trainer.py","language":"python","start_line":45,"end_line":53,"context_start_line":25,"context_end_line":60,"code":"\tschedule = (plan_obj.get(\"plan\") or {}).get(\"schedule\", [])\n\n\t# Simulate \"training\" by writing a marker file\n\tPath(adapters_dir_abs).mkdir(parents=True, exist_ok=True)\n\tmarker = Path(adapters_dir_abs) / \"TRAINED.OK\"\n\tmarker.write_text(\"trained=1\\n\", encoding=\"utf-8\")\n\n\tsummary = {\n\t\t\"trained\": True,\n\t\t\"datasets\": datasets,\n\t\t\"schedule\": schedule,\n\t\t\"artifacts\": {\"marker\": str(marker)},\n\t}\n\tobj = {\"schema_version\": 1, \"TrainingSummary\": summary}\n\tos.makedirs(os.path.dirname(out_json), exist_ok=True)\n\twith open(out_json, \"w\", encoding=\"utf-8\") as fh:\n\t\tfh.write(json.dumps(obj, indent=2))\n\treturn out_json\n\n\ndef main() -> None:\n\tif len(sys.argv) < 4:\n\t\tprint(\"usage: python program_trainer.py <adapters_dir> <plan_json> <out_json>\", file=sys.stderr)\n\t\tsys.exit(2)\n\tadapters_dir = sys.argv[1]\n\tplan_json = sys.argv[2]\n\tout_json = sys.argv[3]\n\tpath = run_program_training(adapters_dir, plan_json, out_json)\n\tprint(path)\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","source_hash":"6a30524660eb39f1f7c29bbffef13569864df494a86e5879f0a0168777a0540b","truncated":false}
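The program_trainer records above pin down the plan shape the smoke trainer actually reads: a top-level "DatasetTrainingPlan" object whose nested "plan" carries "datasets" and "schedule" lists. A minimal in-process sketch, assuming the repository root is on sys.path; the dataset and schedule entries are hypothetical placeholders, not values from the records:

import json
import os
import tempfile
from examples.scripts.program_trainer import run_program_training

plan = {
	"DatasetTrainingPlan": {
		"plan": {
			"datasets": [{"name": "qa_windows", "size": 64}],  # hypothetical entry
			"schedule": [{"phase": "warmup", "steps": 10}],    # hypothetical entry
		}
	}
}
with tempfile.TemporaryDirectory() as tmp:
	plan_json = os.path.join(tmp, "plan.json")
	out_json = os.path.join(tmp, "out", "summary.json")  # parent dir is created by the hook
	with open(plan_json, "w", encoding="utf-8") as fh:
		json.dump(plan, fh)
	path = run_program_training(os.path.join(tmp, "adapters"), plan_json, out_json)
	with open(path, encoding="utf-8") as fh:
		summary = json.load(fh)
	assert summary["TrainingSummary"]["trained"] is True  # TRAINED.OK marker now exists under adapters/

The equivalent CLI form is python examples/scripts/program_trainer.py <adapters_dir> <plan_json> <out_json>, which prints the summary path on success.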
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.citation_enforcer","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.citation_enforcer#L1-L14","kind":"module","name":"examples.scripts.citation_enforcer","path":"examples/scripts/citation_enforcer.py","language":"python","start_line":1,"end_line":14,"context_start_line":1,"context_end_line":14,"code":"from __future__ import annotations\n\nfrom typing import Any, Dict, List\n\n\ndef require_citations(outputs: Dict[str, Any], min_count: int = 1) -> Dict[str, Any]:\n\t\"\"\"\n\tEnsure outputs contain at least min_count citation anchors.\n\t\"\"\"\n\tcites = outputs.get(\"citations\") or []\n\tok = isinstance(cites, list) and len(cites) >= int(min_count)\n\treturn {\"ok\": ok, \"count\": len(cites) if isinstance(cites, list) else 0}\n\n","source_hash":"5fdb3bc8ad9091a9ded3bef7b0d3ac4d9960b3c93cef3b0ffea3bccded5956d8","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.citation_enforcer.require_citations","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.citation_enforcer.require_citations#L6-L12","kind":"function","name":"require_citations","path":"examples/scripts/citation_enforcer.py","language":"python","start_line":6,"end_line":12,"context_start_line":1,"context_end_line":14,"code":"from __future__ import annotations\n\nfrom typing import Any, Dict, List\n\n\ndef require_citations(outputs: Dict[str, Any], min_count: int = 1) -> Dict[str, Any]:\n\t\"\"\"\n\tEnsure outputs contain at least min_count citation anchors.\n\t\"\"\"\n\tcites = outputs.get(\"citations\") or []\n\tok = isinstance(cites, list) and len(cites) >= int(min_count)\n\treturn {\"ok\": ok, \"count\": len(cites) if isinstance(cites, list) else 0}\n\n","source_hash":"5fdb3bc8ad9091a9ded3bef7b0d3ac4d9960b3c93cef3b0ffea3bccded5956d8","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.code_graph#L1-L1197","kind":"module","name":"examples.scripts.code_graph","path":"examples/scripts/code_graph.py","language":"python","start_line":1,"end_line":1197,"context_start_line":1,"context_end_line":1197,"code":"import os\nimport ast\nimport re\nimport json\nimport time # noqa: F401\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Tuple, Optional\ntry:\n import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\n\n\n@dataclass\nclass Symbol:\n fqn: str\n name: str\n qualname: str\n kind: str # module|class|function|variable\n module: str\n file: str\n line: int\n end_line: int\n doc: Optional[str] = None\n signature: Optional[str] = None\n returns: Optional[str] = None\n\n\n@dataclass\nclass ModuleInfo:\n module: str\n file: str\n is_test: bool = False\n imports: Dict[str, str] = field(\n default_factory=dict\n ) # alias -> target (module or module.symbol)\n defs: List[str] = field(default_factory=list) # list of symbol FQNs\n exports: List[str] = field(default_factory=list) # names from __all__\n\n\nclass CodeGraph:\n def __init__(self, root: str, *, ignore: Optional[List[str]] = None) -> None:\n self.root = os.path.abspath(root)\n # Ignore patterns (relative to root) or glob-like; simple prefix/glob matching\n self._ignore: List[str] = []\n if ignore:\n # normalize to forward-slash relative prefixes for matching\n for pat in ignore:\n if not pat:\n continue\n p = os.path.normpath(pat)\n # 
store both relative and absolute forms for convenience\n self._ignore.append(p)\n # Load .gitignore as pathspec if available\n self._pspec = None\n try:\n gi = os.path.join(self.root, \".gitignore\")\n if pathspec is not None and os.path.exists(gi):\n with open(gi, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n lines = [ln.rstrip(\"\\n\") for ln in fh]\n self._pspec = pathspec.PathSpec.from_lines(\"gitwildmatch\", lines)\n except Exception:\n self._pspec = None\n self.symbols_by_fqn: Dict[str, Symbol] = {}\n self.symbols_by_name: Dict[str, List[str]] = {}\n self.modules: Dict[str, ModuleInfo] = {}\n self.indexed_files: List[str] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_fqn_or_key)\n self.module_to_tests: Dict[str, List[str]] = {}\n self.coverage_files: Dict[str, set[int]] = {}\n self.symbol_coverage: Dict[str, float] = {}\n self.module_imports: Dict[str, List[str]] = {}\n self.module_star_imports: Dict[str, List[str]] = {}\n self.pytest_nodes_by_module: Dict[str, List[str]] = {}\n self._cached_mtimes: Dict[str, int] = {}\n self._cached_hashes: Dict[str, str] = {}\n\n def _is_ignored(self, rel: str) -> bool:\n try:\n r = rel.replace(os.sep, \"/\")\n # pathspec first\n if self._pspec is not None:\n if self._pspec.match_file(r):\n return True\n # fallback: prefix match\n for pat in self._ignore:\n pp = pat.replace(os.sep, \"/\")\n if r == pp or r.startswith(pp + \"/\"):\n return True\n return False\n except Exception:\n return False\n\n @classmethod\n def load_or_build(cls, root: str, *, ignore_cache: bool = False, ignore: Optional[List[str]] = None) -> \"CodeGraph\":\n g = cls(root=root, ignore=ignore)\n g.build(ignore_cache=ignore_cache)\n return g\n\n def build(self, ignore_cache: bool = False) -> None:\n cache_path = os.path.join(self.root, \".codegraph.json\")\n if (not ignore_cache) and self._load_cache_relaxed(cache_path):\n # Incremental: reindex changed and dependents\n changed, removed = self._detect_changed_files(\n self._cached_mtimes, self._cached_hashes\n )\n if not changed and not removed:\n return\n self._incremental_reindex(changed, removed)\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)\n return\n for dirpath, dirnames, filenames in os.walk(self.root):\n # prune ignored directories in-place\n dir_rel = os.path.relpath(dirpath, self.root)\n # remove child dirs that are ignored\n dirnames[:] = [d for d in dirnames if not self._is_ignored(os.path.join(dir_rel, d))]\n if self._is_ignored(dir_rel):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n fpath = os.path.join(dirpath, fn)\n if self._is_ignored(os.path.relpath(fpath, self.root)):\n continue\n try:\n src = open(fpath, \"r\", encoding=\"utf-8\").read()\n except Exception:\n continue\n try:\n tree = ast.parse(src)\n except Exception:\n continue\n self.indexed_files.append(fpath)\n self._index_module(fpath, tree)\n # Build test mapping from imports in test modules\n self._build_test_mapping()\n # Expand star imports and post-resolve call targets\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)\n\n def _add_symbol(self, sym: Symbol) -> None:\n self.symbols_by_fqn[sym.fqn] = sym\n self.symbols_by_name.setdefault(sym.name, []).append(sym.fqn)\n mi = self.modules.setdefault(\n sym.module, ModuleInfo(module=sym.module, file=sym.file)\n )\n if sym.fqn not in mi.defs:\n mi.defs.append(sym.fqn)\n\n def _index_module(self, path: str, tree: ast.AST) -> None:\n module = 
self._module_name_for_path(path)\n is_test = (\"/tests/\" in path) or (os.path.basename(path).startswith(\"test_\"))\n self.modules.setdefault(\n module, ModuleInfo(module=module, file=path, is_test=is_test)\n )\n # Add module symbol\n mod_fqn = module\n mod_name = module.split(\".\")[-1]\n self._add_symbol(\n Symbol(\n fqn=mod_fqn,\n name=mod_name,\n qualname=\"\",\n kind=\"module\",\n module=module,\n file=path,\n line=1,\n end_line=1,\n )\n )\n # Visit\n visitor = _ModuleVisitor(module, path)\n visitor.visit(tree)\n # Register imports\n self.modules[module].imports.update(visitor.imports)\n # Module dependency edges\n self.module_imports[module] = sorted(visitor.import_modules)\n # Record star imports for later expansion\n self.module_star_imports[module] = list(getattr(visitor, \"star_imports\", []))\n # Record __all__ exports\n self.modules[module].exports = list(getattr(visitor, \"exports\", []))\n # Register defs\n for sym in visitor.symbols:\n self._add_symbol(sym)\n # Register calls\n for caller, callee_key in visitor.calls:\n callee_fqn = self._resolve_callee(module, callee_key, visitor)\n self.calls.append((caller, callee_fqn or callee_key))\n # Collect pytest nodes if test module\n if is_test:\n rel = os.path.relpath(path, self.root)\n self.pytest_nodes_by_module[module] = self._collect_pytest_nodes(tree, rel)\n\n def owners_of(self, symbol: str) -> List[str]:\n fqns = self.symbols_by_name.get(symbol, [])\n return sorted(\n {os.path.relpath(self.symbols_by_fqn[f].file, self.root) for f in fqns}\n )\n\n def find_symbol(self, name: str) -> List[Symbol]:\n return [self.symbols_by_fqn[f] for f in self.symbols_by_name.get(name, [])]\n\n def defs_in(self, module: str) -> List[str]:\n mi = self.modules.get(module)\n return list(mi.defs) if mi else []\n\n def calls_of(self, fqn: str) -> List[str]:\n return [c for (caller, c) in self.calls if caller == fqn]\n\n def who_calls(self, fqn: str) -> List[str]:\n target_short = fqn.split(\".\")[-1]\n out: List[str] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append(caller)\n return out\n\n def search_refs(self, pattern: str) -> List[Tuple[str, int, str]]:\n \"\"\"Ripgrep-based raw reference search (file, line_no, text).\"\"\"\n try:\n import subprocess\n\n out = subprocess.check_output([\"rg\", \"-n\", pattern, self.root], text=True)\n rows: List[Tuple[str, int, str]] = []\n for line in out.splitlines():\n try:\n fp, ln, txt = line.split(\":\", 2)\n rows.append((os.path.relpath(fp, self.root), int(ln), txt))\n except Exception:\n continue\n return rows\n except Exception:\n # Fallback: simple Python regex over indexed .py files\n rows: List[Tuple[str, int, str]] = []\n try:\n rx = re.compile(pattern)\n except Exception:\n # If pattern is not a valid regex, escape it\n rx = re.compile(re.escape(pattern))\n for fpath in self.indexed_files:\n rel = os.path.relpath(fpath, self.root)\n try:\n with open(fpath, \"r\", encoding=\"utf-8\", errors=\"ignore\") as rf:\n for i, ln in enumerate(rf, start=1):\n if rx.search(ln):\n rows.append((rel, i, ln.rstrip(\"\\n\")))\n except Exception:\n continue\n return rows\n\n # --- Helpers --- #\n\n def module_for_file(self, path: str) -> Optional[str]:\n p = path\n if not os.path.isabs(p):\n p = os.path.abspath(os.path.join(self.root, path))\n for mod, mi in self.modules.items():\n if os.path.abspath(mi.file) == p:\n return mod\n return None\n\n def file_for_module(self, module: str) -> Optional[str]:\n mi = self.modules.get(module)\n return 
mi.file if mi else None\n\n def tests_for_module(self, module: str) -> List[str]:\n base = module.split(\".\")[0]\n out = set(self.module_to_tests.get(base, []))\n # include direct module key if present\n out.update(self.module_to_tests.get(module, []))\n return sorted(out)\n\n def tests_for_symbol(self, fqn: str) -> List[str]:\n mod = fqn.rsplit(\".\", 1)[0] if \".\" in fqn else fqn\n return self.tests_for_module(mod)\n\n def refs_of(self, fqn: str) -> List[Tuple[str, str]]:\n \"\"\"Return (caller_fqn, callee_match) entries that reference fqn or its short name.\"\"\"\n target_short = fqn.split(\".\")[-1]\n out: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append((caller, callee))\n return out\n\n def export_json(self) -> Dict[str, Any]:\n return {\n \"root\": self.root,\n \"files\": [os.path.relpath(p, self.root) for p in self.indexed_files],\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for k, v in self.modules.items()},\n \"calls\": self.calls,\n \"module_to_tests\": self.module_to_tests,\n \"coverage_files\": {\n os.path.relpath(k, self.root): sorted(list(v))\n for k, v in self.coverage_files.items()\n },\n \"symbol_coverage\": self.symbol_coverage,\n \"module_imports\": self.module_imports,\n }\n\n def export_sqlite(self, db_path: str) -> None:\n import sqlite3\n\n conn = sqlite3.connect(db_path)\n cur = conn.cursor()\n cur.executescript(\n \"\"\"\n PRAGMA journal_mode=WAL;\n CREATE TABLE IF NOT EXISTS files(path TEXT PRIMARY KEY);\n CREATE TABLE IF NOT EXISTS modules(module TEXT PRIMARY KEY, file TEXT, is_test INT);\n CREATE TABLE IF NOT EXISTS symbols(\n fqn TEXT PRIMARY KEY, name TEXT, qualname TEXT, kind TEXT, module TEXT,\n file TEXT, line INT, end_line INT, doc TEXT, signature TEXT, returns TEXT\n );\n CREATE TABLE IF NOT EXISTS calls(caller TEXT, callee TEXT);\n CREATE TABLE IF NOT EXISTS tests_map(module TEXT, test_module TEXT);\n CREATE TABLE IF NOT EXISTS coverage(file TEXT, line INT);\n CREATE TABLE IF NOT EXISTS mod_deps(module TEXT, dep TEXT);\n \"\"\"\n )\n cur.executemany(\n \"INSERT OR IGNORE INTO files(path) VALUES(?)\",\n [(os.path.relpath(f, self.root),) for f in self.indexed_files],\n )\n cur.executemany(\n \"INSERT OR REPLACE INTO modules(module,file,is_test) VALUES(?,?,?)\",\n [\n (m, os.path.relpath(mi.file, self.root), 1 if mi.is_test else 0)\n for m, mi in self.modules.items()\n ],\n )\n cur.executemany(\n \"INSERT OR REPLACE INTO symbols VALUES(?,?,?,?,?,?,?,?,?,?,?)\",\n [\n (\n s.fqn,\n s.name,\n s.qualname,\n s.kind,\n s.module,\n os.path.relpath(s.file, self.root),\n int(s.line),\n int(s.end_line),\n s.doc or \"\",\n s.signature or \"\",\n s.returns or \"\",\n )\n for s in self.symbols_by_fqn.values()\n ],\n )\n if self.calls:\n cur.executemany(\n \"INSERT INTO calls(caller,callee) VALUES(?,?)\", list(self.calls)\n )\n rows = []\n for mod, tests in self.module_to_tests.items():\n for t in tests:\n rows.append((mod, t))\n if rows:\n cur.executemany(\n \"INSERT INTO tests_map(module,test_module) VALUES(?,?)\", rows\n )\n cov_rows = []\n for f, lines in self.coverage_files.items():\n rel = os.path.relpath(f, self.root)\n cov_rows.extend([(rel, int(n)) for n in lines])\n if cov_rows:\n cur.executemany(\"INSERT INTO coverage(file,line) VALUES(?,?)\", cov_rows)\n dep_rows = []\n for m, deps in self.module_imports.items():\n for d in deps:\n dep_rows.append((m, d))\n if dep_rows:\n cur.executemany(\"INSERT 
INTO mod_deps(module,dep) VALUES(?,?)\", dep_rows)\n conn.commit()\n conn.close()\n\n def _module_name_for_path(self, path: str) -> str:\n rel = os.path.relpath(path, self.root)\n no_ext = rel[:-3] if rel.endswith(\".py\") else rel\n parts = no_ext.split(os.sep)\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n return \".\".join(p for p in parts if p)\n\n def _resolve_callee(\n self, module: str, callee_key: str, visitor: \"_ModuleVisitor\"\n ) -> Optional[str]:\n if \".\" in callee_key and \":\" not in callee_key:\n return callee_key\n\n if \":\" in callee_key:\n mod_alias, name = callee_key.split(\":\", 1)\n target = visitor.imports.get(mod_alias)\n if target:\n return f\"{target}.{name}\" if not target.endswith(f\".{name}\") else target\n\n mi = self.modules.get(module)\n if mi:\n # Prefer any def with same suffix name (matches within class or function)\n for f in mi.defs:\n if f.split(\".\")[-1] == callee_key:\n return f\n\n tgt = visitor.imports.get(callee_key)\n if tgt:\n return tgt\n return None\n\n def _build_test_mapping(self) -> None:\n for mod, mi in self.modules.items():\n if not mi.is_test:\n continue\n for alias, target in mi.imports.items():\n # target may be module or module.symbol\n m = target.split(\".\")[0]\n self.module_to_tests.setdefault(m, []).append(mod)\n\n def _expand_star_imports(self) -> None:\n for mod, stars in self.module_star_imports.items():\n mi = self.modules.get(mod)\n if not mi:\n continue\n for star_mod in stars:\n defs = [\n f\n for f in self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")\n ).defs\n ]\n exports = set(\n self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")\n ).exports\n or []\n )\n for fqn in defs:\n name = fqn.split(\".\")[-1]\n if exports:\n if name not in exports:\n continue\n elif name.startswith(\"_\"):\n continue\n if name not in mi.imports:\n mi.imports[name] = f\"{star_mod}.{name}\"\n\n def _post_resolve_calls(self) -> None:\n # After imports expanded, try to resolve unresolved simple names\n new_calls: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if \".\" in callee:\n new_calls.append((caller, callee))\n continue\n # Find caller module\n caller_mod = caller.rsplit(\".\", 1)[0] if \".\" in caller else caller\n imports = self.modules.get(\n caller_mod, ModuleInfo(module=caller_mod, file=\"\")\n ).imports\n tgt = imports.get(callee)\n if tgt:\n new_calls.append((caller, tgt))\n else:\n # leave as-is\n new_calls.append((caller, callee))\n self.calls = new_calls\n\n def unresolved_calls(self) -> List[Tuple[str, str]]:\n return [\n (a, c)\n for (a, c) in self.calls\n if \".\" not in c and not self._is_builtin_name(c)\n ]\n\n def _collect_pytest_nodes(self, tree: ast.AST, rel_path: str) -> List[str]:\n nodes: List[str] = []\n # top-level test_* functions\n for n in getattr(tree, \"body\", []) or []:\n if isinstance(n, ast.FunctionDef) and n.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, None, n))\n if isinstance(n, ast.ClassDef) and n.name.startswith(\"Test\"):\n cls = n.name\n for m in getattr(n, \"body\", []) or []:\n if isinstance(m, ast.FunctionDef) and m.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, cls, m))\n return nodes\n\n def _expand_parametrize(\n self, rel_path: str, cls: Optional[str], fn: ast.FunctionDef\n ) -> List[str]:\n base = f\"{rel_path}::\" + (f\"{cls}::\" if cls else \"\") + fn.name\n # Look for @pytest.mark.parametrize(\"arg\", [vals])\n total: List[str] = []\n params: List[int] = []\n 
try:\n for dec in getattr(fn, \"decorator_list\", []) or []:\n # pytest.mark.parametrize(...)\n if (\n isinstance(dec, ast.Call)\n and isinstance(dec.func, ast.Attribute)\n and dec.func.attr == \"parametrize\"\n ):\n # estimate number of cases from second arg list length\n if len(dec.args) >= 2 and isinstance(\n dec.args[1], (ast.List, ast.Tuple)\n ):\n params.append(len(dec.args[1].elts))\n except Ex\n# ... truncated ...","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.Symbol","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.code_graph.Symbol#L15-L26","kind":"class","name":"Symbol","path":"examples/scripts/code_graph.py","language":"python","start_line":15,"end_line":26,"context_start_line":1,"context_end_line":46,"code":"import os\nimport ast\nimport re\nimport json\nimport time # noqa: F401\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Tuple, Optional\ntry:\n import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\n\n\n@dataclass\nclass Symbol:\n fqn: str\n name: str\n qualname: str\n kind: str # module|class|function|variable\n module: str\n file: str\n line: int\n end_line: int\n doc: Optional[str] = None\n signature: Optional[str] = None\n returns: Optional[str] = None\n\n\n@dataclass\nclass ModuleInfo:\n module: str\n file: str\n is_test: bool = False\n imports: Dict[str, str] = field(\n default_factory=dict\n ) # alias -> target (module or module.symbol)\n defs: List[str] = field(default_factory=list) # list of symbol FQNs\n exports: List[str] = field(default_factory=list) # names from __all__\n\n\nclass CodeGraph:\n def __init__(self, root: str, *, ignore: Optional[List[str]] = None) -> None:\n self.root = os.path.abspath(root)\n # Ignore patterns (relative to root) or glob-like; simple prefix/glob matching\n self._ignore: List[str] = []\n if ignore:","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.ModuleInfo","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.code_graph.ModuleInfo#L30-L38","kind":"class","name":"ModuleInfo","path":"examples/scripts/code_graph.py","language":"python","start_line":30,"end_line":38,"context_start_line":10,"context_end_line":58,"code":"except Exception: # pragma: no cover\n pathspec = None # type: ignore\n\n\n@dataclass\nclass Symbol:\n fqn: str\n name: str\n qualname: str\n kind: str # module|class|function|variable\n module: str\n file: str\n line: int\n end_line: int\n doc: Optional[str] = None\n signature: Optional[str] = None\n returns: Optional[str] = None\n\n\n@dataclass\nclass ModuleInfo:\n module: str\n file: str\n is_test: bool = False\n imports: Dict[str, str] = field(\n default_factory=dict\n ) # alias -> target (module or module.symbol)\n defs: List[str] = field(default_factory=list) # list of symbol FQNs\n exports: List[str] = field(default_factory=list) # names from __all__\n\n\nclass CodeGraph:\n def __init__(self, root: str, *, ignore: Optional[List[str]] = None) -> None:\n self.root = os.path.abspath(root)\n # Ignore patterns (relative to root) or glob-like; simple prefix/glob matching\n self._ignore: List[str] = []\n if ignore:\n # normalize to forward-slash relative prefixes for matching\n for pat in ignore:\n if not pat:\n continue\n p = 
os.path.normpath(pat)\n # store both relative and absolute forms for convenience\n self._ignore.append(p)\n # Load .gitignore as pathspec if available\n self._pspec = None\n try:\n gi = os.path.join(self.root, \".gitignore\")\n if pathspec is not None and os.path.exists(gi):","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.CodeGraph","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.code_graph.CodeGraph#L41-L863","kind":"class","name":"CodeGraph","path":"examples/scripts/code_graph.py","language":"python","start_line":41,"end_line":863,"context_start_line":21,"context_end_line":883,"code":" file: str\n line: int\n end_line: int\n doc: Optional[str] = None\n signature: Optional[str] = None\n returns: Optional[str] = None\n\n\n@dataclass\nclass ModuleInfo:\n module: str\n file: str\n is_test: bool = False\n imports: Dict[str, str] = field(\n default_factory=dict\n ) # alias -> target (module or module.symbol)\n defs: List[str] = field(default_factory=list) # list of symbol FQNs\n exports: List[str] = field(default_factory=list) # names from __all__\n\n\nclass CodeGraph:\n def __init__(self, root: str, *, ignore: Optional[List[str]] = None) -> None:\n self.root = os.path.abspath(root)\n # Ignore patterns (relative to root) or glob-like; simple prefix/glob matching\n self._ignore: List[str] = []\n if ignore:\n # normalize to forward-slash relative prefixes for matching\n for pat in ignore:\n if not pat:\n continue\n p = os.path.normpath(pat)\n # store both relative and absolute forms for convenience\n self._ignore.append(p)\n # Load .gitignore as pathspec if available\n self._pspec = None\n try:\n gi = os.path.join(self.root, \".gitignore\")\n if pathspec is not None and os.path.exists(gi):\n with open(gi, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n lines = [ln.rstrip(\"\\n\") for ln in fh]\n self._pspec = pathspec.PathSpec.from_lines(\"gitwildmatch\", lines)\n except Exception:\n self._pspec = None\n self.symbols_by_fqn: Dict[str, Symbol] = {}\n self.symbols_by_name: Dict[str, List[str]] = {}\n self.modules: Dict[str, ModuleInfo] = {}\n self.indexed_files: List[str] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_fqn_or_key)\n self.module_to_tests: Dict[str, List[str]] = {}\n self.coverage_files: Dict[str, set[int]] = {}\n self.symbol_coverage: Dict[str, float] = {}\n self.module_imports: Dict[str, List[str]] = {}\n self.module_star_imports: Dict[str, List[str]] = {}\n self.pytest_nodes_by_module: Dict[str, List[str]] = {}\n self._cached_mtimes: Dict[str, int] = {}\n self._cached_hashes: Dict[str, str] = {}\n\n def _is_ignored(self, rel: str) -> bool:\n try:\n r = rel.replace(os.sep, \"/\")\n # pathspec first\n if self._pspec is not None:\n if self._pspec.match_file(r):\n return True\n # fallback: prefix match\n for pat in self._ignore:\n pp = pat.replace(os.sep, \"/\")\n if r == pp or r.startswith(pp + \"/\"):\n return True\n return False\n except Exception:\n return False\n\n @classmethod\n def load_or_build(cls, root: str, *, ignore_cache: bool = False, ignore: Optional[List[str]] = None) -> \"CodeGraph\":\n g = cls(root=root, ignore=ignore)\n g.build(ignore_cache=ignore_cache)\n return g\n\n def build(self, ignore_cache: bool = False) -> None:\n cache_path = os.path.join(self.root, \".codegraph.json\")\n if (not ignore_cache) and self._load_cache_relaxed(cache_path):\n # Incremental: reindex changed and 
dependents\n changed, removed = self._detect_changed_files(\n self._cached_mtimes, self._cached_hashes\n )\n if not changed and not removed:\n return\n self._incremental_reindex(changed, removed)\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)\n return\n for dirpath, dirnames, filenames in os.walk(self.root):\n # prune ignored directories in-place\n dir_rel = os.path.relpath(dirpath, self.root)\n # remove child dirs that are ignored\n dirnames[:] = [d for d in dirnames if not self._is_ignored(os.path.join(dir_rel, d))]\n if self._is_ignored(dir_rel):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n fpath = os.path.join(dirpath, fn)\n if self._is_ignored(os.path.relpath(fpath, self.root)):\n continue\n try:\n src = open(fpath, \"r\", encoding=\"utf-8\").read()\n except Exception:\n continue\n try:\n tree = ast.parse(src)\n except Exception:\n continue\n self.indexed_files.append(fpath)\n self._index_module(fpath, tree)\n # Build test mapping from imports in test modules\n self._build_test_mapping()\n # Expand star imports and post-resolve call targets\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)\n\n def _add_symbol(self, sym: Symbol) -> None:\n self.symbols_by_fqn[sym.fqn] = sym\n self.symbols_by_name.setdefault(sym.name, []).append(sym.fqn)\n mi = self.modules.setdefault(\n sym.module, ModuleInfo(module=sym.module, file=sym.file)\n )\n if sym.fqn not in mi.defs:\n mi.defs.append(sym.fqn)\n\n def _index_module(self, path: str, tree: ast.AST) -> None:\n module = self._module_name_for_path(path)\n is_test = (\"/tests/\" in path) or (os.path.basename(path).startswith(\"test_\"))\n self.modules.setdefault(\n module, ModuleInfo(module=module, file=path, is_test=is_test)\n )\n # Add module symbol\n mod_fqn = module\n mod_name = module.split(\".\")[-1]\n self._add_symbol(\n Symbol(\n fqn=mod_fqn,\n name=mod_name,\n qualname=\"\",\n kind=\"module\",\n module=module,\n file=path,\n line=1,\n end_line=1,\n )\n )\n # Visit\n visitor = _ModuleVisitor(module, path)\n visitor.visit(tree)\n # Register imports\n self.modules[module].imports.update(visitor.imports)\n # Module dependency edges\n self.module_imports[module] = sorted(visitor.import_modules)\n # Record star imports for later expansion\n self.module_star_imports[module] = list(getattr(visitor, \"star_imports\", []))\n # Record __all__ exports\n self.modules[module].exports = list(getattr(visitor, \"exports\", []))\n # Register defs\n for sym in visitor.symbols:\n self._add_symbol(sym)\n # Register calls\n for caller, callee_key in visitor.calls:\n callee_fqn = self._resolve_callee(module, callee_key, visitor)\n self.calls.append((caller, callee_fqn or callee_key))\n # Collect pytest nodes if test module\n if is_test:\n rel = os.path.relpath(path, self.root)\n self.pytest_nodes_by_module[module] = self._collect_pytest_nodes(tree, rel)\n\n def owners_of(self, symbol: str) -> List[str]:\n fqns = self.symbols_by_name.get(symbol, [])\n return sorted(\n {os.path.relpath(self.symbols_by_fqn[f].file, self.root) for f in fqns}\n )\n\n def find_symbol(self, name: str) -> List[Symbol]:\n return [self.symbols_by_fqn[f] for f in self.symbols_by_name.get(name, [])]\n\n def defs_in(self, module: str) -> List[str]:\n mi = self.modules.get(module)\n return list(mi.defs) if mi else []\n\n def calls_of(self, fqn: str) -> List[str]:\n return [c for (caller, c) in self.calls if caller == fqn]\n\n def who_calls(self, fqn: str) -> List[str]:\n 
target_short = fqn.split(\".\")[-1]\n out: List[str] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append(caller)\n return out\n\n def search_refs(self, pattern: str) -> List[Tuple[str, int, str]]:\n \"\"\"Ripgrep-based raw reference search (file, line_no, text).\"\"\"\n try:\n import subprocess\n\n out = subprocess.check_output([\"rg\", \"-n\", pattern, self.root], text=True)\n rows: List[Tuple[str, int, str]] = []\n for line in out.splitlines():\n try:\n fp, ln, txt = line.split(\":\", 2)\n rows.append((os.path.relpath(fp, self.root), int(ln), txt))\n except Exception:\n continue\n return rows\n except Exception:\n # Fallback: simple Python regex over indexed .py files\n rows: List[Tuple[str, int, str]] = []\n try:\n rx = re.compile(pattern)\n except Exception:\n # If pattern is not a valid regex, escape it\n rx = re.compile(re.escape(pattern))\n for fpath in self.indexed_files:\n rel = os.path.relpath(fpath, self.root)\n try:\n with open(fpath, \"r\", encoding=\"utf-8\", errors=\"ignore\") as rf:\n for i, ln in enumerate(rf, start=1):\n if rx.search(ln):\n rows.append((rel, i, ln.rstrip(\"\\n\")))\n except Exception:\n continue\n return rows\n\n # --- Helpers --- #\n\n def module_for_file(self, path: str) -> Optional[str]:\n p = path\n if not os.path.isabs(p):\n p = os.path.abspath(os.path.join(self.root, path))\n for mod, mi in self.modules.items():\n if os.path.abspath(mi.file) == p:\n return mod\n return None\n\n def file_for_module(self, module: str) -> Optional[str]:\n mi = self.modules.get(module)\n return mi.file if mi else None\n\n def tests_for_module(self, module: str) -> List[str]:\n base = module.split(\".\")[0]\n out = set(self.module_to_tests.get(base, []))\n # include direct module key if present\n out.update(self.module_to_tests.get(module, []))\n return sorted(out)\n\n def tests_for_symbol(self, fqn: str) -> List[str]:\n mod = fqn.rsplit(\".\", 1)[0] if \".\" in fqn else fqn\n return self.tests_for_module(mod)\n\n def refs_of(self, fqn: str) -> List[Tuple[str, str]]:\n \"\"\"Return (caller_fqn, callee_match) entries that reference fqn or its short name.\"\"\"\n target_short = fqn.split(\".\")[-1]\n out: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append((caller, callee))\n return out\n\n def export_json(self) -> Dict[str, Any]:\n return {\n \"root\": self.root,\n \"files\": [os.path.relpath(p, self.root) for p in self.indexed_files],\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for k, v in self.modules.items()},\n \"calls\": self.calls,\n \"module_to_tests\": self.module_to_tests,\n \"coverage_files\": {\n os.path.relpath(k, self.root): sorted(list(v))\n for k, v in self.coverage_files.items()\n },\n \"symbol_coverage\": self.symbol_coverage,\n \"module_imports\": self.module_imports,\n }\n\n def export_sqlite(self, db_path: str) -> None:\n import sqlite3\n\n conn = sqlite3.connect(db_path)\n cur = conn.cursor()\n cur.executescript(\n \"\"\"\n PRAGMA journal_mode=WAL;\n CREATE TABLE IF NOT EXISTS files(path TEXT PRIMARY KEY);\n CREATE TABLE IF NOT EXISTS modules(module TEXT PRIMARY KEY, file TEXT, is_test INT);\n CREATE TABLE IF NOT EXISTS symbols(\n fqn TEXT PRIMARY KEY, name TEXT, qualname TEXT, kind TEXT, module TEXT,\n file TEXT, line INT, end_line INT, doc TEXT, signature TEXT, returns TEXT\n );\n CREATE TABLE IF NOT EXISTS calls(caller TEXT, 
callee TEXT);\n CREATE TABLE IF NOT EXISTS tests_map(module TEXT, test_module TEXT);\n CREATE TABLE IF NOT EXISTS coverage(file TEXT, line INT);\n CREATE TABLE IF NOT EXISTS mod_deps(module TEXT, dep TEXT);\n \"\"\"\n )\n cur.executemany(\n \"INSERT OR IGNORE INTO files(path) VALUES(?)\",\n [(os.path.relpath(f, self.root),) for f in self.indexed_files],\n )\n cur.executemany(\n \"INSERT OR REPLACE INTO modules(module,file,is_test) VALUES(?,?,?)\",\n [\n (m, os.path.relpath(mi.file, self.root), 1 if mi.is_test else 0)\n for m, mi in self.modules.items()\n ],\n )\n cur.executemany(\n \"INSERT OR REPLACE INTO symbols VALUES(?,?,?,?,?,?,?,?,?,?,?)\",\n [\n (\n s.fqn,\n s.name,\n s.qualname,\n s.kind,\n s.module,\n os.path.relpath(s.file, self.root),\n int(s.line),\n int(s.end_line),\n s.doc or \"\",\n s.signature or \"\",\n s.returns or \"\",\n )\n for s in self.symbols_by_fqn.values()\n ],\n )\n if self.calls:\n cur.executemany(\n \"INSERT INTO calls(caller,callee) VALUES(?,?)\", list(self.calls)\n )\n rows = []\n for mod, tests in self.module_to_tests.items():\n for t in tests:\n rows.append((mod, t))\n if rows:\n cur.executemany(\n \"INSERT INTO tests_map(module,test_module) VALUES(?,?)\", rows\n )\n cov_rows = []\n for f, lines in self.coverage_files.items():\n rel = os.path.relpath(f, self.root)\n cov_rows.extend([(rel, int(n)) for n in lines])\n if cov_rows:\n cur.executemany(\"INSERT INTO coverage(file,line) VALUES(?,?)\", cov_rows)\n dep_rows = []\n for m, deps in self.module_imports.items():\n for d in deps:\n dep_rows.append((m, d))\n if dep_rows:\n cur.executemany(\"INSERT INTO mod_deps(module,dep) VALUES(?,?)\", dep_rows)\n conn.commit()\n conn.close()\n\n def _module_name_for_path(self, path: str) -> str:\n rel = os.path.relpath(path, self.root)\n no_ext = rel[:-3] if rel.endswith(\".py\") else rel\n parts = no_ext.split(os.sep)\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n return \".\".join(p for p in parts if p)\n\n def _resolve_callee(\n self, module: str, callee_key: str, visitor: \"_ModuleVisitor\"\n ) -> Optional[str]:\n if \".\" in callee_key and \":\" not in callee_key:\n return callee_key\n\n if \":\" in callee_key:\n mod_alias, name = callee_key.split(\":\", 1)\n target = visitor.imports.get(mod_alias)\n if target:\n return f\"{target}.{name}\" if not target.endswith(f\".{name}\") else target\n\n mi = self.modules.get(module)\n if mi:\n # Prefer any def with same suffix name (matches within class or function)\n for f in mi.defs:\n if f.split(\".\")[-1] == callee_key:\n return f\n\n tgt = visitor.imports.get(callee_key)\n if tgt:\n return tgt\n return None\n\n def _build_test_mapping(self) -> None:\n for mod, mi in self.modules.items():\n if not mi.is_test:\n continue\n for alias, target in mi.imports.items():\n # target may be module or module.symbol\n m = target.split(\".\")[0]\n self.module_to_tests.setdefault(m, []).append(mod)\n\n def _expand_star_imports(self) -> None:\n for mod, stars in self.module_star_imports.items():\n mi = self.modules.get(mod)\n if not mi:\n continue\n for star_mod in stars:\n defs = [\n f\n for f in self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")\n ).defs\n ]\n exports = set(\n self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")\n ).exports\n or []\n )\n for fqn in defs:\n name = fqn.split(\".\")[-1]\n if exports:\n if name not in exports:\n continue\n elif name.startswith(\"_\"):\n continue\n if name not in mi.imports:\n mi.imports[name] = f\"{star_mod}.{name}\"\n\n def 
_post_resolve_calls(self) -> None:\n # After imports expanded, try to resolve unresolved simple names\n new_calls: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if \".\" in callee:\n new_calls.append((caller, callee))\n continue\n # Find caller module\n caller_mod = caller.rsplit(\".\", 1)[0] if \".\" in caller else caller\n imports = self.modules.get(\n caller_mod, ModuleInfo(module=caller_mod, file=\"\")\n ).imports\n tgt = imports.get(callee)\n if tgt:\n new_calls.append((caller, tgt))\n else:\n # leave as-is\n new_calls.append((caller, callee))\n self.calls = new_calls\n\n def unresolved_calls(self) -> List[Tuple[str, str]]:\n return [\n (a, c)\n for (a, c) in self.calls\n if \".\" not in c and not self._is_builtin_name(c)\n ]\n\n def _collect_pytest_nodes(self, tree: ast.AST, rel_path: str) -> List[str]:\n nodes: List[str] = []\n # top-level test_* functions\n for n in getattr(tree, \"body\", []) or []:\n if isinstance(n, ast.FunctionDef) and n.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, None, n))\n if isinstance(n, ast.ClassDef) and n.name.startswith(\"Test\"):\n cls = n.name\n for m in getattr(n, \"body\", []) or []:\n if isinstance(m, ast.FunctionDef) and m.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, cls, m))\n return nodes\n\n def _expand_parametrize(\n self, rel_path: str, cls: Optional[str], fn: ast.FunctionDef\n ) -> List[str]:\n base = f\"{rel_path}::\" + (f\"{cls}::\" if cls else \"\") + fn.name\n # Look for @pytest.mark.parametrize(\"arg\", [vals])\n total: List[str] = []\n params: List[int] = []\n try:\n for dec in getattr(fn, \"decorator_list\", []) or []:\n # pytest.mark.parametrize(...)\n if (\n isinstance(dec, ast.Call)\n and isinstance(dec.func, ast.Attribute)\n and dec.func.attr == \"parametrize\"\n ):\n # estimate number of cases from second arg list length\n if len(dec.args) >= 2 and isinstance(\n dec.args[1], (ast.List, ast.Tuple)\n ):\n params.append(len(dec.args[1].elts))\n except Exception:\n pass\n if params:\n count: int = 1\n for k in params:\n try:\n count *= int(k)\n except Exception:\n count = max(count, 1)\n for i in range(count):\n total.append(f\"{base}[{i}]\")\n return total\n return [base]\n\n # --- Cache --- #\n\n def _try_load_cache(self\n# ... 
truncated ...","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":true} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._cli","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._cli#L866-L958","kind":"function","name":"_cli","path":"examples/scripts/code_graph.py","language":"python","start_line":866,"end_line":958,"context_start_line":846,"context_end_line":978,"code":" for fqn, sym in self.symbols_by_fqn.items():\n covered = files_hits.get(sym.file, set())\n a = int(sym.line)\n b = int(sym.end_line) if int(sym.end_line) >= a else a\n span = list(range(a, b + 1))\n if not span:\n sym_cov[fqn] = 0.0\n continue\n hits = sum(1 for x in span if x in covered)\n sym_cov[fqn] = hits / float(len(span))\n self.symbol_coverage = sym_cov\n except Exception:\n # Leave coverage empty on error\n self.coverage_files = {}\n self.symbol_coverage = {}\n\n def coverage_of(self, fqn: str) -> Optional[float]:\n return self.symbol_coverage.get(fqn)\n\n\ndef _cli() -> None:\n import argparse\n import json\n\n p = argparse.ArgumentParser()\n p.add_argument(\"root\", nargs=\"?\", default=\"./repo\")\n p.add_argument(\"--ignore\", action=\"append\", default=None, help=\"Relative paths to ignore (repeatable)\")\n p.add_argument(\"--owners-of\", dest=\"owners_of\", default=None)\n p.add_argument(\"--search\", dest=\"search\", default=None)\n p.add_argument(\"--defs-in\", dest=\"defs_in\", default=None)\n p.add_argument(\"--calls-of\", dest=\"calls_of\", default=None)\n p.add_argument(\"--who-calls\", dest=\"who_calls\", default=None)\n p.add_argument(\"--dump\", dest=\"dump\", action=\"store_true\")\n p.add_argument(\"--coverage-xml\", dest=\"coverage_xml\", default=None)\n p.add_argument(\"--coverage-of\", dest=\"coverage_of\", default=None)\n p.add_argument(\"--refs-of\", dest=\"refs_of\", default=None)\n p.add_argument(\"--tests-for\", dest=\"tests_for\", default=None)\n p.add_argument(\"--tests-for-module\", dest=\"tests_for_module\", default=None)\n p.add_argument(\"--export\", dest=\"export\", default=None)\n p.add_argument(\"--no-cache\", dest=\"no_cache\", action=\"store_true\")\n p.add_argument(\"--export-sqlite\", dest=\"export_sqlite\", default=None)\n p.add_argument(\"--pytest-nodes\", dest=\"pytest_nodes\", default=None)\n p.add_argument(\"--module-deps\", dest=\"module_deps\", default=None)\n p.add_argument(\"--unresolved\", dest=\"unresolved\", action=\"store_true\")\n args = p.parse_args()\n g = CodeGraph.load_or_build(args.root, ignore_cache=bool(args.no_cache), ignore=[s for s in (args.ignore or []) if s])\n if args.coverage_xml:\n g.attach_coverage_from_xml(args.coverage_xml)\n # fall through to other queries if provided\n if args.owners_of:\n print(json.dumps(g.owners_of(args.owners_of)))\n return\n if args.search:\n print(json.dumps(g.search_refs(args.search)))\n return\n if args.defs_in:\n print(json.dumps(g.defs_in(args.defs_in)))\n return\n if args.calls_of:\n print(json.dumps(g.calls_of(args.calls_of)))\n return\n if args.who_calls:\n print(json.dumps(g.who_calls(args.who_calls)))\n return\n if args.coverage_of:\n print(json.dumps(g.coverage_of(args.coverage_of)))\n return\n if args.refs_of:\n print(json.dumps(g.refs_of(args.refs_of)))\n return\n if args.tests_for:\n print(json.dumps(g.tests_for_symbol(args.tests_for)))\n return\n if args.tests_for_module:\n print(json.dumps(g.tests_for_module(args.tests_for_module)))\n return\n if args.export:\n obj = g.export_json()\n if 
args.export == \"-\":\n print(json.dumps(obj))\n else:\n open(args.export, \"w\", encoding=\"utf-8\").write(json.dumps(obj))\n print(args.export)\n return\n if args.export_sqlite:\n g.export_sqlite(args.export_sqlite)\n print(args.export_sqlite)\n return\n if args.pytest_nodes:\n mod = args.pytest_nodes\n print(json.dumps(g.pytest_nodes_by_module.get(mod, [])))\n return\n if args.module_deps:\n print(json.dumps(g.module_imports.get(args.module_deps, [])))\n return\n if args.unresolved:\n print(json.dumps(g.unresolved_calls()))\n return\n if args.dump:\n print(\n json.dumps(\n {\n \"files\": len(g.indexed_files),\n \"symbols\": len(g.symbols_by_fqn),\n \"modules\": len(g.modules),\n \"calls\": len(g.calls),\n \"coverage_files\": len(g.coverage_files),\n }\n )\n )\n return\n # Dump summary\n print(json.dumps({\"files\": len(g.indexed_files), \"symbols\": len(g.symbols_by_fqn)}))\n\n\nclass _ModuleVisitor(ast.NodeVisitor):\n def __init__(self, module: str, path: str) -> None:\n self.module = module\n self.path = path\n self.symbols: List[Symbol] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_key)\n self.stack: List[str] = [] # qualname stack\n self.class_stack: List[str] = []\n self.imports: Dict[str, str] = {}\n self.import_modules: List[str] = []\n self.star_imports: List[str] = []\n self.exports: List[str] = []\n\n def _cur_qualname(self) -> str:\n return \".\".join(self.stack)\n\n def _cur_class(self) -> Optional[str]:\n return self.class_stack[-1] if self.class_stack else None","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._ModuleVisitor","uri":"program://Program_Conditioned_Adapter/class/examples.scripts.code_graph._ModuleVisitor#L961-L1193","kind":"class","name":"_ModuleVisitor","path":"examples/scripts/code_graph.py","language":"python","start_line":961,"end_line":1193,"context_start_line":941,"context_end_line":1197,"code":" if args.unresolved:\n print(json.dumps(g.unresolved_calls()))\n return\n if args.dump:\n print(\n json.dumps(\n {\n \"files\": len(g.indexed_files),\n \"symbols\": len(g.symbols_by_fqn),\n \"modules\": len(g.modules),\n \"calls\": len(g.calls),\n \"coverage_files\": len(g.coverage_files),\n }\n )\n )\n return\n # Dump summary\n print(json.dumps({\"files\": len(g.indexed_files), \"symbols\": len(g.symbols_by_fqn)}))\n\n\nclass _ModuleVisitor(ast.NodeVisitor):\n def __init__(self, module: str, path: str) -> None:\n self.module = module\n self.path = path\n self.symbols: List[Symbol] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_key)\n self.stack: List[str] = [] # qualname stack\n self.class_stack: List[str] = []\n self.imports: Dict[str, str] = {}\n self.import_modules: List[str] = []\n self.star_imports: List[str] = []\n self.exports: List[str] = []\n\n def _cur_qualname(self) -> str:\n return \".\".join(self.stack)\n\n def _cur_class(self) -> Optional[str]:\n return self.class_stack[-1] if self.class_stack else None\n\n def _fqn(self, name: str) -> str:\n q = self._cur_qualname()\n return f\"{self.module}.{q + ('.' 
if q else '')}{name}\"\n\n def visit_Import(self, node: ast.Import) -> Any: # type: ignore[override]\n for alias in node.names:\n asname = alias.asname or alias.name.split(\".\")[-1]\n self.imports[asname] = alias.name\n self.import_modules.append(alias.name.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node: ast.ImportFrom) -> Any: # type: ignore[override]\n # Resolve relative imports: from .x import y\n if node.level and node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n prefix = base[:-up] if up > 0 else base\n mod = \".\".join([p for p in prefix if p] + [node.module])\n elif node.level and not node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n mod = \".\".join(base[:-up])\n else:\n mod = node.module or \"\"\n for alias in node.names:\n # star import\n if getattr(alias, \"name\", \"\") == \"*\":\n if mod:\n self.star_imports.append(mod)\n continue\n asname = alias.asname or alias.name\n self.imports[asname] = f\"{mod}.{alias.name}\" if mod else alias.name\n if mod:\n self.import_modules.append(mod.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ClassDef(self, node: ast.ClassDef) -> Any: # type: ignore[override]\n fqn = self._fqn(node.name)\n try:\n doc_s = ast.get_docstring(node) or None\n except Exception:\n doc_s = None\n sym = Symbol(\n fqn=fqn,\n name=node.name,\n qualname=self._cur_qualname(),\n kind=\"class\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n doc=doc_s,\n )\n self.symbols.append(sym)\n self.stack.append(node.name)\n self.class_stack.append(node.name)\n self.generic_visit(node)\n self.class_stack.pop()\n self.stack.pop()\n\n def visit_FunctionDef(self, node: ast.FunctionDef) -> Any: # type: ignore[override]\n self._visit_func_like(node)\n\n def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any: # type: ignore[override]\n self._visit_func_like(node)\n\n def _visit_func_like(self, node: Any) -> None:\n fqn = self._fqn(node.name)\n # Signature & returns\n sig_s, ret_s = None, None\n try:\n params = []\n for a in getattr(node, \"args\", None).args or []:\n nm = getattr(a, \"arg\", \"\")\n ann = getattr(a, \"annotation\", None)\n params.append(f\"{nm}:{ast.unparse(ann)}\" if ann is not None else nm)\n ret = getattr(node, \"returns\", None)\n ret_s = ast.unparse(ret) if ret is not None else None\n sig_s = f\"({', '.join(params)})\"\n except Exception:\n sig_s, ret_s = None, None\n try:\n doc_s = ast.get_docstring(node) or None\n except Exception:\n doc_s = None\n sym = Symbol(\n fqn=fqn,\n name=node.name,\n qualname=self._cur_qualname(),\n kind=\"function\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n doc=doc_s,\n signature=sig_s,\n returns=ret_s,\n )\n self.symbols.append(sym)\n self.stack.append(node.name)\n # Traverse body to collect calls\n for sub in ast.walk(node):\n if isinstance(sub, ast.Call):\n callee_key = self._extract_callee_key(sub.func)\n if callee_key:\n self.calls.append((fqn, callee_key))\n # Decorators as calls\n for dec in getattr(node, \"decorator_list\", []) or []:\n callee_key = self._extract_callee_key(dec)\n if callee_key:\n self.calls.append((fqn, callee_key))\n self.stack.pop()\n\n def visit_Assign(self, node: ast.Assign) -> Any: # type: ignore[override]\n for t in getattr(node, \"targets\", []) or []:\n if isinstance(t, 
ast.Name):\n fqn = self._fqn(t.id)\n sym = Symbol(\n fqn=fqn,\n name=t.id,\n qualname=self._cur_qualname(),\n kind=\"variable\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n )\n self.symbols.append(sym)\n # capture __all__ = [\"...\"]\n try:\n names = []\n is_all = any(\n (isinstance(t, ast.Name) and t.id == \"__all__\") for t in node.targets\n )\n if is_all and isinstance(node.value, (ast.List, ast.Tuple)):\n for el in node.value.elts:\n if isinstance(el, ast.Constant) and isinstance(el.value, str):\n names.append(el.value)\n if names:\n self.exports.extend(names)\n except Exception:\n pass\n self.generic_visit(node)\n\n def _extract_callee_key(self, fn: ast.AST) -> Optional[str]:\n # simple name\n if isinstance(fn, ast.Name):\n return fn.id\n\n # super().method()\n if (\n isinstance(fn, ast.Attribute)\n and isinstance(fn.value, ast.Call)\n and isinstance(fn.value.func, ast.Name)\n and fn.value.func.id == \"super\"\n ):\n meth = fn.attr\n cur_cls = self._cur_class()\n if cur_cls:\n return f\"{self.module}.{cur_cls}.{meth}\"\n return meth\n\n # obj.attr chain\n if isinstance(fn, ast.Attribute):\n parts: List[str] = []\n cur = fn\n while isinstance(cur, ast.Attribute):\n parts.append(cur.attr)\n cur = cur.value\n parts.reverse()\n\n if isinstance(cur, ast.Name):\n base = cur.id\n if base in (\"self\", \"cls\"):\n cur_cls = self._cur_class()\n if cur_cls and parts:\n return f\"{self.module}.{cur_cls}.{parts[-1]}\"\n return f\"{self.module}.{cur_cls}\" if cur_cls else parts[-1]\n if base in self.imports:\n return f\"{base}:{parts[-1]}\" if parts else base\n return f\"{self.module}.{base}.{parts[-1]}\" if parts else base\n # getattr(module, \"name\") heuristic\n if (\n isinstance(fn, ast.Call)\n and isinstance(fn.func, ast.Name)\n and fn.func.id == \"getattr\"\n and fn.args\n and len(fn.args) >= 2\n and isinstance(fn.args[0], ast.Name)\n and isinstance(fn.args[1], ast.Constant)\n and isinstance(fn.args[1].value, str)\n ):\n base = fn.args[0].id\n name = fn.args[1].value\n if base in self.imports:\n return f\"{self.imports[base]}.{name}\"\n cur_cls = self._cur_class()\n if base in (\"self\", \"cls\") and cur_cls:\n return f\"{self.module}.{cur_cls}.{name}\"\n return f\"{self.module}.{base}.{name}\"\n # importlib.import_module(\"pkg.mod\") heuristic\n if (\n isinstance(fn, ast.Call)\n and isinstance(fn.func, ast.Attribute)\n and isinstance(fn.func.value, ast.Name)\n and fn.func.value.id == \"importlib\"\n and fn.func.attr == \"import_module\"\n and fn.args\n and isinstance(fn.args[0], ast.Constant)\n and isinstance(fn.args[0].value, str)\n ):\n mod = str(fn.args[0].value)\n return mod\n return None\n\n\nif __name__ == \"__main__\":\n _cli()","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.__init__","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.__init__#L962-L972","kind":"function","name":"__init__","path":"examples/scripts/code_graph.py","language":"python","start_line":962,"end_line":972,"context_start_line":942,"context_end_line":992,"code":" print(json.dumps(g.unresolved_calls()))\n return\n if args.dump:\n print(\n json.dumps(\n {\n \"files\": len(g.indexed_files),\n \"symbols\": len(g.symbols_by_fqn),\n \"modules\": len(g.modules),\n \"calls\": len(g.calls),\n \"coverage_files\": len(g.coverage_files),\n }\n )\n )\n 
return\n # Dump summary\n print(json.dumps({\"files\": len(g.indexed_files), \"symbols\": len(g.symbols_by_fqn)}))\n\n\nclass _ModuleVisitor(ast.NodeVisitor):\n def __init__(self, module: str, path: str) -> None:\n self.module = module\n self.path = path\n self.symbols: List[Symbol] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_key)\n self.stack: List[str] = [] # qualname stack\n self.class_stack: List[str] = []\n self.imports: Dict[str, str] = {}\n self.import_modules: List[str] = []\n self.star_imports: List[str] = []\n self.exports: List[str] = []\n\n def _cur_qualname(self) -> str:\n return \".\".join(self.stack)\n\n def _cur_class(self) -> Optional[str]:\n return self.class_stack[-1] if self.class_stack else None\n\n def _fqn(self, name: str) -> str:\n q = self._cur_qualname()\n return f\"{self.module}.{q + ('.' if q else '')}{name}\"\n\n def visit_Import(self, node: ast.Import) -> Any: # type: ignore[override]\n for alias in node.names:\n asname = alias.asname or alias.name.split(\".\")[-1]\n self.imports[asname] = alias.name\n self.import_modules.append(alias.name.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node: ast.ImportFrom) -> Any: # type: ignore[override]\n # Resolve relative imports: from .x import y","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._is_ignored","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._is_ignored#L78-L92","kind":"function","name":"_is_ignored","path":"examples/scripts/code_graph.py","language":"python","start_line":78,"end_line":92,"context_start_line":58,"context_end_line":112,"code":" if pathspec is not None and os.path.exists(gi):\n with open(gi, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n lines = [ln.rstrip(\"\\n\") for ln in fh]\n self._pspec = pathspec.PathSpec.from_lines(\"gitwildmatch\", lines)\n except Exception:\n self._pspec = None\n self.symbols_by_fqn: Dict[str, Symbol] = {}\n self.symbols_by_name: Dict[str, List[str]] = {}\n self.modules: Dict[str, ModuleInfo] = {}\n self.indexed_files: List[str] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_fqn_or_key)\n self.module_to_tests: Dict[str, List[str]] = {}\n self.coverage_files: Dict[str, set[int]] = {}\n self.symbol_coverage: Dict[str, float] = {}\n self.module_imports: Dict[str, List[str]] = {}\n self.module_star_imports: Dict[str, List[str]] = {}\n self.pytest_nodes_by_module: Dict[str, List[str]] = {}\n self._cached_mtimes: Dict[str, int] = {}\n self._cached_hashes: Dict[str, str] = {}\n\n def _is_ignored(self, rel: str) -> bool:\n try:\n r = rel.replace(os.sep, \"/\")\n # pathspec first\n if self._pspec is not None:\n if self._pspec.match_file(r):\n return True\n # fallback: prefix match\n for pat in self._ignore:\n pp = pat.replace(os.sep, \"/\")\n if r == pp or r.startswith(pp + \"/\"):\n return True\n return False\n except Exception:\n return False\n\n @classmethod\n def load_or_build(cls, root: str, *, ignore_cache: bool = False, ignore: Optional[List[str]] = None) -> \"CodeGraph\":\n g = cls(root=root, ignore=ignore)\n g.build(ignore_cache=ignore_cache)\n return g\n\n def build(self, ignore_cache: bool = False) -> None:\n cache_path = os.path.join(self.root, \".codegraph.json\")\n if (not ignore_cache) and self._load_cache_relaxed(cache_path):\n # Incremental: reindex changed and dependents\n changed, removed = 
self._detect_changed_files(\n self._cached_mtimes, self._cached_hashes\n )\n if not changed and not removed:\n return\n self._incremental_reindex(changed, removed)\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.load_or_build","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.load_or_build#L95-L98","kind":"function","name":"load_or_build","path":"examples/scripts/code_graph.py","language":"python","start_line":95,"end_line":98,"context_start_line":75,"context_end_line":118,"code":" self._cached_mtimes: Dict[str, int] = {}\n self._cached_hashes: Dict[str, str] = {}\n\n def _is_ignored(self, rel: str) -> bool:\n try:\n r = rel.replace(os.sep, \"/\")\n # pathspec first\n if self._pspec is not None:\n if self._pspec.match_file(r):\n return True\n # fallback: prefix match\n for pat in self._ignore:\n pp = pat.replace(os.sep, \"/\")\n if r == pp or r.startswith(pp + \"/\"):\n return True\n return False\n except Exception:\n return False\n\n @classmethod\n def load_or_build(cls, root: str, *, ignore_cache: bool = False, ignore: Optional[List[str]] = None) -> \"CodeGraph\":\n g = cls(root=root, ignore=ignore)\n g.build(ignore_cache=ignore_cache)\n return g\n\n def build(self, ignore_cache: bool = False) -> None:\n cache_path = os.path.join(self.root, \".codegraph.json\")\n if (not ignore_cache) and self._load_cache_relaxed(cache_path):\n # Incremental: reindex changed and dependents\n changed, removed = self._detect_changed_files(\n self._cached_mtimes, self._cached_hashes\n )\n if not changed and not removed:\n return\n self._incremental_reindex(changed, removed)\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)\n return\n for dirpath, dirnames, filenames in os.walk(self.root):\n # prune ignored directories in-place\n dir_rel = os.path.relpath(dirpath, self.root)\n # remove child dirs that are ignored\n dirnames[:] = [d for d in dirnames if not self._is_ignored(os.path.join(dir_rel, d))]","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.build","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.build#L100-L142","kind":"function","name":"build","path":"examples/scripts/code_graph.py","language":"python","start_line":100,"end_line":142,"context_start_line":80,"context_end_line":162,"code":" r = rel.replace(os.sep, \"/\")\n # pathspec first\n if self._pspec is not None:\n if self._pspec.match_file(r):\n return True\n # fallback: prefix match\n for pat in self._ignore:\n pp = pat.replace(os.sep, \"/\")\n if r == pp or r.startswith(pp + \"/\"):\n return True\n return False\n except Exception:\n return False\n\n @classmethod\n def load_or_build(cls, root: str, *, ignore_cache: bool = False, ignore: Optional[List[str]] = None) -> \"CodeGraph\":\n g = cls(root=root, ignore=ignore)\n g.build(ignore_cache=ignore_cache)\n return g\n\n def build(self, ignore_cache: bool = False) -> None:\n cache_path = os.path.join(self.root, \".codegraph.json\")\n if (not ignore_cache) and self._load_cache_relaxed(cache_path):\n # Incremental: reindex changed and dependents\n changed, removed = self._detect_changed_files(\n self._cached_mtimes, self._cached_hashes\n 
)\n if not changed and not removed:\n return\n self._incremental_reindex(changed, removed)\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)\n return\n for dirpath, dirnames, filenames in os.walk(self.root):\n # prune ignored directories in-place\n dir_rel = os.path.relpath(dirpath, self.root)\n # remove child dirs that are ignored\n dirnames[:] = [d for d in dirnames if not self._is_ignored(os.path.join(dir_rel, d))]\n if self._is_ignored(dir_rel):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n fpath = os.path.join(dirpath, fn)\n if self._is_ignored(os.path.relpath(fpath, self.root)):\n continue\n try:\n src = open(fpath, \"r\", encoding=\"utf-8\").read()\n except Exception:\n continue\n try:\n tree = ast.parse(src)\n except Exception:\n continue\n self.indexed_files.append(fpath)\n self._index_module(fpath, tree)\n # Build test mapping from imports in test modules\n self._build_test_mapping()\n # Expand star imports and post-resolve call targets\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)\n\n def _add_symbol(self, sym: Symbol) -> None:\n self.symbols_by_fqn[sym.fqn] = sym\n self.symbols_by_name.setdefault(sym.name, []).append(sym.fqn)\n mi = self.modules.setdefault(\n sym.module, ModuleInfo(module=sym.module, file=sym.file)\n )\n if sym.fqn not in mi.defs:\n mi.defs.append(sym.fqn)\n\n def _index_module(self, path: str, tree: ast.AST) -> None:\n module = self._module_name_for_path(path)\n is_test = (\"/tests/\" in path) or (os.path.basename(path).startswith(\"test_\"))\n self.modules.setdefault(\n module, ModuleInfo(module=module, file=path, is_test=is_test)\n )\n # Add module symbol\n mod_fqn = module\n mod_name = module.split(\".\")[-1]\n self._add_symbol(","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._add_symbol","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._add_symbol#L144-L151","kind":"function","name":"_add_symbol","path":"examples/scripts/code_graph.py","language":"python","start_line":144,"end_line":151,"context_start_line":124,"context_end_line":171,"code":" fpath = os.path.join(dirpath, fn)\n if self._is_ignored(os.path.relpath(fpath, self.root)):\n continue\n try:\n src = open(fpath, \"r\", encoding=\"utf-8\").read()\n except Exception:\n continue\n try:\n tree = ast.parse(src)\n except Exception:\n continue\n self.indexed_files.append(fpath)\n self._index_module(fpath, tree)\n # Build test mapping from imports in test modules\n self._build_test_mapping()\n # Expand star imports and post-resolve call targets\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)\n\n def _add_symbol(self, sym: Symbol) -> None:\n self.symbols_by_fqn[sym.fqn] = sym\n self.symbols_by_name.setdefault(sym.name, []).append(sym.fqn)\n mi = self.modules.setdefault(\n sym.module, ModuleInfo(module=sym.module, file=sym.file)\n )\n if sym.fqn not in mi.defs:\n mi.defs.append(sym.fqn)\n\n def _index_module(self, path: str, tree: ast.AST) -> None:\n module = self._module_name_for_path(path)\n is_test = (\"/tests/\" in path) or (os.path.basename(path).startswith(\"test_\"))\n self.modules.setdefault(\n module, ModuleInfo(module=module, file=path, is_test=is_test)\n )\n # Add module symbol\n mod_fqn = module\n mod_name = module.split(\".\")[-1]\n self._add_symbol(\n Symbol(\n 
fqn=mod_fqn,\n name=mod_name,\n qualname=\"\",\n kind=\"module\",\n module=module,\n file=path,\n line=1,\n end_line=1,","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._index_module","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._index_module#L153-L195","kind":"function","name":"_index_module","path":"examples/scripts/code_graph.py","language":"python","start_line":153,"end_line":195,"context_start_line":133,"context_end_line":215,"code":" except Exception:\n continue\n self.indexed_files.append(fpath)\n self._index_module(fpath, tree)\n # Build test mapping from imports in test modules\n self._build_test_mapping()\n # Expand star imports and post-resolve call targets\n self._expand_star_imports()\n self._post_resolve_calls()\n self._save_cache(cache_path)\n\n def _add_symbol(self, sym: Symbol) -> None:\n self.symbols_by_fqn[sym.fqn] = sym\n self.symbols_by_name.setdefault(sym.name, []).append(sym.fqn)\n mi = self.modules.setdefault(\n sym.module, ModuleInfo(module=sym.module, file=sym.file)\n )\n if sym.fqn not in mi.defs:\n mi.defs.append(sym.fqn)\n\n def _index_module(self, path: str, tree: ast.AST) -> None:\n module = self._module_name_for_path(path)\n is_test = (\"/tests/\" in path) or (os.path.basename(path).startswith(\"test_\"))\n self.modules.setdefault(\n module, ModuleInfo(module=module, file=path, is_test=is_test)\n )\n # Add module symbol\n mod_fqn = module\n mod_name = module.split(\".\")[-1]\n self._add_symbol(\n Symbol(\n fqn=mod_fqn,\n name=mod_name,\n qualname=\"\",\n kind=\"module\",\n module=module,\n file=path,\n line=1,\n end_line=1,\n )\n )\n # Visit\n visitor = _ModuleVisitor(module, path)\n visitor.visit(tree)\n # Register imports\n self.modules[module].imports.update(visitor.imports)\n # Module dependency edges\n self.module_imports[module] = sorted(visitor.import_modules)\n # Record star imports for later expansion\n self.module_star_imports[module] = list(getattr(visitor, \"star_imports\", []))\n # Record __all__ exports\n self.modules[module].exports = list(getattr(visitor, \"exports\", []))\n # Register defs\n for sym in visitor.symbols:\n self._add_symbol(sym)\n # Register calls\n for caller, callee_key in visitor.calls:\n callee_fqn = self._resolve_callee(module, callee_key, visitor)\n self.calls.append((caller, callee_fqn or callee_key))\n # Collect pytest nodes if test module\n if is_test:\n rel = os.path.relpath(path, self.root)\n self.pytest_nodes_by_module[module] = self._collect_pytest_nodes(tree, rel)\n\n def owners_of(self, symbol: str) -> List[str]:\n fqns = self.symbols_by_name.get(symbol, [])\n return sorted(\n {os.path.relpath(self.symbols_by_fqn[f].file, self.root) for f in fqns}\n )\n\n def find_symbol(self, name: str) -> List[Symbol]:\n return [self.symbols_by_fqn[f] for f in self.symbols_by_name.get(name, [])]\n\n def defs_in(self, module: str) -> List[str]:\n mi = self.modules.get(module)\n return list(mi.defs) if mi else []\n\n def calls_of(self, fqn: str) -> List[str]:\n return [c for (caller, c) in self.calls if caller == fqn]\n\n def who_calls(self, fqn: str) -> List[str]:\n target_short = fqn.split(\".\")[-1]\n out: List[str] = []","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} 
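The `build` records above walk the source tree with `os.walk`, prune ignored directories in place, parse each `.py` file with `ast.parse`, and hand the tree to `_index_module`. A minimal, self-contained sketch of that walk-and-parse skeleton, for orientation only: it omits the cache, `.gitignore`/`pathspec` handling, and call-edge collection, and the hidden-directory prune below is a stand-in for the real `_is_ignored` logic, not a reproduction of it.

```python
import ast
import os
from typing import Dict, List


def index_defs(root: str) -> Dict[str, List[str]]:
    """Map relative .py path -> top-level function/class names under root."""
    out: Dict[str, List[str]] = {}
    for dirpath, dirnames, filenames in os.walk(root):
        # Prune unwanted directories in place so os.walk never descends into
        # them (stand-in for the pathspec/prefix matching in _is_ignored).
        dirnames[:] = [d for d in dirnames if not d.startswith(".")]
        for fn in filenames:
            if not fn.endswith(".py"):
                continue
            fpath = os.path.join(dirpath, fn)
            try:
                with open(fpath, "r", encoding="utf-8") as fh:
                    tree = ast.parse(fh.read())
            except (OSError, SyntaxError, ValueError):
                continue  # unreadable/unparsable files are skipped, as in build()
            out[os.path.relpath(fpath, root)] = [
                n.name
                for n in tree.body
                if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef))
            ]
    return out


if __name__ == "__main__":
    for rel, names in sorted(index_defs(".").items()):
        print(rel, names)
```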
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.owners_of","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.owners_of#L197-L201","kind":"function","name":"owners_of","path":"examples/scripts/code_graph.py","language":"python","start_line":197,"end_line":201,"context_start_line":177,"context_end_line":221,"code":" # Register imports\n self.modules[module].imports.update(visitor.imports)\n # Module dependency edges\n self.module_imports[module] = sorted(visitor.import_modules)\n # Record star imports for later expansion\n self.module_star_imports[module] = list(getattr(visitor, \"star_imports\", []))\n # Record __all__ exports\n self.modules[module].exports = list(getattr(visitor, \"exports\", []))\n # Register defs\n for sym in visitor.symbols:\n self._add_symbol(sym)\n # Register calls\n for caller, callee_key in visitor.calls:\n callee_fqn = self._resolve_callee(module, callee_key, visitor)\n self.calls.append((caller, callee_fqn or callee_key))\n # Collect pytest nodes if test module\n if is_test:\n rel = os.path.relpath(path, self.root)\n self.pytest_nodes_by_module[module] = self._collect_pytest_nodes(tree, rel)\n\n def owners_of(self, symbol: str) -> List[str]:\n fqns = self.symbols_by_name.get(symbol, [])\n return sorted(\n {os.path.relpath(self.symbols_by_fqn[f].file, self.root) for f in fqns}\n )\n\n def find_symbol(self, name: str) -> List[Symbol]:\n return [self.symbols_by_fqn[f] for f in self.symbols_by_name.get(name, [])]\n\n def defs_in(self, module: str) -> List[str]:\n mi = self.modules.get(module)\n return list(mi.defs) if mi else []\n\n def calls_of(self, fqn: str) -> List[str]:\n return [c for (caller, c) in self.calls if caller == fqn]\n\n def who_calls(self, fqn: str) -> List[str]:\n target_short = fqn.split(\".\")[-1]\n out: List[str] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append(caller)\n return out\n\n def search_refs(self, pattern: str) -> List[Tuple[str, int, str]]:","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.find_symbol","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.find_symbol#L203-L204","kind":"function","name":"find_symbol","path":"examples/scripts/code_graph.py","language":"python","start_line":203,"end_line":204,"context_start_line":183,"context_end_line":224,"code":" # Record __all__ exports\n self.modules[module].exports = list(getattr(visitor, \"exports\", []))\n # Register defs\n for sym in visitor.symbols:\n self._add_symbol(sym)\n # Register calls\n for caller, callee_key in visitor.calls:\n callee_fqn = self._resolve_callee(module, callee_key, visitor)\n self.calls.append((caller, callee_fqn or callee_key))\n # Collect pytest nodes if test module\n if is_test:\n rel = os.path.relpath(path, self.root)\n self.pytest_nodes_by_module[module] = self._collect_pytest_nodes(tree, rel)\n\n def owners_of(self, symbol: str) -> List[str]:\n fqns = self.symbols_by_name.get(symbol, [])\n return sorted(\n {os.path.relpath(self.symbols_by_fqn[f].file, self.root) for f in fqns}\n )\n\n def find_symbol(self, name: str) -> List[Symbol]:\n return [self.symbols_by_fqn[f] for f in self.symbols_by_name.get(name, [])]\n\n def defs_in(self, module: str) -> List[str]:\n mi = self.modules.get(module)\n return list(mi.defs) if mi else []\n\n def calls_of(self, fqn: 
str) -> List[str]:\n return [c for (caller, c) in self.calls if caller == fqn]\n\n def who_calls(self, fqn: str) -> List[str]:\n target_short = fqn.split(\".\")[-1]\n out: List[str] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append(caller)\n return out\n\n def search_refs(self, pattern: str) -> List[Tuple[str, int, str]]:\n \"\"\"Ripgrep-based raw reference search (file, line_no, text).\"\"\"\n try:\n import subprocess","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.defs_in","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.defs_in#L206-L208","kind":"function","name":"defs_in","path":"examples/scripts/code_graph.py","language":"python","start_line":206,"end_line":208,"context_start_line":186,"context_end_line":228,"code":" for sym in visitor.symbols:\n self._add_symbol(sym)\n # Register calls\n for caller, callee_key in visitor.calls:\n callee_fqn = self._resolve_callee(module, callee_key, visitor)\n self.calls.append((caller, callee_fqn or callee_key))\n # Collect pytest nodes if test module\n if is_test:\n rel = os.path.relpath(path, self.root)\n self.pytest_nodes_by_module[module] = self._collect_pytest_nodes(tree, rel)\n\n def owners_of(self, symbol: str) -> List[str]:\n fqns = self.symbols_by_name.get(symbol, [])\n return sorted(\n {os.path.relpath(self.symbols_by_fqn[f].file, self.root) for f in fqns}\n )\n\n def find_symbol(self, name: str) -> List[Symbol]:\n return [self.symbols_by_fqn[f] for f in self.symbols_by_name.get(name, [])]\n\n def defs_in(self, module: str) -> List[str]:\n mi = self.modules.get(module)\n return list(mi.defs) if mi else []\n\n def calls_of(self, fqn: str) -> List[str]:\n return [c for (caller, c) in self.calls if caller == fqn]\n\n def who_calls(self, fqn: str) -> List[str]:\n target_short = fqn.split(\".\")[-1]\n out: List[str] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append(caller)\n return out\n\n def search_refs(self, pattern: str) -> List[Tuple[str, int, str]]:\n \"\"\"Ripgrep-based raw reference search (file, line_no, text).\"\"\"\n try:\n import subprocess\n\n out = subprocess.check_output([\"rg\", \"-n\", pattern, self.root], text=True)\n rows: List[Tuple[str, int, str]] = []\n for line in out.splitlines():","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.calls_of","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.calls_of#L210-L211","kind":"function","name":"calls_of","path":"examples/scripts/code_graph.py","language":"python","start_line":210,"end_line":211,"context_start_line":190,"context_end_line":231,"code":" callee_fqn = self._resolve_callee(module, callee_key, visitor)\n self.calls.append((caller, callee_fqn or callee_key))\n # Collect pytest nodes if test module\n if is_test:\n rel = os.path.relpath(path, self.root)\n self.pytest_nodes_by_module[module] = self._collect_pytest_nodes(tree, rel)\n\n def owners_of(self, symbol: str) -> List[str]:\n fqns = self.symbols_by_name.get(symbol, [])\n return sorted(\n {os.path.relpath(self.symbols_by_fqn[f].file, self.root) for f in fqns}\n )\n\n def find_symbol(self, name: str) -> List[Symbol]:\n return [self.symbols_by_fqn[f] for f 
in self.symbols_by_name.get(name, [])]\n\n def defs_in(self, module: str) -> List[str]:\n mi = self.modules.get(module)\n return list(mi.defs) if mi else []\n\n def calls_of(self, fqn: str) -> List[str]:\n return [c for (caller, c) in self.calls if caller == fqn]\n\n def who_calls(self, fqn: str) -> List[str]:\n target_short = fqn.split(\".\")[-1]\n out: List[str] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append(caller)\n return out\n\n def search_refs(self, pattern: str) -> List[Tuple[str, int, str]]:\n \"\"\"Ripgrep-based raw reference search (file, line_no, text).\"\"\"\n try:\n import subprocess\n\n out = subprocess.check_output([\"rg\", \"-n\", pattern, self.root], text=True)\n rows: List[Tuple[str, int, str]] = []\n for line in out.splitlines():\n try:\n fp, ln, txt = line.split(\":\", 2)\n rows.append((os.path.relpath(fp, self.root), int(ln), txt))","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.who_calls","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.who_calls#L213-L219","kind":"function","name":"who_calls","path":"examples/scripts/code_graph.py","language":"python","start_line":213,"end_line":219,"context_start_line":193,"context_end_line":239,"code":" if is_test:\n rel = os.path.relpath(path, self.root)\n self.pytest_nodes_by_module[module] = self._collect_pytest_nodes(tree, rel)\n\n def owners_of(self, symbol: str) -> List[str]:\n fqns = self.symbols_by_name.get(symbol, [])\n return sorted(\n {os.path.relpath(self.symbols_by_fqn[f].file, self.root) for f in fqns}\n )\n\n def find_symbol(self, name: str) -> List[Symbol]:\n return [self.symbols_by_fqn[f] for f in self.symbols_by_name.get(name, [])]\n\n def defs_in(self, module: str) -> List[str]:\n mi = self.modules.get(module)\n return list(mi.defs) if mi else []\n\n def calls_of(self, fqn: str) -> List[str]:\n return [c for (caller, c) in self.calls if caller == fqn]\n\n def who_calls(self, fqn: str) -> List[str]:\n target_short = fqn.split(\".\")[-1]\n out: List[str] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append(caller)\n return out\n\n def search_refs(self, pattern: str) -> List[Tuple[str, int, str]]:\n \"\"\"Ripgrep-based raw reference search (file, line_no, text).\"\"\"\n try:\n import subprocess\n\n out = subprocess.check_output([\"rg\", \"-n\", pattern, self.root], text=True)\n rows: List[Tuple[str, int, str]] = []\n for line in out.splitlines():\n try:\n fp, ln, txt = line.split(\":\", 2)\n rows.append((os.path.relpath(fp, self.root), int(ln), txt))\n except Exception:\n continue\n return rows\n except Exception:\n # Fallback: simple Python regex over indexed .py files\n rows: List[Tuple[str, int, str]] = []\n try:\n rx = re.compile(pattern)","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.search_refs","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.search_refs#L221-L252","kind":"function","name":"search_refs","path":"examples/scripts/code_graph.py","language":"python","start_line":221,"end_line":252,"context_start_line":201,"context_end_line":272,"code":" )\n\n def find_symbol(self, name: str) -> List[Symbol]:\n return [self.symbols_by_fqn[f] for 
f in self.symbols_by_name.get(name, [])]\n\n def defs_in(self, module: str) -> List[str]:\n mi = self.modules.get(module)\n return list(mi.defs) if mi else []\n\n def calls_of(self, fqn: str) -> List[str]:\n return [c for (caller, c) in self.calls if caller == fqn]\n\n def who_calls(self, fqn: str) -> List[str]:\n target_short = fqn.split(\".\")[-1]\n out: List[str] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append(caller)\n return out\n\n def search_refs(self, pattern: str) -> List[Tuple[str, int, str]]:\n \"\"\"Ripgrep-based raw reference search (file, line_no, text).\"\"\"\n try:\n import subprocess\n\n out = subprocess.check_output([\"rg\", \"-n\", pattern, self.root], text=True)\n rows: List[Tuple[str, int, str]] = []\n for line in out.splitlines():\n try:\n fp, ln, txt = line.split(\":\", 2)\n rows.append((os.path.relpath(fp, self.root), int(ln), txt))\n except Exception:\n continue\n return rows\n except Exception:\n # Fallback: simple Python regex over indexed .py files\n rows: List[Tuple[str, int, str]] = []\n try:\n rx = re.compile(pattern)\n except Exception:\n # If pattern is not a valid regex, escape it\n rx = re.compile(re.escape(pattern))\n for fpath in self.indexed_files:\n rel = os.path.relpath(fpath, self.root)\n try:\n with open(fpath, \"r\", encoding=\"utf-8\", errors=\"ignore\") as rf:\n for i, ln in enumerate(rf, start=1):\n if rx.search(ln):\n rows.append((rel, i, ln.rstrip(\"\\n\")))\n except Exception:\n continue\n return rows\n\n # --- Helpers --- #\n\n def module_for_file(self, path: str) -> Optional[str]:\n p = path\n if not os.path.isabs(p):\n p = os.path.abspath(os.path.join(self.root, path))\n for mod, mi in self.modules.items():\n if os.path.abspath(mi.file) == p:\n return mod\n return None\n\n def file_for_module(self, module: str) -> Optional[str]:\n mi = self.modules.get(module)\n return mi.file if mi else None\n\n def tests_for_module(self, module: str) -> List[str]:\n base = module.split(\".\")[0]\n out = set(self.module_to_tests.get(base, []))\n # include direct module key if present","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.module_for_file","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.module_for_file#L256-L263","kind":"function","name":"module_for_file","path":"examples/scripts/code_graph.py","language":"python","start_line":256,"end_line":263,"context_start_line":236,"context_end_line":283,"code":" # Fallback: simple Python regex over indexed .py files\n rows: List[Tuple[str, int, str]] = []\n try:\n rx = re.compile(pattern)\n except Exception:\n # If pattern is not a valid regex, escape it\n rx = re.compile(re.escape(pattern))\n for fpath in self.indexed_files:\n rel = os.path.relpath(fpath, self.root)\n try:\n with open(fpath, \"r\", encoding=\"utf-8\", errors=\"ignore\") as rf:\n for i, ln in enumerate(rf, start=1):\n if rx.search(ln):\n rows.append((rel, i, ln.rstrip(\"\\n\")))\n except Exception:\n continue\n return rows\n\n # --- Helpers --- #\n\n def module_for_file(self, path: str) -> Optional[str]:\n p = path\n if not os.path.isabs(p):\n p = os.path.abspath(os.path.join(self.root, path))\n for mod, mi in self.modules.items():\n if os.path.abspath(mi.file) == p:\n return mod\n return None\n\n def file_for_module(self, module: str) -> Optional[str]:\n mi = self.modules.get(module)\n return mi.file if 
mi else None\n\n def tests_for_module(self, module: str) -> List[str]:\n base = module.split(\".\")[0]\n out = set(self.module_to_tests.get(base, []))\n # include direct module key if present\n out.update(self.module_to_tests.get(module, []))\n return sorted(out)\n\n def tests_for_symbol(self, fqn: str) -> List[str]:\n mod = fqn.rsplit(\".\", 1)[0] if \".\" in fqn else fqn\n return self.tests_for_module(mod)\n\n def refs_of(self, fqn: str) -> List[Tuple[str, str]]:\n \"\"\"Return (caller_fqn, callee_match) entries that reference fqn or its short name.\"\"\"\n target_short = fqn.split(\".\")[-1]\n out: List[Tuple[str, str]] = []","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.file_for_module","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.file_for_module#L265-L267","kind":"function","name":"file_for_module","path":"examples/scripts/code_graph.py","language":"python","start_line":265,"end_line":267,"context_start_line":245,"context_end_line":287,"code":" try:\n with open(fpath, \"r\", encoding=\"utf-8\", errors=\"ignore\") as rf:\n for i, ln in enumerate(rf, start=1):\n if rx.search(ln):\n rows.append((rel, i, ln.rstrip(\"\\n\")))\n except Exception:\n continue\n return rows\n\n # --- Helpers --- #\n\n def module_for_file(self, path: str) -> Optional[str]:\n p = path\n if not os.path.isabs(p):\n p = os.path.abspath(os.path.join(self.root, path))\n for mod, mi in self.modules.items():\n if os.path.abspath(mi.file) == p:\n return mod\n return None\n\n def file_for_module(self, module: str) -> Optional[str]:\n mi = self.modules.get(module)\n return mi.file if mi else None\n\n def tests_for_module(self, module: str) -> List[str]:\n base = module.split(\".\")[0]\n out = set(self.module_to_tests.get(base, []))\n # include direct module key if present\n out.update(self.module_to_tests.get(module, []))\n return sorted(out)\n\n def tests_for_symbol(self, fqn: str) -> List[str]:\n mod = fqn.rsplit(\".\", 1)[0] if \".\" in fqn else fqn\n return self.tests_for_module(mod)\n\n def refs_of(self, fqn: str) -> List[Tuple[str, str]]:\n \"\"\"Return (caller_fqn, callee_match) entries that reference fqn or its short name.\"\"\"\n target_short = fqn.split(\".\")[-1]\n out: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append((caller, callee))\n return out","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.tests_for_module","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.tests_for_module#L269-L274","kind":"function","name":"tests_for_module","path":"examples/scripts/code_graph.py","language":"python","start_line":269,"end_line":274,"context_start_line":249,"context_end_line":294,"code":" rows.append((rel, i, ln.rstrip(\"\\n\")))\n except Exception:\n continue\n return rows\n\n # --- Helpers --- #\n\n def module_for_file(self, path: str) -> Optional[str]:\n p = path\n if not os.path.isabs(p):\n p = os.path.abspath(os.path.join(self.root, path))\n for mod, mi in self.modules.items():\n if os.path.abspath(mi.file) == p:\n return mod\n return None\n\n def file_for_module(self, module: str) -> Optional[str]:\n mi = self.modules.get(module)\n return mi.file if mi else None\n\n def 
tests_for_module(self, module: str) -> List[str]:\n base = module.split(\".\")[0]\n out = set(self.module_to_tests.get(base, []))\n # include direct module key if present\n out.update(self.module_to_tests.get(module, []))\n return sorted(out)\n\n def tests_for_symbol(self, fqn: str) -> List[str]:\n mod = fqn.rsplit(\".\", 1)[0] if \".\" in fqn else fqn\n return self.tests_for_module(mod)\n\n def refs_of(self, fqn: str) -> List[Tuple[str, str]]:\n \"\"\"Return (caller_fqn, callee_match) entries that reference fqn or its short name.\"\"\"\n target_short = fqn.split(\".\")[-1]\n out: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append((caller, callee))\n return out\n\n def export_json(self) -> Dict[str, Any]:\n return {\n \"root\": self.root,\n \"files\": [os.path.relpath(p, self.root) for p in self.indexed_files],\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for k, v in self.modules.items()},","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.tests_for_symbol","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.tests_for_symbol#L276-L278","kind":"function","name":"tests_for_symbol","path":"examples/scripts/code_graph.py","language":"python","start_line":276,"end_line":278,"context_start_line":256,"context_end_line":298,"code":" def module_for_file(self, path: str) -> Optional[str]:\n p = path\n if not os.path.isabs(p):\n p = os.path.abspath(os.path.join(self.root, path))\n for mod, mi in self.modules.items():\n if os.path.abspath(mi.file) == p:\n return mod\n return None\n\n def file_for_module(self, module: str) -> Optional[str]:\n mi = self.modules.get(module)\n return mi.file if mi else None\n\n def tests_for_module(self, module: str) -> List[str]:\n base = module.split(\".\")[0]\n out = set(self.module_to_tests.get(base, []))\n # include direct module key if present\n out.update(self.module_to_tests.get(module, []))\n return sorted(out)\n\n def tests_for_symbol(self, fqn: str) -> List[str]:\n mod = fqn.rsplit(\".\", 1)[0] if \".\" in fqn else fqn\n return self.tests_for_module(mod)\n\n def refs_of(self, fqn: str) -> List[Tuple[str, str]]:\n \"\"\"Return (caller_fqn, callee_match) entries that reference fqn or its short name.\"\"\"\n target_short = fqn.split(\".\")[-1]\n out: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append((caller, callee))\n return out\n\n def export_json(self) -> Dict[str, Any]:\n return {\n \"root\": self.root,\n \"files\": [os.path.relpath(p, self.root) for p in self.indexed_files],\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for k, v in self.modules.items()},\n \"calls\": self.calls,\n \"module_to_tests\": self.module_to_tests,\n \"coverage_files\": {\n os.path.relpath(k, self.root): sorted(list(v))","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} 
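As the `refs_of` docstring above notes, `who_calls` and `refs_of` match either the exact FQN or its short-name suffix, so same-named callees in unrelated modules can over-match. A hedged sketch of consuming the `export_json()` payload offline with the same heuristic; the `graph.json` path and the example FQN are assumptions, with the export produced e.g. via the CLI's `--export` flag.

```python
import json
from typing import List


def who_calls_offline(export_path: str, fqn: str) -> List[str]:
    """Offline version of CodeGraph.who_calls over an --export JSON payload."""
    with open(export_path, "r", encoding="utf-8") as fh:
        graph = json.load(fh)
    short = fqn.split(".")[-1]
    # Same heuristic as the in-memory query: exact FQN match or short-name
    # suffix match, which deliberately trades precision for recall.
    return [
        caller
        for caller, callee in graph["calls"]
        if callee == fqn or callee.split(".")[-1] == short
    ]


if __name__ == "__main__":
    # Hypothetical path/FQN; produce the export first, e.g.:
    #   python examples/scripts/code_graph.py . --export graph.json
    print(who_calls_offline("graph.json", "examples.scripts.code_graph.CodeGraph.build"))
```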
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.refs_of","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.refs_of#L280-L287","kind":"function","name":"refs_of","path":"examples/scripts/code_graph.py","language":"python","start_line":280,"end_line":287,"context_start_line":260,"context_end_line":307,"code":" for mod, mi in self.modules.items():\n if os.path.abspath(mi.file) == p:\n return mod\n return None\n\n def file_for_module(self, module: str) -> Optional[str]:\n mi = self.modules.get(module)\n return mi.file if mi else None\n\n def tests_for_module(self, module: str) -> List[str]:\n base = module.split(\".\")[0]\n out = set(self.module_to_tests.get(base, []))\n # include direct module key if present\n out.update(self.module_to_tests.get(module, []))\n return sorted(out)\n\n def tests_for_symbol(self, fqn: str) -> List[str]:\n mod = fqn.rsplit(\".\", 1)[0] if \".\" in fqn else fqn\n return self.tests_for_module(mod)\n\n def refs_of(self, fqn: str) -> List[Tuple[str, str]]:\n \"\"\"Return (caller_fqn, callee_match) entries that reference fqn or its short name.\"\"\"\n target_short = fqn.split(\".\")[-1]\n out: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append((caller, callee))\n return out\n\n def export_json(self) -> Dict[str, Any]:\n return {\n \"root\": self.root,\n \"files\": [os.path.relpath(p, self.root) for p in self.indexed_files],\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for k, v in self.modules.items()},\n \"calls\": self.calls,\n \"module_to_tests\": self.module_to_tests,\n \"coverage_files\": {\n os.path.relpath(k, self.root): sorted(list(v))\n for k, v in self.coverage_files.items()\n },\n \"symbol_coverage\": self.symbol_coverage,\n \"module_imports\": self.module_imports,\n }\n\n def export_sqlite(self, db_path: str) -> None:\n import sqlite3\n","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.export_json","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.export_json#L289-L303","kind":"function","name":"export_json","path":"examples/scripts/code_graph.py","language":"python","start_line":289,"end_line":303,"context_start_line":269,"context_end_line":323,"code":" def tests_for_module(self, module: str) -> List[str]:\n base = module.split(\".\")[0]\n out = set(self.module_to_tests.get(base, []))\n # include direct module key if present\n out.update(self.module_to_tests.get(module, []))\n return sorted(out)\n\n def tests_for_symbol(self, fqn: str) -> List[str]:\n mod = fqn.rsplit(\".\", 1)[0] if \".\" in fqn else fqn\n return self.tests_for_module(mod)\n\n def refs_of(self, fqn: str) -> List[Tuple[str, str]]:\n \"\"\"Return (caller_fqn, callee_match) entries that reference fqn or its short name.\"\"\"\n target_short = fqn.split(\".\")[-1]\n out: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append((caller, callee))\n return out\n\n def export_json(self) -> Dict[str, Any]:\n return {\n \"root\": self.root,\n \"files\": [os.path.relpath(p, self.root) for p in self.indexed_files],\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for 
k, v in self.modules.items()},\n \"calls\": self.calls,\n \"module_to_tests\": self.module_to_tests,\n \"coverage_files\": {\n os.path.relpath(k, self.root): sorted(list(v))\n for k, v in self.coverage_files.items()\n },\n \"symbol_coverage\": self.symbol_coverage,\n \"module_imports\": self.module_imports,\n }\n\n def export_sqlite(self, db_path: str) -> None:\n import sqlite3\n\n conn = sqlite3.connect(db_path)\n cur = conn.cursor()\n cur.executescript(\n \"\"\"\n PRAGMA journal_mode=WAL;\n CREATE TABLE IF NOT EXISTS files(path TEXT PRIMARY KEY);\n CREATE TABLE IF NOT EXISTS modules(module TEXT PRIMARY KEY, file TEXT, is_test INT);\n CREATE TABLE IF NOT EXISTS symbols(\n fqn TEXT PRIMARY KEY, name TEXT, qualname TEXT, kind TEXT, module TEXT,\n file TEXT, line INT, end_line INT, doc TEXT, signature TEXT, returns TEXT\n );\n CREATE TABLE IF NOT EXISTS calls(caller TEXT, callee TEXT);\n CREATE TABLE IF NOT EXISTS tests_map(module TEXT, test_module TEXT);\n CREATE TABLE IF NOT EXISTS coverage(file TEXT, line INT);\n CREATE TABLE IF NOT EXISTS mod_deps(module TEXT, dep TEXT);\n \"\"\"","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.export_sqlite","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.export_sqlite#L305-L380","kind":"function","name":"export_sqlite","path":"examples/scripts/code_graph.py","language":"python","start_line":305,"end_line":380,"context_start_line":285,"context_end_line":400,"code":" if callee == fqn or callee.split(\".\")[-1] == target_short:\n out.append((caller, callee))\n return out\n\n def export_json(self) -> Dict[str, Any]:\n return {\n \"root\": self.root,\n \"files\": [os.path.relpath(p, self.root) for p in self.indexed_files],\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for k, v in self.modules.items()},\n \"calls\": self.calls,\n \"module_to_tests\": self.module_to_tests,\n \"coverage_files\": {\n os.path.relpath(k, self.root): sorted(list(v))\n for k, v in self.coverage_files.items()\n },\n \"symbol_coverage\": self.symbol_coverage,\n \"module_imports\": self.module_imports,\n }\n\n def export_sqlite(self, db_path: str) -> None:\n import sqlite3\n\n conn = sqlite3.connect(db_path)\n cur = conn.cursor()\n cur.executescript(\n \"\"\"\n PRAGMA journal_mode=WAL;\n CREATE TABLE IF NOT EXISTS files(path TEXT PRIMARY KEY);\n CREATE TABLE IF NOT EXISTS modules(module TEXT PRIMARY KEY, file TEXT, is_test INT);\n CREATE TABLE IF NOT EXISTS symbols(\n fqn TEXT PRIMARY KEY, name TEXT, qualname TEXT, kind TEXT, module TEXT,\n file TEXT, line INT, end_line INT, doc TEXT, signature TEXT, returns TEXT\n );\n CREATE TABLE IF NOT EXISTS calls(caller TEXT, callee TEXT);\n CREATE TABLE IF NOT EXISTS tests_map(module TEXT, test_module TEXT);\n CREATE TABLE IF NOT EXISTS coverage(file TEXT, line INT);\n CREATE TABLE IF NOT EXISTS mod_deps(module TEXT, dep TEXT);\n \"\"\"\n )\n cur.executemany(\n \"INSERT OR IGNORE INTO files(path) VALUES(?)\",\n [(os.path.relpath(f, self.root),) for f in self.indexed_files],\n )\n cur.executemany(\n \"INSERT OR REPLACE INTO modules(module,file,is_test) VALUES(?,?,?)\",\n [\n (m, os.path.relpath(mi.file, self.root), 1 if mi.is_test else 0)\n for m, mi in self.modules.items()\n ],\n )\n cur.executemany(\n \"INSERT OR REPLACE INTO symbols VALUES(?,?,?,?,?,?,?,?,?,?,?)\",\n [\n (\n s.fqn,\n s.name,\n 
s.qualname,\n s.kind,\n s.module,\n os.path.relpath(s.file, self.root),\n int(s.line),\n int(s.end_line),\n s.doc or \"\",\n s.signature or \"\",\n s.returns or \"\",\n )\n for s in self.symbols_by_fqn.values()\n ],\n )\n if self.calls:\n cur.executemany(\n \"INSERT INTO calls(caller,callee) VALUES(?,?)\", list(self.calls)\n )\n rows = []\n for mod, tests in self.module_to_tests.items():\n for t in tests:\n rows.append((mod, t))\n if rows:\n cur.executemany(\n \"INSERT INTO tests_map(module,test_module) VALUES(?,?)\", rows\n )\n cov_rows = []\n for f, lines in self.coverage_files.items():\n rel = os.path.relpath(f, self.root)\n cov_rows.extend([(rel, int(n)) for n in lines])\n if cov_rows:\n cur.executemany(\"INSERT INTO coverage(file,line) VALUES(?,?)\", cov_rows)\n dep_rows = []\n for m, deps in self.module_imports.items():\n for d in deps:\n dep_rows.append((m, d))\n if dep_rows:\n cur.executemany(\"INSERT INTO mod_deps(module,dep) VALUES(?,?)\", dep_rows)\n conn.commit()\n conn.close()\n\n def _module_name_for_path(self, path: str) -> str:\n rel = os.path.relpath(path, self.root)\n no_ext = rel[:-3] if rel.endswith(\".py\") else rel\n parts = no_ext.split(os.sep)\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n return \".\".join(p for p in parts if p)\n\n def _resolve_callee(\n self, module: str, callee_key: str, visitor: \"_ModuleVisitor\"\n ) -> Optional[str]:\n if \".\" in callee_key and \":\" not in callee_key:\n return callee_key\n\n if \":\" in callee_key:\n mod_alias, name = callee_key.split(\":\", 1)\n target = visitor.imports.get(mod_alias)\n if target:\n return f\"{target}.{name}\" if not target.endswith(f\".{name}\") else target","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._module_name_for_path","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._module_name_for_path#L382-L388","kind":"function","name":"_module_name_for_path","path":"examples/scripts/code_graph.py","language":"python","start_line":382,"end_line":388,"context_start_line":362,"context_end_line":408,"code":" rows.append((mod, t))\n if rows:\n cur.executemany(\n \"INSERT INTO tests_map(module,test_module) VALUES(?,?)\", rows\n )\n cov_rows = []\n for f, lines in self.coverage_files.items():\n rel = os.path.relpath(f, self.root)\n cov_rows.extend([(rel, int(n)) for n in lines])\n if cov_rows:\n cur.executemany(\"INSERT INTO coverage(file,line) VALUES(?,?)\", cov_rows)\n dep_rows = []\n for m, deps in self.module_imports.items():\n for d in deps:\n dep_rows.append((m, d))\n if dep_rows:\n cur.executemany(\"INSERT INTO mod_deps(module,dep) VALUES(?,?)\", dep_rows)\n conn.commit()\n conn.close()\n\n def _module_name_for_path(self, path: str) -> str:\n rel = os.path.relpath(path, self.root)\n no_ext = rel[:-3] if rel.endswith(\".py\") else rel\n parts = no_ext.split(os.sep)\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n return \".\".join(p for p in parts if p)\n\n def _resolve_callee(\n self, module: str, callee_key: str, visitor: \"_ModuleVisitor\"\n ) -> Optional[str]:\n if \".\" in callee_key and \":\" not in callee_key:\n return callee_key\n\n if \":\" in callee_key:\n mod_alias, name = callee_key.split(\":\", 1)\n target = visitor.imports.get(mod_alias)\n if target:\n return f\"{target}.{name}\" if not target.endswith(f\".{name}\") else target\n\n mi = self.modules.get(module)\n if mi:\n # Prefer any def with same suffix name 
(matches within class or function)\n for f in mi.defs:\n if f.split(\".\")[-1] == callee_key:\n return f\n","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._resolve_callee","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._resolve_callee#L390-L412","kind":"function","name":"_resolve_callee","path":"examples/scripts/code_graph.py","language":"python","start_line":390,"end_line":412,"context_start_line":370,"context_end_line":432,"code":" cov_rows.extend([(rel, int(n)) for n in lines])\n if cov_rows:\n cur.executemany(\"INSERT INTO coverage(file,line) VALUES(?,?)\", cov_rows)\n dep_rows = []\n for m, deps in self.module_imports.items():\n for d in deps:\n dep_rows.append((m, d))\n if dep_rows:\n cur.executemany(\"INSERT INTO mod_deps(module,dep) VALUES(?,?)\", dep_rows)\n conn.commit()\n conn.close()\n\n def _module_name_for_path(self, path: str) -> str:\n rel = os.path.relpath(path, self.root)\n no_ext = rel[:-3] if rel.endswith(\".py\") else rel\n parts = no_ext.split(os.sep)\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n return \".\".join(p for p in parts if p)\n\n def _resolve_callee(\n self, module: str, callee_key: str, visitor: \"_ModuleVisitor\"\n ) -> Optional[str]:\n if \".\" in callee_key and \":\" not in callee_key:\n return callee_key\n\n if \":\" in callee_key:\n mod_alias, name = callee_key.split(\":\", 1)\n target = visitor.imports.get(mod_alias)\n if target:\n return f\"{target}.{name}\" if not target.endswith(f\".{name}\") else target\n\n mi = self.modules.get(module)\n if mi:\n # Prefer any def with same suffix name (matches within class or function)\n for f in mi.defs:\n if f.split(\".\")[-1] == callee_key:\n return f\n\n tgt = visitor.imports.get(callee_key)\n if tgt:\n return tgt\n return None\n\n def _build_test_mapping(self) -> None:\n for mod, mi in self.modules.items():\n if not mi.is_test:\n continue\n for alias, target in mi.imports.items():\n # target may be module or module.symbol\n m = target.split(\".\")[0]\n self.module_to_tests.setdefault(m, []).append(mod)\n\n def _expand_star_imports(self) -> None:\n for mod, stars in self.module_star_imports.items():\n mi = self.modules.get(mod)\n if not mi:\n continue\n for star_mod in stars:\n defs = [\n f\n for f in self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._build_test_mapping","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._build_test_mapping#L414-L421","kind":"function","name":"_build_test_mapping","path":"examples/scripts/code_graph.py","language":"python","start_line":414,"end_line":421,"context_start_line":394,"context_end_line":441,"code":" return callee_key\n\n if \":\" in callee_key:\n mod_alias, name = callee_key.split(\":\", 1)\n target = visitor.imports.get(mod_alias)\n if target:\n return f\"{target}.{name}\" if not target.endswith(f\".{name}\") else target\n\n mi = self.modules.get(module)\n if mi:\n # Prefer any def with same suffix name (matches within class or function)\n for f in mi.defs:\n if f.split(\".\")[-1] == callee_key:\n return f\n\n tgt = visitor.imports.get(callee_key)\n if tgt:\n return tgt\n return None\n\n def _build_test_mapping(self) -> None:\n for mod, mi in 
self.modules.items():\n if not mi.is_test:\n continue\n for alias, target in mi.imports.items():\n # target may be module or module.symbol\n m = target.split(\".\")[0]\n self.module_to_tests.setdefault(m, []).append(mod)\n\n def _expand_star_imports(self) -> None:\n for mod, stars in self.module_star_imports.items():\n mi = self.modules.get(mod)\n if not mi:\n continue\n for star_mod in stars:\n defs = [\n f\n for f in self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")\n ).defs\n ]\n exports = set(\n self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")\n ).exports\n or []\n )\n for fqn in defs:","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._expand_star_imports","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._expand_star_imports#L423-L449","kind":"function","name":"_expand_star_imports","path":"examples/scripts/code_graph.py","language":"python","start_line":423,"end_line":449,"context_start_line":403,"context_end_line":469,"code":" if mi:\n # Prefer any def with same suffix name (matches within class or function)\n for f in mi.defs:\n if f.split(\".\")[-1] == callee_key:\n return f\n\n tgt = visitor.imports.get(callee_key)\n if tgt:\n return tgt\n return None\n\n def _build_test_mapping(self) -> None:\n for mod, mi in self.modules.items():\n if not mi.is_test:\n continue\n for alias, target in mi.imports.items():\n # target may be module or module.symbol\n m = target.split(\".\")[0]\n self.module_to_tests.setdefault(m, []).append(mod)\n\n def _expand_star_imports(self) -> None:\n for mod, stars in self.module_star_imports.items():\n mi = self.modules.get(mod)\n if not mi:\n continue\n for star_mod in stars:\n defs = [\n f\n for f in self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")\n ).defs\n ]\n exports = set(\n self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")\n ).exports\n or []\n )\n for fqn in defs:\n name = fqn.split(\".\")[-1]\n if exports:\n if name not in exports:\n continue\n elif name.startswith(\"_\"):\n continue\n if name not in mi.imports:\n mi.imports[name] = f\"{star_mod}.{name}\"\n\n def _post_resolve_calls(self) -> None:\n # After imports expanded, try to resolve unresolved simple names\n new_calls: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if \".\" in callee:\n new_calls.append((caller, callee))\n continue\n # Find caller module\n caller_mod = caller.rsplit(\".\", 1)[0] if \".\" in caller else caller\n imports = self.modules.get(\n caller_mod, ModuleInfo(module=caller_mod, file=\"\")\n ).imports\n tgt = imports.get(callee)\n if tgt:\n new_calls.append((caller, tgt))\n else:\n # leave as-is\n new_calls.append((caller, callee))\n self.calls = new_calls","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._post_resolve_calls","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._post_resolve_calls#L451-L469","kind":"function","name":"_post_resolve_calls","path":"examples/scripts/code_graph.py","language":"python","start_line":451,"end_line":469,"context_start_line":431,"context_end_line":489,"code":" for f in self.modules.get(\n star_mod, ModuleInfo(module=star_mod, file=\"\")\n ).defs\n ]\n exports = set(\n self.modules.get(\n star_mod, 
ModuleInfo(module=star_mod, file=\"\")\n ).exports\n or []\n )\n for fqn in defs:\n name = fqn.split(\".\")[-1]\n if exports:\n if name not in exports:\n continue\n elif name.startswith(\"_\"):\n continue\n if name not in mi.imports:\n mi.imports[name] = f\"{star_mod}.{name}\"\n\n def _post_resolve_calls(self) -> None:\n # After imports expanded, try to resolve unresolved simple names\n new_calls: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if \".\" in callee:\n new_calls.append((caller, callee))\n continue\n # Find caller module\n caller_mod = caller.rsplit(\".\", 1)[0] if \".\" in caller else caller\n imports = self.modules.get(\n caller_mod, ModuleInfo(module=caller_mod, file=\"\")\n ).imports\n tgt = imports.get(callee)\n if tgt:\n new_calls.append((caller, tgt))\n else:\n # leave as-is\n new_calls.append((caller, callee))\n self.calls = new_calls\n\n def unresolved_calls(self) -> List[Tuple[str, str]]:\n return [\n (a, c)\n for (a, c) in self.calls\n if \".\" not in c and not self._is_builtin_name(c)\n ]\n\n def _collect_pytest_nodes(self, tree: ast.AST, rel_path: str) -> List[str]:\n nodes: List[str] = []\n # top-level test_* functions\n for n in getattr(tree, \"body\", []) or []:\n if isinstance(n, ast.FunctionDef) and n.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, None, n))\n if isinstance(n, ast.ClassDef) and n.name.startswith(\"Test\"):\n cls = n.name\n for m in getattr(n, \"body\", []) or []:\n if isinstance(m, ast.FunctionDef) and m.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, cls, m))\n return nodes","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.unresolved_calls","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.unresolved_calls#L471-L476","kind":"function","name":"unresolved_calls","path":"examples/scripts/code_graph.py","language":"python","start_line":471,"end_line":476,"context_start_line":451,"context_end_line":496,"code":" def _post_resolve_calls(self) -> None:\n # After imports expanded, try to resolve unresolved simple names\n new_calls: List[Tuple[str, str]] = []\n for caller, callee in self.calls:\n if \".\" in callee:\n new_calls.append((caller, callee))\n continue\n # Find caller module\n caller_mod = caller.rsplit(\".\", 1)[0] if \".\" in caller else caller\n imports = self.modules.get(\n caller_mod, ModuleInfo(module=caller_mod, file=\"\")\n ).imports\n tgt = imports.get(callee)\n if tgt:\n new_calls.append((caller, tgt))\n else:\n # leave as-is\n new_calls.append((caller, callee))\n self.calls = new_calls\n\n def unresolved_calls(self) -> List[Tuple[str, str]]:\n return [\n (a, c)\n for (a, c) in self.calls\n if \".\" not in c and not self._is_builtin_name(c)\n ]\n\n def _collect_pytest_nodes(self, tree: ast.AST, rel_path: str) -> List[str]:\n nodes: List[str] = []\n # top-level test_* functions\n for n in getattr(tree, \"body\", []) or []:\n if isinstance(n, ast.FunctionDef) and n.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, None, n))\n if isinstance(n, ast.ClassDef) and n.name.startswith(\"Test\"):\n cls = n.name\n for m in getattr(n, \"body\", []) or []:\n if isinstance(m, ast.FunctionDef) and m.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, cls, m))\n return nodes\n\n def _expand_parametrize(\n self, rel_path: str, cls: Optional[str], fn: 
ast.FunctionDef\n ) -> List[str]:\n base = f\"{rel_path}::\" + (f\"{cls}::\" if cls else \"\") + fn.name\n # Look for @pytest.mark.parametrize(\"arg\", [vals])\n total: List[str] = []","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._collect_pytest_nodes","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._collect_pytest_nodes#L478-L489","kind":"function","name":"_collect_pytest_nodes","path":"examples/scripts/code_graph.py","language":"python","start_line":478,"end_line":489,"context_start_line":458,"context_end_line":509,"code":" # Find caller module\n caller_mod = caller.rsplit(\".\", 1)[0] if \".\" in caller else caller\n imports = self.modules.get(\n caller_mod, ModuleInfo(module=caller_mod, file=\"\")\n ).imports\n tgt = imports.get(callee)\n if tgt:\n new_calls.append((caller, tgt))\n else:\n # leave as-is\n new_calls.append((caller, callee))\n self.calls = new_calls\n\n def unresolved_calls(self) -> List[Tuple[str, str]]:\n return [\n (a, c)\n for (a, c) in self.calls\n if \".\" not in c and not self._is_builtin_name(c)\n ]\n\n def _collect_pytest_nodes(self, tree: ast.AST, rel_path: str) -> List[str]:\n nodes: List[str] = []\n # top-level test_* functions\n for n in getattr(tree, \"body\", []) or []:\n if isinstance(n, ast.FunctionDef) and n.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, None, n))\n if isinstance(n, ast.ClassDef) and n.name.startswith(\"Test\"):\n cls = n.name\n for m in getattr(n, \"body\", []) or []:\n if isinstance(m, ast.FunctionDef) and m.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, cls, m))\n return nodes\n\n def _expand_parametrize(\n self, rel_path: str, cls: Optional[str], fn: ast.FunctionDef\n ) -> List[str]:\n base = f\"{rel_path}::\" + (f\"{cls}::\" if cls else \"\") + fn.name\n # Look for @pytest.mark.parametrize(\"arg\", [vals])\n total: List[str] = []\n params: List[int] = []\n try:\n for dec in getattr(fn, \"decorator_list\", []) or []:\n # pytest.mark.parametrize(...)\n if (\n isinstance(dec, ast.Call)\n and isinstance(dec.func, ast.Attribute)\n and dec.func.attr == \"parametrize\"\n ):\n # estimate number of cases from second arg list length\n if len(dec.args) >= 2 and isinstance(\n dec.args[1], (ast.List, ast.Tuple)\n ):","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._expand_parametrize","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._expand_parametrize#L491-L523","kind":"function","name":"_expand_parametrize","path":"examples/scripts/code_graph.py","language":"python","start_line":491,"end_line":523,"context_start_line":471,"context_end_line":543,"code":" def unresolved_calls(self) -> List[Tuple[str, str]]:\n return [\n (a, c)\n for (a, c) in self.calls\n if \".\" not in c and not self._is_builtin_name(c)\n ]\n\n def _collect_pytest_nodes(self, tree: ast.AST, rel_path: str) -> List[str]:\n nodes: List[str] = []\n # top-level test_* functions\n for n in getattr(tree, \"body\", []) or []:\n if isinstance(n, ast.FunctionDef) and n.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, None, n))\n if isinstance(n, ast.ClassDef) and n.name.startswith(\"Test\"):\n cls = n.name\n for m in getattr(n, \"body\", []) or []:\n if 
isinstance(m, ast.FunctionDef) and m.name.startswith(\"test_\"):\n nodes.extend(self._expand_parametrize(rel_path, cls, m))\n return nodes\n\n def _expand_parametrize(\n self, rel_path: str, cls: Optional[str], fn: ast.FunctionDef\n ) -> List[str]:\n base = f\"{rel_path}::\" + (f\"{cls}::\" if cls else \"\") + fn.name\n # Look for @pytest.mark.parametrize(\"arg\", [vals])\n total: List[str] = []\n params: List[int] = []\n try:\n for dec in getattr(fn, \"decorator_list\", []) or []:\n # pytest.mark.parametrize(...)\n if (\n isinstance(dec, ast.Call)\n and isinstance(dec.func, ast.Attribute)\n and dec.func.attr == \"parametrize\"\n ):\n # estimate number of cases from second arg list length\n if len(dec.args) >= 2 and isinstance(\n dec.args[1], (ast.List, ast.Tuple)\n ):\n params.append(len(dec.args[1].elts))\n except Exception:\n pass\n if params:\n count: int = 1\n for k in params:\n try:\n count *= int(k)\n except Exception:\n count = max(count, 1)\n for i in range(count):\n total.append(f\"{base}[{i}]\")\n return total\n return [base]\n\n # --- Cache --- #\n\n def _try_load_cache(self, cache_path: str) -> bool:\n try:\n if not os.path.exists(cache_path):\n return False\n data = json.loads(open(cache_path, \"r\", encoding=\"utf-8\").read())\n if str(data.get(\"version\", \"\")) != \"3\":\n return False\n # Verify mtimes and hashes\n files = data.get(\"indexed_files\", [])\n mt = data.get(\"mtimes\", {})\n hh = data.get(\"hashes\", {})\n for f in files:\n if not os.path.exists(f):\n return False\n if int(os.path.getmtime(f)) != int(mt.get(f, 0)):\n return False\n if self._file_hash(f) != str(hh.get(f, \"\")):","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._try_load_cache","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._try_load_cache#L527-L559","kind":"function","name":"_try_load_cache","path":"examples/scripts/code_graph.py","language":"python","start_line":527,"end_line":559,"context_start_line":507,"context_end_line":579,"code":" if len(dec.args) >= 2 and isinstance(\n dec.args[1], (ast.List, ast.Tuple)\n ):\n params.append(len(dec.args[1].elts))\n except Exception:\n pass\n if params:\n count: int = 1\n for k in params:\n try:\n count *= int(k)\n except Exception:\n count = max(count, 1)\n for i in range(count):\n total.append(f\"{base}[{i}]\")\n return total\n return [base]\n\n # --- Cache --- #\n\n def _try_load_cache(self, cache_path: str) -> bool:\n try:\n if not os.path.exists(cache_path):\n return False\n data = json.loads(open(cache_path, \"r\", encoding=\"utf-8\").read())\n if str(data.get(\"version\", \"\")) != \"3\":\n return False\n # Verify mtimes and hashes\n files = data.get(\"indexed_files\", [])\n mt = data.get(\"mtimes\", {})\n hh = data.get(\"hashes\", {})\n for f in files:\n if not os.path.exists(f):\n return False\n if int(os.path.getmtime(f)) != int(mt.get(f, 0)):\n return False\n if self._file_hash(f) != str(hh.get(f, \"\")):\n return False\n # Load\n self.indexed_files = files\n for s in data.get(\"symbols\", []):\n sym = Symbol(**s)\n self._add_symbol(sym)\n for mod, mi in data.get(\"modules\", {}).items():\n self.modules[mod] = ModuleInfo(**mi)\n self.calls = [tuple(x) for x in data.get(\"calls\", [])]\n self.module_to_tests = data.get(\"module_to_tests\", {})\n self.module_imports = data.get(\"module_imports\", {})\n self._cached_mtimes = {k: int(v) for k, v in (mt or {}).items()}\n 
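# A minimal standalone sketch of the freshness gate above (cache_is_fresh is a
# hypothetical helper name, not part of this repo): it reproduces the
# version / mtime / content-hash checks of _try_load_cache without the load
# step, so the validation logic can be read on its own.
import hashlib
import json
import os

def cache_is_fresh(cache_path: str, version: str = "3") -> bool:
    if not os.path.exists(cache_path):
        return False
    with open(cache_path, "r", encoding="utf-8") as fh:
        data = json.load(fh)
    if str(data.get("version", "")) != version:
        return False
    mt, hh = data.get("mtimes", {}), data.get("hashes", {})
    for f in data.get("indexed_files", []):
        if not os.path.exists(f):
            return False  # indexed file vanished
        if int(os.path.getmtime(f)) != int(mt.get(f, 0)):
            return False  # mtime drifted
        with open(f, "rb") as rf:
            if hashlib.sha1(rf.read()).hexdigest() != str(hh.get(f, "")):
                return False  # content changed
    return True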
self._cached_hashes = {k: str(v) for k, v in (hh or {}).items()}\n return True\n except Exception:\n return False\n\n def _load_cache_relaxed(self, cache_path: str) -> bool:\n try:\n if not os.path.exists(cache_path):\n return False\n data = json.loads(open(cache_path, \"r\", encoding=\"utf-8\").read())\n if str(data.get(\"version\", \"\")) != \"3\":\n return False\n self.indexed_files = data.get(\"indexed_files\", [])\n for s in data.get(\"symbols\", []):\n sym = Symbol(**s)\n self._add_symbol(sym)\n for mod, mi in data.get(\"modules\", {}).items():\n self.modules[mod] = ModuleInfo(**mi)\n self.calls = [tuple(x) for x in data.get(\"calls\", [])]\n self.module_to_tests = data.get(\"module_to_tests\", {})\n self.module_imports = data.get(\"module_imports\", {})\n self._cached_mtimes = {\n k: int(v) for k, v in (data.get(\"mtimes\", {}) or {}).items()\n }","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._load_cache_relaxed","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._load_cache_relaxed#L561-L585","kind":"function","name":"_load_cache_relaxed","path":"examples/scripts/code_graph.py","language":"python","start_line":561,"end_line":585,"context_start_line":541,"context_end_line":605,"code":" if int(os.path.getmtime(f)) != int(mt.get(f, 0)):\n return False\n if self._file_hash(f) != str(hh.get(f, \"\")):\n return False\n # Load\n self.indexed_files = files\n for s in data.get(\"symbols\", []):\n sym = Symbol(**s)\n self._add_symbol(sym)\n for mod, mi in data.get(\"modules\", {}).items():\n self.modules[mod] = ModuleInfo(**mi)\n self.calls = [tuple(x) for x in data.get(\"calls\", [])]\n self.module_to_tests = data.get(\"module_to_tests\", {})\n self.module_imports = data.get(\"module_imports\", {})\n self._cached_mtimes = {k: int(v) for k, v in (mt or {}).items()}\n self._cached_hashes = {k: str(v) for k, v in (hh or {}).items()}\n return True\n except Exception:\n return False\n\n def _load_cache_relaxed(self, cache_path: str) -> bool:\n try:\n if not os.path.exists(cache_path):\n return False\n data = json.loads(open(cache_path, \"r\", encoding=\"utf-8\").read())\n if str(data.get(\"version\", \"\")) != \"3\":\n return False\n self.indexed_files = data.get(\"indexed_files\", [])\n for s in data.get(\"symbols\", []):\n sym = Symbol(**s)\n self._add_symbol(sym)\n for mod, mi in data.get(\"modules\", {}).items():\n self.modules[mod] = ModuleInfo(**mi)\n self.calls = [tuple(x) for x in data.get(\"calls\", [])]\n self.module_to_tests = data.get(\"module_to_tests\", {})\n self.module_imports = data.get(\"module_imports\", {})\n self._cached_mtimes = {\n k: int(v) for k, v in (data.get(\"mtimes\", {}) or {}).items()\n }\n self._cached_hashes = {\n k: str(v) for k, v in (data.get(\"hashes\", {}) or {}).items()\n }\n return True\n except Exception:\n return False\n\n def _detect_changed_files(\n self, old_mt: Dict[str, int], old_hh: Dict[str, str]\n ) -> Tuple[List[str], List[str]]:\n curr_files: List[str] = []\n for dirpath, dirnames, filenames in os.walk(self.root):\n dir_rel = os.path.relpath(dirpath, self.root)\n dirnames[:] = [d for d in dirnames if not self._is_ignored(os.path.join(dir_rel, d))]\n if self._is_ignored(dir_rel):\n continue\n for fn in filenames:\n if fn.endswith(\".py\"):\n fp = os.path.join(dirpath, fn)\n if self._is_ignored(os.path.relpath(fp, self.root)):\n continue\n curr_files.append(fp)\n curr = 
set(curr_files)\n prev = set(self.indexed_files or [])\n removed = list(prev - curr)\n added = list(curr - prev)","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._detect_changed_files","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._detect_changed_files#L587-L617","kind":"function","name":"_detect_changed_files","path":"examples/scripts/code_graph.py","language":"python","start_line":587,"end_line":617,"context_start_line":567,"context_end_line":637,"code":" return False\n self.indexed_files = data.get(\"indexed_files\", [])\n for s in data.get(\"symbols\", []):\n sym = Symbol(**s)\n self._add_symbol(sym)\n for mod, mi in data.get(\"modules\", {}).items():\n self.modules[mod] = ModuleInfo(**mi)\n self.calls = [tuple(x) for x in data.get(\"calls\", [])]\n self.module_to_tests = data.get(\"module_to_tests\", {})\n self.module_imports = data.get(\"module_imports\", {})\n self._cached_mtimes = {\n k: int(v) for k, v in (data.get(\"mtimes\", {}) or {}).items()\n }\n self._cached_hashes = {\n k: str(v) for k, v in (data.get(\"hashes\", {}) or {}).items()\n }\n return True\n except Exception:\n return False\n\n def _detect_changed_files(\n self, old_mt: Dict[str, int], old_hh: Dict[str, str]\n ) -> Tuple[List[str], List[str]]:\n curr_files: List[str] = []\n for dirpath, dirnames, filenames in os.walk(self.root):\n dir_rel = os.path.relpath(dirpath, self.root)\n dirnames[:] = [d for d in dirnames if not self._is_ignored(os.path.join(dir_rel, d))]\n if self._is_ignored(dir_rel):\n continue\n for fn in filenames:\n if fn.endswith(\".py\"):\n fp = os.path.join(dirpath, fn)\n if self._is_ignored(os.path.relpath(fp, self.root)):\n continue\n curr_files.append(fp)\n curr = set(curr_files)\n prev = set(self.indexed_files or [])\n removed = list(prev - curr)\n added = list(curr - prev)\n changed: List[str] = list(added)\n for f in curr & prev:\n try:\n mt = int(os.path.getmtime(f))\n hh = self._file_hash(f)\n except Exception:\n changed.append(f)\n continue\n if old_mt.get(f) != mt or old_hh.get(f) != hh:\n changed.append(f)\n self.indexed_files = sorted(list(curr))\n return sorted(set(changed)), sorted(removed)\n\n def _incremental_reindex(\n self, changed_files: List[str], removed_files: List[str]\n ) -> None:\n # purge removed modules\n for f in removed_files:\n m = self._module_name_for_path(f)\n if m in self.modules:\n to_remove = [\n fqn for fqn, s in list(self.symbols_by_fqn.items()) if s.module == m\n ]\n for fqn in to_remove:\n s = self.symbols_by_fqn.pop(fqn, None)\n if s:\n self.symbols_by_name[s.name] = [\n x for x in self.symbols_by_name.get(s.name, []) if x != fqn\n ]\n self.calls = [\n (a, b) for (a, b) in self.calls if not a.startswith(m + \".\")\n ]","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._incremental_reindex","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._incremental_reindex#L619-L653","kind":"function","name":"_incremental_reindex","path":"examples/scripts/code_graph.py","language":"python","start_line":619,"end_line":653,"context_start_line":599,"context_end_line":673,"code":" if self._is_ignored(os.path.relpath(fp, self.root)):\n continue\n curr_files.append(fp)\n curr = set(curr_files)\n prev = set(self.indexed_files or [])\n removed = 
list(prev - curr)\n added = list(curr - prev)\n changed: List[str] = list(added)\n for f in curr & prev:\n try:\n mt = int(os.path.getmtime(f))\n hh = self._file_hash(f)\n except Exception:\n changed.append(f)\n continue\n if old_mt.get(f) != mt or old_hh.get(f) != hh:\n changed.append(f)\n self.indexed_files = sorted(list(curr))\n return sorted(set(changed)), sorted(removed)\n\n def _incremental_reindex(\n self, changed_files: List[str], removed_files: List[str]\n ) -> None:\n # purge removed modules\n for f in removed_files:\n m = self._module_name_for_path(f)\n if m in self.modules:\n to_remove = [\n fqn for fqn, s in list(self.symbols_by_fqn.items()) if s.module == m\n ]\n for fqn in to_remove:\n s = self.symbols_by_fqn.pop(fqn, None)\n if s:\n self.symbols_by_name[s.name] = [\n x for x in self.symbols_by_name.get(s.name, []) if x != fqn\n ]\n self.calls = [\n (a, b) for (a, b) in self.calls if not a.startswith(m + \".\")\n ]\n self.modules.pop(m, None)\n self.module_imports.pop(m, None)\n self.module_star_imports.pop(m, None)\n mods = {self._module_name_for_path(f) for f in changed_files}\n # include dependents via reverse import graph\n rev = self._reverse_imports()\n queue = list(mods)\n seen = set(mods)\n while queue:\n m = queue.pop(0)\n for dep in rev.get(m, []):\n if dep not in seen:\n seen.add(dep)\n queue.append(dep)\n for m in seen:\n self._reindex_module(m)\n\n def _reverse_imports(self) -> Dict[str, List[str]]:\n rev: Dict[str, List[str]] = {}\n for m, deps in self.module_imports.items():\n for d in deps:\n rev.setdefault(d, []).append(m)\n return rev\n\n def _reindex_module(self, module: str) -> None:\n mi = self.modules.get(module)\n if not mi:\n return\n # remove existing symbols and calls for this module\n to_remove = [\n fqn for fqn, s in self.symbols_by_fqn.items() if s.module == module\n ]\n for fqn in to_remove:\n s = self.symbols_by_fqn.pop(fqn, None)\n if s:\n lst = self.symbols_by_name.get(s.name, [])","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._reverse_imports","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._reverse_imports#L655-L660","kind":"function","name":"_reverse_imports","path":"examples/scripts/code_graph.py","language":"python","start_line":655,"end_line":660,"context_start_line":635,"context_end_line":680,"code":" self.calls = [\n (a, b) for (a, b) in self.calls if not a.startswith(m + \".\")\n ]\n self.modules.pop(m, None)\n self.module_imports.pop(m, None)\n self.module_star_imports.pop(m, None)\n mods = {self._module_name_for_path(f) for f in changed_files}\n # include dependents via reverse import graph\n rev = self._reverse_imports()\n queue = list(mods)\n seen = set(mods)\n while queue:\n m = queue.pop(0)\n for dep in rev.get(m, []):\n if dep not in seen:\n seen.add(dep)\n queue.append(dep)\n for m in seen:\n self._reindex_module(m)\n\n def _reverse_imports(self) -> Dict[str, List[str]]:\n rev: Dict[str, List[str]] = {}\n for m, deps in self.module_imports.items():\n for d in deps:\n rev.setdefault(d, []).append(m)\n return rev\n\n def _reindex_module(self, module: str) -> None:\n mi = self.modules.get(module)\n if not mi:\n return\n # remove existing symbols and calls for this module\n to_remove = [\n fqn for fqn, s in self.symbols_by_fqn.items() if s.module == module\n ]\n for fqn in to_remove:\n s = self.symbols_by_fqn.pop(fqn, None)\n if s:\n lst = 
self.symbols_by_name.get(s.name, [])\n self.symbols_by_name[s.name] = [x for x in lst if x != fqn]\n self.calls = [(a, b) for (a, b) in self.calls if not a.startswith(module + \".\")]\n # reset import maps for this module\n self.modules[module].imports = {}\n self.module_imports[module] = []\n self.module_star_imports[module] = []\n # re-parse","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._reindex_module","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._reindex_module#L662-L696","kind":"function","name":"_reindex_module","path":"examples/scripts/code_graph.py","language":"python","start_line":662,"end_line":696,"context_start_line":642,"context_end_line":716,"code":" # include dependents via reverse import graph\n rev = self._reverse_imports()\n queue = list(mods)\n seen = set(mods)\n while queue:\n m = queue.pop(0)\n for dep in rev.get(m, []):\n if dep not in seen:\n seen.add(dep)\n queue.append(dep)\n for m in seen:\n self._reindex_module(m)\n\n def _reverse_imports(self) -> Dict[str, List[str]]:\n rev: Dict[str, List[str]] = {}\n for m, deps in self.module_imports.items():\n for d in deps:\n rev.setdefault(d, []).append(m)\n return rev\n\n def _reindex_module(self, module: str) -> None:\n mi = self.modules.get(module)\n if not mi:\n return\n # remove existing symbols and calls for this module\n to_remove = [\n fqn for fqn, s in self.symbols_by_fqn.items() if s.module == module\n ]\n for fqn in to_remove:\n s = self.symbols_by_fqn.pop(fqn, None)\n if s:\n lst = self.symbols_by_name.get(s.name, [])\n self.symbols_by_name[s.name] = [x for x in lst if x != fqn]\n self.calls = [(a, b) for (a, b) in self.calls if not a.startswith(module + \".\")]\n # reset import maps for this module\n self.modules[module].imports = {}\n self.module_imports[module] = []\n self.module_star_imports[module] = []\n # re-parse\n try:\n src = open(mi.file, \"r\", encoding=\"utf-8\").read()\n tree = ast.parse(src)\n except Exception:\n return\n visitor = _ModuleVisitor(module, mi.file)\n visitor.visit(tree)\n self.modules[module].imports.update(visitor.imports)\n self.module_imports[module] = sorted(visitor.import_modules)\n self.module_star_imports[module] = list(getattr(visitor, \"star_imports\", []))\n self.modules[module].exports = list(getattr(visitor, \"exports\", []))\n for sym in visitor.symbols:\n self._add_symbol(sym)\n for caller, callee_key in visitor.calls:\n callee_fqn = self._resolve_callee(module, callee_key, visitor)\n self.calls.append((caller, callee_fqn or callee_key))\n\n def _save_cache(self, cache_path: str) -> None:\n try:\n mt = {f: int(os.path.getmtime(f)) for f in self.indexed_files}\n hh = {f: self._file_hash(f) for f in self.indexed_files}\n data = {\n \"version\": \"3\",\n \"indexed_files\": self.indexed_files,\n \"mtimes\": mt,\n \"hashes\": hh,\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for k, v in self.modules.items()},\n \"calls\": self.calls,\n \"module_to_tests\": self.module_to_tests,\n \"module_imports\": self.module_imports,\n }\n open(cache_path, \"w\", encoding=\"utf-8\").write(json.dumps(data))\n except Exception:\n pass\n","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} 
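A minimal sketch of the dependent-expansion step that _incremental_reindex performs before re-parsing: invert the module-import map (as _reverse_imports does) and walk it breadth-first, so importers of a changed module are re-indexed along with it. dependents_closure is a hypothetical standalone name for illustration, not an API of this repo.

from collections import deque
from typing import Dict, List, Set

def dependents_closure(changed: Set[str], module_imports: Dict[str, List[str]]) -> Set[str]:
    # Invert module -> imported modules into imported module -> importers
    # (this is what _reverse_imports builds).
    rev: Dict[str, List[str]] = {}
    for m, deps in module_imports.items():
        for d in deps:
            rev.setdefault(d, []).append(m)
    # BFS from the directly changed modules through their importers.
    seen: Set[str] = set(changed)
    queue = deque(changed)
    while queue:
        m = queue.popleft()
        for importer in rev.get(m, []):
            if importer not in seen:
                seen.add(importer)
                queue.append(importer)
    return seen

# e.g. dependents_closure({"pkg.util"}, {"pkg.app": ["pkg.util"], "pkg.cli": ["pkg.app"]})
# returns {"pkg.util", "pkg.app", "pkg.cli"}: pkg.cli is pulled in transitively.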
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._save_cache","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._save_cache#L698-L715","kind":"function","name":"_save_cache","path":"examples/scripts/code_graph.py","language":"python","start_line":698,"end_line":715,"context_start_line":678,"context_end_line":735,"code":" self.module_imports[module] = []\n self.module_star_imports[module] = []\n # re-parse\n try:\n src = open(mi.file, \"r\", encoding=\"utf-8\").read()\n tree = ast.parse(src)\n except Exception:\n return\n visitor = _ModuleVisitor(module, mi.file)\n visitor.visit(tree)\n self.modules[module].imports.update(visitor.imports)\n self.module_imports[module] = sorted(visitor.import_modules)\n self.module_star_imports[module] = list(getattr(visitor, \"star_imports\", []))\n self.modules[module].exports = list(getattr(visitor, \"exports\", []))\n for sym in visitor.symbols:\n self._add_symbol(sym)\n for caller, callee_key in visitor.calls:\n callee_fqn = self._resolve_callee(module, callee_key, visitor)\n self.calls.append((caller, callee_fqn or callee_key))\n\n def _save_cache(self, cache_path: str) -> None:\n try:\n mt = {f: int(os.path.getmtime(f)) for f in self.indexed_files}\n hh = {f: self._file_hash(f) for f in self.indexed_files}\n data = {\n \"version\": \"3\",\n \"indexed_files\": self.indexed_files,\n \"mtimes\": mt,\n \"hashes\": hh,\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for k, v in self.modules.items()},\n \"calls\": self.calls,\n \"module_to_tests\": self.module_to_tests,\n \"module_imports\": self.module_imports,\n }\n open(cache_path, \"w\", encoding=\"utf-8\").write(json.dumps(data))\n except Exception:\n pass\n\n def _sym_to_dict(self, s: Symbol) -> Dict[str, Any]:\n return {\n \"fqn\": s.fqn,\n \"name\": s.name,\n \"qualname\": s.qualname,\n \"kind\": s.kind,\n \"module\": s.module,\n \"file\": s.file,\n \"line\": s.line,\n \"end_line\": s.end_line,\n \"doc\": s.doc,\n \"signature\": s.signature,\n \"returns\": s.returns,\n }\n\n def _mi_to_dict(self, mi: ModuleInfo) -> Dict[str, Any]:\n return {\n \"module\": mi.module,\n \"file\": mi.file,","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._sym_to_dict","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._sym_to_dict#L717-L730","kind":"function","name":"_sym_to_dict","path":"examples/scripts/code_graph.py","language":"python","start_line":717,"end_line":730,"context_start_line":697,"context_end_line":750,"code":"\n def _save_cache(self, cache_path: str) -> None:\n try:\n mt = {f: int(os.path.getmtime(f)) for f in self.indexed_files}\n hh = {f: self._file_hash(f) for f in self.indexed_files}\n data = {\n \"version\": \"3\",\n \"indexed_files\": self.indexed_files,\n \"mtimes\": mt,\n \"hashes\": hh,\n \"symbols\": [self._sym_to_dict(s) for s in self.symbols_by_fqn.values()],\n \"modules\": {k: self._mi_to_dict(v) for k, v in self.modules.items()},\n \"calls\": self.calls,\n \"module_to_tests\": self.module_to_tests,\n \"module_imports\": self.module_imports,\n }\n open(cache_path, \"w\", encoding=\"utf-8\").write(json.dumps(data))\n except Exception:\n pass\n\n def _sym_to_dict(self, s: Symbol) -> Dict[str, Any]:\n return {\n \"fqn\": s.fqn,\n \"name\": s.name,\n \"qualname\": s.qualname,\n \"kind\": 
s.kind,\n \"module\": s.module,\n \"file\": s.file,\n \"line\": s.line,\n \"end_line\": s.end_line,\n \"doc\": s.doc,\n \"signature\": s.signature,\n \"returns\": s.returns,\n }\n\n def _mi_to_dict(self, mi: ModuleInfo) -> Dict[str, Any]:\n return {\n \"module\": mi.module,\n \"file\": mi.file,\n \"is_test\": mi.is_test,\n \"imports\": mi.imports,\n \"defs\": mi.defs,\n \"exports\": mi.exports,\n }\n\n def _is_builtin_name(self, name: str) -> bool:\n try:\n import builtins as _bi # type: ignore\n\n if hasattr(_bi, name):\n return True\n except Exception:\n pass\n return name in {","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._mi_to_dict","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._mi_to_dict#L732-L740","kind":"function","name":"_mi_to_dict","path":"examples/scripts/code_graph.py","language":"python","start_line":732,"end_line":740,"context_start_line":712,"context_end_line":760,"code":" }\n open(cache_path, \"w\", encoding=\"utf-8\").write(json.dumps(data))\n except Exception:\n pass\n\n def _sym_to_dict(self, s: Symbol) -> Dict[str, Any]:\n return {\n \"fqn\": s.fqn,\n \"name\": s.name,\n \"qualname\": s.qualname,\n \"kind\": s.kind,\n \"module\": s.module,\n \"file\": s.file,\n \"line\": s.line,\n \"end_line\": s.end_line,\n \"doc\": s.doc,\n \"signature\": s.signature,\n \"returns\": s.returns,\n }\n\n def _mi_to_dict(self, mi: ModuleInfo) -> Dict[str, Any]:\n return {\n \"module\": mi.module,\n \"file\": mi.file,\n \"is_test\": mi.is_test,\n \"imports\": mi.imports,\n \"defs\": mi.defs,\n \"exports\": mi.exports,\n }\n\n def _is_builtin_name(self, name: str) -> bool:\n try:\n import builtins as _bi # type: ignore\n\n if hasattr(_bi, name):\n return True\n except Exception:\n pass\n return name in {\n \"super\",\n \"property\",\n \"globals\",\n \"locals\",\n \"__import__\",\n \"print\",\n \"len\",\n \"range\",\n \"dict\",\n \"list\",","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._is_builtin_name","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._is_builtin_name#L742-L783","kind":"function","name":"_is_builtin_name","path":"examples/scripts/code_graph.py","language":"python","start_line":742,"end_line":783,"context_start_line":722,"context_end_line":803,"code":" \"kind\": s.kind,\n \"module\": s.module,\n \"file\": s.file,\n \"line\": s.line,\n \"end_line\": s.end_line,\n \"doc\": s.doc,\n \"signature\": s.signature,\n \"returns\": s.returns,\n }\n\n def _mi_to_dict(self, mi: ModuleInfo) -> Dict[str, Any]:\n return {\n \"module\": mi.module,\n \"file\": mi.file,\n \"is_test\": mi.is_test,\n \"imports\": mi.imports,\n \"defs\": mi.defs,\n \"exports\": mi.exports,\n }\n\n def _is_builtin_name(self, name: str) -> bool:\n try:\n import builtins as _bi # type: ignore\n\n if hasattr(_bi, name):\n return True\n except Exception:\n pass\n return name in {\n \"super\",\n \"property\",\n \"globals\",\n \"locals\",\n \"__import__\",\n \"print\",\n \"len\",\n \"range\",\n \"dict\",\n \"list\",\n \"set\",\n \"tuple\",\n \"int\",\n \"float\",\n \"bool\",\n \"max\",\n \"min\",\n \"sum\",\n \"open\",\n \"enumerate\",\n \"zip\",\n \"map\",\n \"filter\",\n \"round\",\n \"any\",\n \"all\",\n \"sorted\",\n \"hasattr\",\n \"getattr\",\n \"setattr\",\n 
\"isinstance\",\n \"issubclass\",\n }\n\n def _file_hash(self, path: str) -> str:\n try:\n import hashlib\n\n with open(path, \"rb\") as rf:\n return hashlib.sha1(rf.read()).hexdigest()\n except Exception:\n return \"\"\n\n # --- Coverage --- #\n\n def attach_coverage_from_xml(self, xml_path: str) -> None:\n try:\n import xml.etree.ElementTree as ET\n\n tree = ET.parse(xml_path)\n root = tree.getroot()\n files_hits: Dict[str, set[int]] = {}\n # coverage.py XML: ... ","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._file_hash","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._file_hash#L785-L792","kind":"function","name":"_file_hash","path":"examples/scripts/code_graph.py","language":"python","start_line":785,"end_line":792,"context_start_line":765,"context_end_line":812,"code":" \"bool\",\n \"max\",\n \"min\",\n \"sum\",\n \"open\",\n \"enumerate\",\n \"zip\",\n \"map\",\n \"filter\",\n \"round\",\n \"any\",\n \"all\",\n \"sorted\",\n \"hasattr\",\n \"getattr\",\n \"setattr\",\n \"isinstance\",\n \"issubclass\",\n }\n\n def _file_hash(self, path: str) -> str:\n try:\n import hashlib\n\n with open(path, \"rb\") as rf:\n return hashlib.sha1(rf.read()).hexdigest()\n except Exception:\n return \"\"\n\n # --- Coverage --- #\n\n def attach_coverage_from_xml(self, xml_path: str) -> None:\n try:\n import xml.etree.ElementTree as ET\n\n tree = ET.parse(xml_path)\n root = tree.getroot()\n files_hits: Dict[str, set[int]] = {}\n # coverage.py XML: ... \n for cls in root.findall(\".//class\"):\n fn = cls.attrib.get(\"filename\", \"\")\n if not fn:\n continue\n # Normalize to absolute path\n f_abs = (\n fn\n if os.path.isabs(fn)\n else os.path.abspath(os.path.join(self.root, fn))","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.attach_coverage_from_xml","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.attach_coverage_from_xml#L796-L860","kind":"function","name":"attach_coverage_from_xml","path":"examples/scripts/code_graph.py","language":"python","start_line":796,"end_line":860,"context_start_line":776,"context_end_line":880,"code":" \"all\",\n \"sorted\",\n \"hasattr\",\n \"getattr\",\n \"setattr\",\n \"isinstance\",\n \"issubclass\",\n }\n\n def _file_hash(self, path: str) -> str:\n try:\n import hashlib\n\n with open(path, \"rb\") as rf:\n return hashlib.sha1(rf.read()).hexdigest()\n except Exception:\n return \"\"\n\n # --- Coverage --- #\n\n def attach_coverage_from_xml(self, xml_path: str) -> None:\n try:\n import xml.etree.ElementTree as ET\n\n tree = ET.parse(xml_path)\n root = tree.getroot()\n files_hits: Dict[str, set[int]] = {}\n # coverage.py XML: ... 
\n for cls in root.findall(\".//class\"):\n fn = cls.attrib.get(\"filename\", \"\")\n if not fn:\n continue\n # Normalize to absolute path\n f_abs = (\n fn\n if os.path.isabs(fn)\n else os.path.abspath(os.path.join(self.root, fn))\n )\n hits = files_hits.setdefault(f_abs, set())\n for ln in cls.findall(\".//line\"):\n try:\n num = int(ln.attrib.get(\"number\", \"0\"))\n h = int(ln.attrib.get(\"hits\", \"0\"))\n if h > 0:\n hits.add(num)\n except Exception:\n continue\n # Some coverage.xml variants place nodes\n if not files_hits:\n for fnode in root.findall(\".//file\"):\n fn = fnode.attrib.get(\"filename\", \"\")\n if not fn:\n continue\n f_abs = (\n fn\n if os.path.isabs(fn)\n else os.path.abspath(os.path.join(self.root, fn))\n )\n hits = files_hits.setdefault(f_abs, set())\n for ln in fnode.findall(\".//line\"):\n try:\n num = int(ln.attrib.get(\"number\", \"0\"))\n h = int(ln.attrib.get(\"hits\", \"0\"))\n if h > 0:\n hits.add(num)\n except Exception:\n continue\n self.coverage_files = files_hits\n # Compute per-symbol coverage\n sym_cov: Dict[str, float] = {}\n for fqn, sym in self.symbols_by_fqn.items():\n covered = files_hits.get(sym.file, set())\n a = int(sym.line)\n b = int(sym.end_line) if int(sym.end_line) >= a else a\n span = list(range(a, b + 1))\n if not span:\n sym_cov[fqn] = 0.0\n continue\n hits = sum(1 for x in span if x in covered)\n sym_cov[fqn] = hits / float(len(span))\n self.symbol_coverage = sym_cov\n except Exception:\n # Leave coverage empty on error\n self.coverage_files = {}\n self.symbol_coverage = {}\n\n def coverage_of(self, fqn: str) -> Optional[float]:\n return self.symbol_coverage.get(fqn)\n\n\ndef _cli() -> None:\n import argparse\n import json\n\n p = argparse.ArgumentParser()\n p.add_argument(\"root\", nargs=\"?\", default=\"./repo\")\n p.add_argument(\"--ignore\", action=\"append\", default=None, help=\"Relative paths to ignore (repeatable)\")\n p.add_argument(\"--owners-of\", dest=\"owners_of\", default=None)\n p.add_argument(\"--search\", dest=\"search\", default=None)\n p.add_argument(\"--defs-in\", dest=\"defs_in\", default=None)\n p.add_argument(\"--calls-of\", dest=\"calls_of\", default=None)\n p.add_argument(\"--who-calls\", dest=\"who_calls\", default=None)\n p.add_argument(\"--dump\", dest=\"dump\", action=\"store_true\")\n p.add_argument(\"--coverage-xml\", dest=\"coverage_xml\", default=None)\n p.add_argument(\"--coverage-of\", dest=\"coverage_of\", default=None)","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.coverage_of","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.coverage_of#L862-L863","kind":"function","name":"coverage_of","path":"examples/scripts/code_graph.py","language":"python","start_line":862,"end_line":863,"context_start_line":842,"context_end_line":883,"code":" continue\n self.coverage_files = files_hits\n # Compute per-symbol coverage\n sym_cov: Dict[str, float] = {}\n for fqn, sym in self.symbols_by_fqn.items():\n covered = files_hits.get(sym.file, set())\n a = int(sym.line)\n b = int(sym.end_line) if int(sym.end_line) >= a else a\n span = list(range(a, b + 1))\n if not span:\n sym_cov[fqn] = 0.0\n continue\n hits = sum(1 for x in span if x in covered)\n sym_cov[fqn] = hits / float(len(span))\n self.symbol_coverage = sym_cov\n except Exception:\n # Leave coverage empty on error\n self.coverage_files = {}\n self.symbol_coverage = {}\n\n def 
coverage_of(self, fqn: str) -> Optional[float]:\n return self.symbol_coverage.get(fqn)\n\n\ndef _cli() -> None:\n import argparse\n import json\n\n p = argparse.ArgumentParser()\n p.add_argument(\"root\", nargs=\"?\", default=\"./repo\")\n p.add_argument(\"--ignore\", action=\"append\", default=None, help=\"Relative paths to ignore (repeatable)\")\n p.add_argument(\"--owners-of\", dest=\"owners_of\", default=None)\n p.add_argument(\"--search\", dest=\"search\", default=None)\n p.add_argument(\"--defs-in\", dest=\"defs_in\", default=None)\n p.add_argument(\"--calls-of\", dest=\"calls_of\", default=None)\n p.add_argument(\"--who-calls\", dest=\"who_calls\", default=None)\n p.add_argument(\"--dump\", dest=\"dump\", action=\"store_true\")\n p.add_argument(\"--coverage-xml\", dest=\"coverage_xml\", default=None)\n p.add_argument(\"--coverage-of\", dest=\"coverage_of\", default=None)\n p.add_argument(\"--refs-of\", dest=\"refs_of\", default=None)\n p.add_argument(\"--tests-for\", dest=\"tests_for\", default=None)\n p.add_argument(\"--tests-for-module\", dest=\"tests_for_module\", default=None)","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._cur_qualname","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._cur_qualname#L974-L975","kind":"function","name":"_cur_qualname","path":"examples/scripts/code_graph.py","language":"python","start_line":974,"end_line":975,"context_start_line":954,"context_end_line":995,"code":" )\n )\n return\n # Dump summary\n print(json.dumps({\"files\": len(g.indexed_files), \"symbols\": len(g.symbols_by_fqn)}))\n\n\nclass _ModuleVisitor(ast.NodeVisitor):\n def __init__(self, module: str, path: str) -> None:\n self.module = module\n self.path = path\n self.symbols: List[Symbol] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_key)\n self.stack: List[str] = [] # qualname stack\n self.class_stack: List[str] = []\n self.imports: Dict[str, str] = {}\n self.import_modules: List[str] = []\n self.star_imports: List[str] = []\n self.exports: List[str] = []\n\n def _cur_qualname(self) -> str:\n return \".\".join(self.stack)\n\n def _cur_class(self) -> Optional[str]:\n return self.class_stack[-1] if self.class_stack else None\n\n def _fqn(self, name: str) -> str:\n q = self._cur_qualname()\n return f\"{self.module}.{q + ('.' 
if q else '')}{name}\"\n\n def visit_Import(self, node: ast.Import) -> Any: # type: ignore[override]\n for alias in node.names:\n asname = alias.asname or alias.name.split(\".\")[-1]\n self.imports[asname] = alias.name\n self.import_modules.append(alias.name.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node: ast.ImportFrom) -> Any: # type: ignore[override]\n # Resolve relative imports: from .x import y\n if node.level and node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._cur_class","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._cur_class#L977-L978","kind":"function","name":"_cur_class","path":"examples/scripts/code_graph.py","language":"python","start_line":977,"end_line":978,"context_start_line":957,"context_end_line":998,"code":" # Dump summary\n print(json.dumps({\"files\": len(g.indexed_files), \"symbols\": len(g.symbols_by_fqn)}))\n\n\nclass _ModuleVisitor(ast.NodeVisitor):\n def __init__(self, module: str, path: str) -> None:\n self.module = module\n self.path = path\n self.symbols: List[Symbol] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_key)\n self.stack: List[str] = [] # qualname stack\n self.class_stack: List[str] = []\n self.imports: Dict[str, str] = {}\n self.import_modules: List[str] = []\n self.star_imports: List[str] = []\n self.exports: List[str] = []\n\n def _cur_qualname(self) -> str:\n return \".\".join(self.stack)\n\n def _cur_class(self) -> Optional[str]:\n return self.class_stack[-1] if self.class_stack else None\n\n def _fqn(self, name: str) -> str:\n q = self._cur_qualname()\n return f\"{self.module}.{q + ('.' 
if q else '')}{name}\"\n\n def visit_Import(self, node: ast.Import) -> Any: # type: ignore[override]\n for alias in node.names:\n asname = alias.asname or alias.name.split(\".\")[-1]\n self.imports[asname] = alias.name\n self.import_modules.append(alias.name.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node: ast.ImportFrom) -> Any: # type: ignore[override]\n # Resolve relative imports: from .x import y\n if node.level and node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n prefix = base[:-up] if up > 0 else base\n mod = \".\".join([p for p in prefix if p] + [node.module])\n elif node.level and not node.module:","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._fqn","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._fqn#L980-L982","kind":"function","name":"_fqn","path":"examples/scripts/code_graph.py","language":"python","start_line":980,"end_line":982,"context_start_line":960,"context_end_line":1002,"code":"\nclass _ModuleVisitor(ast.NodeVisitor):\n def __init__(self, module: str, path: str) -> None:\n self.module = module\n self.path = path\n self.symbols: List[Symbol] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_key)\n self.stack: List[str] = [] # qualname stack\n self.class_stack: List[str] = []\n self.imports: Dict[str, str] = {}\n self.import_modules: List[str] = []\n self.star_imports: List[str] = []\n self.exports: List[str] = []\n\n def _cur_qualname(self) -> str:\n return \".\".join(self.stack)\n\n def _cur_class(self) -> Optional[str]:\n return self.class_stack[-1] if self.class_stack else None\n\n def _fqn(self, name: str) -> str:\n q = self._cur_qualname()\n return f\"{self.module}.{q + ('.' 
if q else '')}{name}\"\n\n def visit_Import(self, node: ast.Import) -> Any: # type: ignore[override]\n for alias in node.names:\n asname = alias.asname or alias.name.split(\".\")[-1]\n self.imports[asname] = alias.name\n self.import_modules.append(alias.name.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node: ast.ImportFrom) -> Any: # type: ignore[override]\n # Resolve relative imports: from .x import y\n if node.level and node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n prefix = base[:-up] if up > 0 else base\n mod = \".\".join([p for p in prefix if p] + [node.module])\n elif node.level and not node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n mod = \".\".join(base[:-up])\n else:","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.visit_Import","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.visit_Import#L984-L989","kind":"function","name":"visit_Import","path":"examples/scripts/code_graph.py","language":"python","start_line":984,"end_line":989,"context_start_line":964,"context_end_line":1009,"code":" self.path = path\n self.symbols: List[Symbol] = []\n self.calls: List[Tuple[str, str]] = [] # (caller_fqn, callee_key)\n self.stack: List[str] = [] # qualname stack\n self.class_stack: List[str] = []\n self.imports: Dict[str, str] = {}\n self.import_modules: List[str] = []\n self.star_imports: List[str] = []\n self.exports: List[str] = []\n\n def _cur_qualname(self) -> str:\n return \".\".join(self.stack)\n\n def _cur_class(self) -> Optional[str]:\n return self.class_stack[-1] if self.class_stack else None\n\n def _fqn(self, name: str) -> str:\n q = self._cur_qualname()\n return f\"{self.module}.{q + ('.' 
if q else '')}{name}\"\n\n def visit_Import(self, node: ast.Import) -> Any: # type: ignore[override]\n for alias in node.names:\n asname = alias.asname or alias.name.split(\".\")[-1]\n self.imports[asname] = alias.name\n self.import_modules.append(alias.name.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node: ast.ImportFrom) -> Any: # type: ignore[override]\n # Resolve relative imports: from .x import y\n if node.level and node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n prefix = base[:-up] if up > 0 else base\n mod = \".\".join([p for p in prefix if p] + [node.module])\n elif node.level and not node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n mod = \".\".join(base[:-up])\n else:\n mod = node.module or \"\"\n for alias in node.names:\n # star import\n if getattr(alias, \"name\", \"\") == \"*\":\n if mod:\n self.star_imports.append(mod)\n continue","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.visit_ImportFrom","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.visit_ImportFrom#L991-L1014","kind":"function","name":"visit_ImportFrom","path":"examples/scripts/code_graph.py","language":"python","start_line":991,"end_line":1014,"context_start_line":971,"context_end_line":1034,"code":" self.star_imports: List[str] = []\n self.exports: List[str] = []\n\n def _cur_qualname(self) -> str:\n return \".\".join(self.stack)\n\n def _cur_class(self) -> Optional[str]:\n return self.class_stack[-1] if self.class_stack else None\n\n def _fqn(self, name: str) -> str:\n q = self._cur_qualname()\n return f\"{self.module}.{q + ('.' 
if q else '')}{name}\"\n\n def visit_Import(self, node: ast.Import) -> Any: # type: ignore[override]\n for alias in node.names:\n asname = alias.asname or alias.name.split(\".\")[-1]\n self.imports[asname] = alias.name\n self.import_modules.append(alias.name.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node: ast.ImportFrom) -> Any: # type: ignore[override]\n # Resolve relative imports: from .x import y\n if node.level and node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n prefix = base[:-up] if up > 0 else base\n mod = \".\".join([p for p in prefix if p] + [node.module])\n elif node.level and not node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n mod = \".\".join(base[:-up])\n else:\n mod = node.module or \"\"\n for alias in node.names:\n # star import\n if getattr(alias, \"name\", \"\") == \"*\":\n if mod:\n self.star_imports.append(mod)\n continue\n asname = alias.asname or alias.name\n self.imports[asname] = f\"{mod}.{alias.name}\" if mod else alias.name\n if mod:\n self.import_modules.append(mod.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ClassDef(self, node: ast.ClassDef) -> Any: # type: ignore[override]\n fqn = self._fqn(node.name)\n try:\n doc_s = ast.get_docstring(node) or None\n except Exception:\n doc_s = None\n sym = Symbol(\n fqn=fqn,\n name=node.name,\n qualname=self._cur_qualname(),\n kind=\"class\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n doc=doc_s,\n )\n self.symbols.append(sym)\n self.stack.append(node.name)","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.visit_ClassDef","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.visit_ClassDef#L1016-L1038","kind":"function","name":"visit_ClassDef","path":"examples/scripts/code_graph.py","language":"python","start_line":1016,"end_line":1038,"context_start_line":996,"context_end_line":1058,"code":" prefix = base[:-up] if up > 0 else base\n mod = \".\".join([p for p in prefix if p] + [node.module])\n elif node.level and not node.module:\n base = self.module.split(\".\")\n up = max(0, int(node.level))\n mod = \".\".join(base[:-up])\n else:\n mod = node.module or \"\"\n for alias in node.names:\n # star import\n if getattr(alias, \"name\", \"\") == \"*\":\n if mod:\n self.star_imports.append(mod)\n continue\n asname = alias.asname or alias.name\n self.imports[asname] = f\"{mod}.{alias.name}\" if mod else alias.name\n if mod:\n self.import_modules.append(mod.split(\".\")[0])\n self.generic_visit(node)\n\n def visit_ClassDef(self, node: ast.ClassDef) -> Any: # type: ignore[override]\n fqn = self._fqn(node.name)\n try:\n doc_s = ast.get_docstring(node) or None\n except Exception:\n doc_s = None\n sym = Symbol(\n fqn=fqn,\n name=node.name,\n qualname=self._cur_qualname(),\n kind=\"class\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n doc=doc_s,\n )\n self.symbols.append(sym)\n self.stack.append(node.name)\n self.class_stack.append(node.name)\n self.generic_visit(node)\n self.class_stack.pop()\n self.stack.pop()\n\n def visit_FunctionDef(self, node: ast.FunctionDef) -> Any: # type: ignore[override]\n self._visit_func_like(node)\n\n def 
visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any: # type: ignore[override]\n self._visit_func_like(node)\n\n def _visit_func_like(self, node: Any) -> None:\n fqn = self._fqn(node.name)\n # Signature & returns\n sig_s, ret_s = None, None\n try:\n params = []\n for a in getattr(node, \"args\", None).args or []:\n nm = getattr(a, \"arg\", \"\")\n ann = getattr(a, \"annotation\", None)\n params.append(f\"{nm}:{ast.unparse(ann)}\" if ann is not None else nm)\n ret = getattr(node, \"returns\", None)\n ret_s = ast.unparse(ret) if ret is not None else None\n sig_s = f\"({', '.join(params)})\"","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.visit_FunctionDef","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.visit_FunctionDef#L1040-L1041","kind":"function","name":"visit_FunctionDef","path":"examples/scripts/code_graph.py","language":"python","start_line":1040,"end_line":1041,"context_start_line":1020,"context_end_line":1061,"code":" except Exception:\n doc_s = None\n sym = Symbol(\n fqn=fqn,\n name=node.name,\n qualname=self._cur_qualname(),\n kind=\"class\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n doc=doc_s,\n )\n self.symbols.append(sym)\n self.stack.append(node.name)\n self.class_stack.append(node.name)\n self.generic_visit(node)\n self.class_stack.pop()\n self.stack.pop()\n\n def visit_FunctionDef(self, node: ast.FunctionDef) -> Any: # type: ignore[override]\n self._visit_func_like(node)\n\n def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any: # type: ignore[override]\n self._visit_func_like(node)\n\n def _visit_func_like(self, node: Any) -> None:\n fqn = self._fqn(node.name)\n # Signature & returns\n sig_s, ret_s = None, None\n try:\n params = []\n for a in getattr(node, \"args\", None).args or []:\n nm = getattr(a, \"arg\", \"\")\n ann = getattr(a, \"annotation\", None)\n params.append(f\"{nm}:{ast.unparse(ann)}\" if ann is not None else nm)\n ret = getattr(node, \"returns\", None)\n ret_s = ast.unparse(ret) if ret is not None else None\n sig_s = f\"({', '.join(params)})\"\n except Exception:\n sig_s, ret_s = None, None\n try:","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.visit_AsyncFunctionDef","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.visit_AsyncFunctionDef#L1043-L1044","kind":"function","name":"visit_AsyncFunctionDef","path":"examples/scripts/code_graph.py","language":"python","start_line":1043,"end_line":1044,"context_start_line":1023,"context_end_line":1064,"code":" fqn=fqn,\n name=node.name,\n qualname=self._cur_qualname(),\n kind=\"class\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n doc=doc_s,\n )\n self.symbols.append(sym)\n self.stack.append(node.name)\n self.class_stack.append(node.name)\n self.generic_visit(node)\n self.class_stack.pop()\n self.stack.pop()\n\n def visit_FunctionDef(self, node: ast.FunctionDef) -> Any: # type: ignore[override]\n self._visit_func_like(node)\n\n def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any: # type: ignore[override]\n 
self._visit_func_like(node)\n\n def _visit_func_like(self, node: Any) -> None:\n fqn = self._fqn(node.name)\n # Signature & returns\n sig_s, ret_s = None, None\n try:\n params = []\n for a in getattr(node, \"args\", None).args or []:\n nm = getattr(a, \"arg\", \"\")\n ann = getattr(a, \"annotation\", None)\n params.append(f\"{nm}:{ast.unparse(ann)}\" if ann is not None else nm)\n ret = getattr(node, \"returns\", None)\n ret_s = ast.unparse(ret) if ret is not None else None\n sig_s = f\"({', '.join(params)})\"\n except Exception:\n sig_s, ret_s = None, None\n try:\n doc_s = ast.get_docstring(node) or None\n except Exception:\n doc_s = None","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._visit_func_like","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._visit_func_like#L1046-L1091","kind":"function","name":"_visit_func_like","path":"examples/scripts/code_graph.py","language":"python","start_line":1046,"end_line":1091,"context_start_line":1026,"context_end_line":1111,"code":" kind=\"class\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n doc=doc_s,\n )\n self.symbols.append(sym)\n self.stack.append(node.name)\n self.class_stack.append(node.name)\n self.generic_visit(node)\n self.class_stack.pop()\n self.stack.pop()\n\n def visit_FunctionDef(self, node: ast.FunctionDef) -> Any: # type: ignore[override]\n self._visit_func_like(node)\n\n def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any: # type: ignore[override]\n self._visit_func_like(node)\n\n def _visit_func_like(self, node: Any) -> None:\n fqn = self._fqn(node.name)\n # Signature & returns\n sig_s, ret_s = None, None\n try:\n params = []\n for a in getattr(node, \"args\", None).args or []:\n nm = getattr(a, \"arg\", \"\")\n ann = getattr(a, \"annotation\", None)\n params.append(f\"{nm}:{ast.unparse(ann)}\" if ann is not None else nm)\n ret = getattr(node, \"returns\", None)\n ret_s = ast.unparse(ret) if ret is not None else None\n sig_s = f\"({', '.join(params)})\"\n except Exception:\n sig_s, ret_s = None, None\n try:\n doc_s = ast.get_docstring(node) or None\n except Exception:\n doc_s = None\n sym = Symbol(\n fqn=fqn,\n name=node.name,\n qualname=self._cur_qualname(),\n kind=\"function\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n doc=doc_s,\n signature=sig_s,\n returns=ret_s,\n )\n self.symbols.append(sym)\n self.stack.append(node.name)\n # Traverse body to collect calls\n for sub in ast.walk(node):\n if isinstance(sub, ast.Call):\n callee_key = self._extract_callee_key(sub.func)\n if callee_key:\n self.calls.append((fqn, callee_key))\n # Decorators as calls\n for dec in getattr(node, \"decorator_list\", []) or []:\n callee_key = self._extract_callee_key(dec)\n if callee_key:\n self.calls.append((fqn, callee_key))\n self.stack.pop()\n\n def visit_Assign(self, node: ast.Assign) -> Any: # type: ignore[override]\n for t in getattr(node, \"targets\", []) or []:\n if isinstance(t, ast.Name):\n fqn = self._fqn(t.id)\n sym = Symbol(\n fqn=fqn,\n name=t.id,\n qualname=self._cur_qualname(),\n kind=\"variable\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 
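As captured in the `_visit_func_like` record above, call edges are collected by walking the function body for `ast.Call` nodes, and decorators are recorded as calls too. A simplified sketch, assuming a much cruder callee resolver than `_extract_callee_key` (bare names and attribute tails only):

import ast
from typing import Optional

src = '''
@cached
def compute(x):
    return transform(x) + helper.run(x)
'''
fn = ast.parse(src).body[0]
assert isinstance(fn, ast.FunctionDef)

def callee_name(expr: ast.AST) -> Optional[str]:
    # Simplified stand-in for _extract_callee_key.
    if isinstance(expr, ast.Name):
        return expr.id
    if isinstance(expr, ast.Attribute):
        return expr.attr
    return None

edges = []
for sub in ast.walk(fn):            # whole body, as in the source
    if isinstance(sub, ast.Call):
        key = callee_name(sub.func)
        if key:
            edges.append(("compute", key))
for dec in fn.decorator_list:       # decorators recorded as call edges too
    key = callee_name(dec)
    if key:
        edges.append(("compute", key))
print(edges)  # [('compute', 'transform'), ('compute', 'run'), ('compute', 'cached')]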
1)),\n )\n self.symbols.append(sym)\n # capture __all__ = [\"...\"]\n try:\n names = []\n is_all = any(","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph.visit_Assign","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph.visit_Assign#L1093-L1122","kind":"function","name":"visit_Assign","path":"examples/scripts/code_graph.py","language":"python","start_line":1093,"end_line":1122,"context_start_line":1073,"context_end_line":1142,"code":" end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n doc=doc_s,\n signature=sig_s,\n returns=ret_s,\n )\n self.symbols.append(sym)\n self.stack.append(node.name)\n # Traverse body to collect calls\n for sub in ast.walk(node):\n if isinstance(sub, ast.Call):\n callee_key = self._extract_callee_key(sub.func)\n if callee_key:\n self.calls.append((fqn, callee_key))\n # Decorators as calls\n for dec in getattr(node, \"decorator_list\", []) or []:\n callee_key = self._extract_callee_key(dec)\n if callee_key:\n self.calls.append((fqn, callee_key))\n self.stack.pop()\n\n def visit_Assign(self, node: ast.Assign) -> Any: # type: ignore[override]\n for t in getattr(node, \"targets\", []) or []:\n if isinstance(t, ast.Name):\n fqn = self._fqn(t.id)\n sym = Symbol(\n fqn=fqn,\n name=t.id,\n qualname=self._cur_qualname(),\n kind=\"variable\",\n module=self.module,\n file=self.path,\n line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n )\n self.symbols.append(sym)\n # capture __all__ = [\"...\"]\n try:\n names = []\n is_all = any(\n (isinstance(t, ast.Name) and t.id == \"__all__\") for t in node.targets\n )\n if is_all and isinstance(node.value, (ast.List, ast.Tuple)):\n for el in node.value.elts:\n if isinstance(el, ast.Constant) and isinstance(el.value, str):\n names.append(el.value)\n if names:\n self.exports.extend(names)\n except Exception:\n pass\n self.generic_visit(node)\n\n def _extract_callee_key(self, fn: ast.AST) -> Optional[str]:\n # simple name\n if isinstance(fn, ast.Name):\n return fn.id\n\n # super().method()\n if (\n isinstance(fn, ast.Attribute)\n and isinstance(fn.value, ast.Call)\n and isinstance(fn.value.func, ast.Name)\n and fn.value.func.id == \"super\"\n ):\n meth = fn.attr\n cur_cls = self._cur_class()\n if cur_cls:\n return f\"{self.module}.{cur_cls}.{meth}\"\n return meth\n\n # obj.attr chain","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.code_graph._extract_callee_key","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.code_graph._extract_callee_key#L1124-L1193","kind":"function","name":"_extract_callee_key","path":"examples/scripts/code_graph.py","language":"python","start_line":1124,"end_line":1193,"context_start_line":1104,"context_end_line":1197,"code":" line=getattr(node, \"lineno\", 1),\n end_line=getattr(node, \"end_lineno\", getattr(node, \"lineno\", 1)),\n )\n self.symbols.append(sym)\n # capture __all__ = [\"...\"]\n try:\n names = []\n is_all = any(\n (isinstance(t, ast.Name) and t.id == \"__all__\") for t in node.targets\n )\n if is_all and isinstance(node.value, (ast.List, ast.Tuple)):\n for el in node.value.elts:\n if isinstance(el, ast.Constant) and isinstance(el.value, str):\n names.append(el.value)\n if names:\n self.exports.extend(names)\n 
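The visit_Assign record above also captures module exports: an assignment whose target is `__all__` and whose value is a literal list or tuple of strings contributes export names. A minimal sketch of that detection on a synthetic module:

import ast

mod = ast.parse('__all__ = ["load", "save"]\nother = 1')
exports = []
for node in mod.body:
    if not isinstance(node, ast.Assign):
        continue
    is_all = any(isinstance(t, ast.Name) and t.id == "__all__" for t in node.targets)
    if is_all and isinstance(node.value, (ast.List, ast.Tuple)):
        for el in node.value.elts:
            if isinstance(el, ast.Constant) and isinstance(el.value, str):
                exports.append(el.value)
print(exports)  # ['load', 'save']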
except Exception:\n pass\n self.generic_visit(node)\n\n def _extract_callee_key(self, fn: ast.AST) -> Optional[str]:\n # simple name\n if isinstance(fn, ast.Name):\n return fn.id\n\n # super().method()\n if (\n isinstance(fn, ast.Attribute)\n and isinstance(fn.value, ast.Call)\n and isinstance(fn.value.func, ast.Name)\n and fn.value.func.id == \"super\"\n ):\n meth = fn.attr\n cur_cls = self._cur_class()\n if cur_cls:\n return f\"{self.module}.{cur_cls}.{meth}\"\n return meth\n\n # obj.attr chain\n if isinstance(fn, ast.Attribute):\n parts: List[str] = []\n cur = fn\n while isinstance(cur, ast.Attribute):\n parts.append(cur.attr)\n cur = cur.value\n parts.reverse()\n\n if isinstance(cur, ast.Name):\n base = cur.id\n if base in (\"self\", \"cls\"):\n cur_cls = self._cur_class()\n if cur_cls and parts:\n return f\"{self.module}.{cur_cls}.{parts[-1]}\"\n return f\"{self.module}.{cur_cls}\" if cur_cls else parts[-1]\n if base in self.imports:\n return f\"{base}:{parts[-1]}\" if parts else base\n return f\"{self.module}.{base}.{parts[-1]}\" if parts else base\n # getattr(module, \"name\") heuristic\n if (\n isinstance(fn, ast.Call)\n and isinstance(fn.func, ast.Name)\n and fn.func.id == \"getattr\"\n and fn.args\n and len(fn.args) >= 2\n and isinstance(fn.args[0], ast.Name)\n and isinstance(fn.args[1], ast.Constant)\n and isinstance(fn.args[1].value, str)\n ):\n base = fn.args[0].id\n name = fn.args[1].value\n if base in self.imports:\n return f\"{self.imports[base]}.{name}\"\n cur_cls = self._cur_class()\n if base in (\"self\", \"cls\") and cur_cls:\n return f\"{self.module}.{cur_cls}.{name}\"\n return f\"{self.module}.{base}.{name}\"\n # importlib.import_module(\"pkg.mod\") heuristic\n if (\n isinstance(fn, ast.Call)\n and isinstance(fn.func, ast.Attribute)\n and isinstance(fn.func.value, ast.Name)\n and fn.func.value.id == \"importlib\"\n and fn.func.attr == \"import_module\"\n and fn.args\n and isinstance(fn.args[0], ast.Constant)\n and isinstance(fn.args[0].value, str)\n ):\n mod = str(fn.args[0].value)\n return mod\n return None\n\n\nif __name__ == \"__main__\":\n _cli()","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.datasets_loader","uri":"program://Program_Conditioned_Adapter/module/examples.scripts.datasets_loader#L1-L225","kind":"module","name":"examples.scripts.datasets_loader","path":"examples/scripts/datasets_loader.py","language":"python","start_line":1,"end_line":225,"context_start_line":1,"context_end_line":225,"code":"from __future__ import annotations\n\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Any, Iterable\n\n\ndef _read_json(path: Path) -> Dict[str, Any]:\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read())\n\texcept Exception:\n\t\treturn {}\n\n\ndef _load_local_jsonl(fp: Path, text_key: str = \"text\", max_n: int | None = None) -> List[str]:\n\ttexts: List[str] = []\n\tif not fp.exists():\n\t\treturn texts\n\twith open(fp, \"r\", encoding=\"utf-8\") as fh:\n\t\tfor line in fh:\n\t\t\tline = line.strip()\n\t\t\tif not line:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tobj = json.loads(line)\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\t\ttxt = str(obj.get(text_key) or \"\").strip()\n\t\t\tif txt:\n\t\t\t\ttexts.append(txt)\n\t\t\tif max_n is not None and len(texts) >= int(max_n):\n\t\t\t\tbreak\n\treturn texts\n\n\ndef _load_hf(repo: str, subset: str | None, 
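The `_extract_callee_key` record above includes heuristics for dynamic dispatch: `getattr(mod, "name")` resolves through the import-alias table, and `importlib.import_module("pkg.mod")` yields the module string directly. A condensed demo of the getattr branch (the `imports` alias table here is illustrative):

import ast

imports = {"np": "numpy"}  # alias -> resolved module, as built by visit_Import

call = ast.parse('getattr(np, "linspace")(0, 1)').body[0].value
fn = call.func  # the inner getattr(...) expression
assert isinstance(fn, ast.Call) and isinstance(fn.func, ast.Name) and fn.func.id == "getattr"
base = fn.args[0].id      # "np"
name = fn.args[1].value   # "linspace"
key = f"{imports[base]}.{name}" if base in imports else f"<module>.{base}.{name}"
print(key)  # numpy.linspace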
split: str, max_n: int | None, cache_dir: Path, streaming: bool) -> List[str]:\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\texcept Exception:\n\t\treturn []\n\ttry:\n\t\tif subset:\n\t\t\tds = load_dataset(repo, subset, split=split, cache_dir=str(cache_dir), streaming=streaming)\n\t\telse:\n\t\t\tds = load_dataset(repo, split=split, cache_dir=str(cache_dir), streaming=streaming)\n\texcept Exception:\n\t\treturn []\n\ttexts: List[str] = []\n\tif streaming:\n\t\tcount = 0\n\t\tfor row in ds: # type: ignore\n\t\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\t\tcombined = (prompt + (\"\\n\" + code if code else \"\")).strip()\n\t\t\tif combined:\n\t\t\t\ttexts.append(combined)\n\t\t\t\tcount += 1\n\t\t\tif max_n is not None and count >= int(max_n):\n\t\t\t\tbreak\n\t\treturn texts\n\t# non-streaming\n\ttotal = len(ds) # type: ignore\n\tfor i in range(total):\n\t\trow = ds[i] # type: ignore\n\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\tcombined = (prompt + (\"\\n\" + code if code else \"\")).strip()\n\t\tif combined:\n\t\t\ttexts.append(combined)\n\t\tif max_n is not None and len(texts) >= int(max_n):\n\t\t\tbreak\n\treturn texts\n\n\ndef load_program_texts(example_dir: str, config_path: str | None = None, train_new_only: bool = False, state_path: str | None = None) -> Tuple[List[str], List[str]]:\n\t\"\"\"\n\tLoad training texts from a config that can reference local and HF datasets.\n\tPriority: local first; if missing, fetch from HF (optionally streaming) and store under local_dir.\n\n\tReturns (texts, loaded_source_names)\n\t\"\"\"\n\tex_dir = Path(example_dir).resolve()\n\t# Config discovery\n\tcfg_path = Path(config_path) if config_path else (ex_dir / \"datasets\" / \"config.json\")\n\tcfg = _read_json(cfg_path) if cfg_path.exists() else {}\n\tlocal_dir = Path(cfg.get(\"local_dir\") or (ex_dir / \"datasets\")).resolve()\n\tlocal_dir.mkdir(parents=True, exist_ok=True)\n\tcache_dir = local_dir / \"hf_cache\"\n\tcache_dir.mkdir(parents=True, exist_ok=True)\n\tsources: List[Dict[str, Any]] = list(cfg.get(\"sources\") or [])\n\tstreaming = os.getenv(\"HF_DATASETS_STREAMING\", \"1\") == \"1\"\n\tfull_datasets = os.getenv(\"FULL_DATASETS\", \"1\") == \"1\"\n\t# Empty/0 -> unlimited by default\n\t_default_env = os.getenv(\"DATASET_MAX_N_PER_SOURCE\", \"\") or \"0\"\n\ttry:\n\t\tdefault_max_n_val = int(_default_env)\n\texcept Exception:\n\t\tdefault_max_n_val = 0\n\n\t# Optional program state to support \"train new only\"\n\tseen: List[str] = []\n\tif train_new_only and state_path:\n\t\ttry:\n\t\t\tst = _read_json(Path(state_path))\n\t\t\tseen = list(st.get(\"datasets_seen\") or [])\n\t\texcept Exception:\n\t\t\tseen = []\n\n\ttexts_acc: List[str] = []\n\tloaded_names: List[str] = []\n\tfor src in sources:\n\t\tname = str(src.get(\"name\") or \"\").strip()\n\t\tif not name:\n\t\t\tcontinue\n\t\tif train_new_only and name in seen:\n\t\t\tcontinue\n\t\tif full_datasets:\n\t\t\tmax_n = None\n\t\telse:\n\t\t\tmax_n = src.get(\"max_n\")\n\t\t\tif max_n is None and default_max_n_val > 0:\n\t\t\t\tmax_n = default_max_n_val\n\t\ttext_key = str(src.get(\"text_key\") or \"text\").strip()\n\t\tsubset = src.get(\"subset\")\n\t\tsplit = str(src.get(\"split\") or \"train\").strip()\n\t\t# Determine local file path: explicit 
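`_load_hf` above takes either the streaming path (iterate and stop at the cap, avoiding a full download) or the materialized path (index rows up to the cap), with the same prompt/question/text field fallbacks in both. A hedged usage sketch of the streaming shape; it requires the `datasets` package and network access, and the repo id is only a stand-in:

from datasets import load_dataset  # pip install datasets

ds = load_dataset("openai_humaneval", split="test", streaming=True)
texts, max_n = [], 5
for row in ds:
    # Same field fallbacks as _load_hf: prompt, then question, then text.
    prompt = str(row.get("prompt") or row.get("question") or row.get("text") or "").strip()
    if prompt:
        texts.append(prompt)
    if len(texts) >= max_n:
        break
print(len(texts))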
'path' or derived from name+split\n\t\trel = str(src.get(\"path\") or \"\").strip()\n\t\tif rel:\n\t\t\tfp = Path(rel)\n\t\t\tif not fp.is_absolute():\n\t\t\t\tfp = (local_dir / rel).resolve()\n\t\telse:\n\t\t\tsuffix = f\"_{split}\" if split else \"\"\n\t\t\tfp = (local_dir / f\"{name.replace('/', '_')}{suffix}.jsonl\").resolve()\n\t\t# Try local first\n\t\tout_texts: List[str] = _load_local_jsonl(fp, text_key=text_key, max_n=max_n)\n\t\t# If not present, try HF using 'name' as repo id\n\t\tif not out_texts:\n\t\t\tout_texts = _load_hf(name, subset, split, max_n, cache_dir, streaming=streaming)\n\t\t\t# Persist to local for future runs\n\t\t\tif out_texts:\n\t\t\t\ttry:\n\t\t\t\t\twith open(fp, \"w\", encoding=\"utf-8\") as fh:\n\t\t\t\t\t\tfor t in out_texts:\n\t\t\t\t\t\t\tfh.write(json.dumps({\"text\": t}) + \"\\n\")\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\tif out_texts:\n\t\t\ttexts_acc.extend(out_texts)\n\t\t\tloaded_names.append(f\"{name}:{split}\" if split else name)\n\treturn texts_acc, loaded_names\n\n\ndef iter_program_texts(example_dir: str, config_path: str | None = None, train_new_only: bool = False, state_path: str | None = None) -> Iterable[Tuple[List[str], str]]:\n\t\"\"\"\n\tYields (texts, source_name) one dataset at a time using the same resolution rules as load_program_texts.\n\tRespects HF streaming and per-source max limits.\n\t\"\"\"\n\tex_dir = Path(example_dir).resolve()\n\tcfg_path = Path(config_path) if config_path else (ex_dir / \"datasets\" / \"config.json\")\n\tcfg = _read_json(cfg_path) if cfg_path.exists() else {}\n\tlocal_dir = Path(cfg.get(\"local_dir\") or (ex_dir / \"datasets\")).resolve()\n\tlocal_dir.mkdir(parents=True, exist_ok=True)\n\tcache_dir = local_dir / \"hf_cache\"\n\tcache_dir.mkdir(parents=True, exist_ok=True)\n\tsources: List[Dict[str, Any]] = list(cfg.get(\"sources\") or [])\n\tstreaming = os.getenv(\"HF_DATASETS_STREAMING\", \"1\") == \"1\"\n\tfull_datasets = os.getenv(\"FULL_DATASETS\", \"1\") == \"1\"\n\t_default_env = os.getenv(\"DATASET_MAX_N_PER_SOURCE\", \"\") or \"0\"\n\ttry:\n\t\tdefault_max_n_val = int(_default_env)\n\texcept Exception:\n\t\tdefault_max_n_val = 0\n\tmax_sources = int(os.getenv(\"DATASET_LIMIT_SOURCES\", \"0\") or \"0\") # 0 means no limit\n\n\tseen: List[str] = []\n\tif train_new_only and state_path:\n\t\ttry:\n\t\t\tst = _read_json(Path(state_path))\n\t\t\tseen = list(st.get(\"datasets_seen\") or [])\n\t\texcept Exception:\n\t\t\tseen = []\n\n\tcount_sources = 0\n\tfor src in sources:\n\t\tname = str(src.get(\"name\") or \"\").strip()\n\t\tif not name:\n\t\t\tcontinue\n\t\tif train_new_only and name in seen:\n\t\t\tcontinue\n\t\tif full_datasets:\n\t\t\tmax_n = None\n\t\telse:\n\t\t\tmax_n = src.get(\"max_n\")\n\t\t\tif max_n is None and default_max_n_val > 0:\n\t\t\t\tmax_n = default_max_n_val\n\t\ttext_key = str(src.get(\"text_key\") or \"text\").strip()\n\t\tsubset = src.get(\"subset\")\n\t\tsplit = str(src.get(\"split\") or \"train\").strip()\n\t\trel = str(src.get(\"path\") or \"\").strip()\n\t\tif rel:\n\t\t\tfp = Path(rel)\n\t\t\tif not fp.is_absolute():\n\t\t\t\tfp = (local_dir / rel).resolve()\n\t\telse:\n\t\t\tsuffix = f\"_{split}\" if split else \"\"\n\t\t\tfp = (local_dir / f\"{name.replace('/', '_')}{suffix}.jsonl\").resolve()\n\t\tout_texts: List[str] = _load_local_jsonl(fp, text_key=text_key, max_n=max_n)\n\t\tif not out_texts:\n\t\t\tout_texts = _load_hf(name, subset, split, max_n, cache_dir, streaming=streaming)\n\t\t\tif out_texts:\n\t\t\t\ttry:\n\t\t\t\t\twith open(fp, \"w\", 
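The path resolution above prefers an explicit per-source 'path' (absolute, or relative to local_dir) and otherwise derives a cache filename from the repo name and split. A standalone restatement of that rule:

from pathlib import Path

def local_cache_path(local_dir: Path, name: str, split: str, rel: str = "") -> Path:
    # Explicit 'path' wins; else derive <name with '/' -> '_'>_<split>.jsonl.
    if rel:
        fp = Path(rel)
        return fp if fp.is_absolute() else (local_dir / rel).resolve()
    suffix = f"_{split}" if split else ""
    return (local_dir / f"{name.replace('/', '_')}{suffix}.jsonl").resolve()

print(local_cache_path(Path("datasets"), "org/dataset", "train"))
# -> .../datasets/org_dataset_train.jsonl

Replacing '/' keeps HF repo ids like "org/dataset" flat on disk, so one local_dir holds every source side by side.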
encoding=\"utf-8\") as fh:\n\t\t\t\t\t\tfor t in out_texts:\n\t\t\t\t\t\t\tfh.write(json.dumps({\"text\": t}) + \"\\n\")\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\tif out_texts:\n\t\t\tyield out_texts, (f\"{name}:{split}\" if split else name)\n\t\t\tcount_sources += 1\n\t\t\tif max_sources and count_sources >= max_sources:\n\t\t\t\tbreak\n","source_hash":"a2681e7cd4a6637fe7e308616893d0611160a5134ef5301a3c142c385e994bee","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.datasets_loader._read_json","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.datasets_loader._read_json#L9-L14","kind":"function","name":"_read_json","path":"examples/scripts/datasets_loader.py","language":"python","start_line":9,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"from __future__ import annotations\n\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Any, Iterable\n\n\ndef _read_json(path: Path) -> Dict[str, Any]:\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read())\n\texcept Exception:\n\t\treturn {}\n\n\ndef _load_local_jsonl(fp: Path, text_key: str = \"text\", max_n: int | None = None) -> List[str]:\n\ttexts: List[str] = []\n\tif not fp.exists():\n\t\treturn texts\n\twith open(fp, \"r\", encoding=\"utf-8\") as fh:\n\t\tfor line in fh:\n\t\t\tline = line.strip()\n\t\t\tif not line:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tobj = json.loads(line)\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\t\ttxt = str(obj.get(text_key) or \"\").strip()\n\t\t\tif txt:\n\t\t\t\ttexts.append(txt)\n\t\t\tif max_n is not None and len(texts) >= int(max_n):\n\t\t\t\tbreak","source_hash":"a2681e7cd4a6637fe7e308616893d0611160a5134ef5301a3c142c385e994bee","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.datasets_loader._load_local_jsonl","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.datasets_loader._load_local_jsonl#L17-L35","kind":"function","name":"_load_local_jsonl","path":"examples/scripts/datasets_loader.py","language":"python","start_line":17,"end_line":35,"context_start_line":1,"context_end_line":55,"code":"from __future__ import annotations\n\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Any, Iterable\n\n\ndef _read_json(path: Path) -> Dict[str, Any]:\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read())\n\texcept Exception:\n\t\treturn {}\n\n\ndef _load_local_jsonl(fp: Path, text_key: str = \"text\", max_n: int | None = None) -> List[str]:\n\ttexts: List[str] = []\n\tif not fp.exists():\n\t\treturn texts\n\twith open(fp, \"r\", encoding=\"utf-8\") as fh:\n\t\tfor line in fh:\n\t\t\tline = line.strip()\n\t\t\tif not line:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tobj = json.loads(line)\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\t\ttxt = str(obj.get(text_key) or \"\").strip()\n\t\t\tif txt:\n\t\t\t\ttexts.append(txt)\n\t\t\tif max_n is not None and len(texts) >= int(max_n):\n\t\t\t\tbreak\n\treturn texts\n\n\ndef _load_hf(repo: str, subset: str | None, split: str, max_n: int | None, cache_dir: Path, streaming: bool) -> List[str]:\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\texcept Exception:\n\t\treturn []\n\ttry:\n\t\tif subset:\n\t\t\tds = load_dataset(repo, subset, split=split, cache_dir=str(cache_dir), streaming=streaming)\n\t\telse:\n\t\t\tds = load_dataset(repo, split=split, 
cache_dir=str(cache_dir), streaming=streaming)\n\texcept Exception:\n\t\treturn []\n\ttexts: List[str] = []\n\tif streaming:\n\t\tcount = 0\n\t\tfor row in ds: # type: ignore\n\t\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()","source_hash":"a2681e7cd4a6637fe7e308616893d0611160a5134ef5301a3c142c385e994bee","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.datasets_loader._load_hf","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.datasets_loader._load_hf#L38-L74","kind":"function","name":"_load_hf","path":"examples/scripts/datasets_loader.py","language":"python","start_line":38,"end_line":74,"context_start_line":18,"context_end_line":94,"code":"\ttexts: List[str] = []\n\tif not fp.exists():\n\t\treturn texts\n\twith open(fp, \"r\", encoding=\"utf-8\") as fh:\n\t\tfor line in fh:\n\t\t\tline = line.strip()\n\t\t\tif not line:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tobj = json.loads(line)\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\t\t\ttxt = str(obj.get(text_key) or \"\").strip()\n\t\t\tif txt:\n\t\t\t\ttexts.append(txt)\n\t\t\tif max_n is not None and len(texts) >= int(max_n):\n\t\t\t\tbreak\n\treturn texts\n\n\ndef _load_hf(repo: str, subset: str | None, split: str, max_n: int | None, cache_dir: Path, streaming: bool) -> List[str]:\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\texcept Exception:\n\t\treturn []\n\ttry:\n\t\tif subset:\n\t\t\tds = load_dataset(repo, subset, split=split, cache_dir=str(cache_dir), streaming=streaming)\n\t\telse:\n\t\t\tds = load_dataset(repo, split=split, cache_dir=str(cache_dir), streaming=streaming)\n\texcept Exception:\n\t\treturn []\n\ttexts: List[str] = []\n\tif streaming:\n\t\tcount = 0\n\t\tfor row in ds: # type: ignore\n\t\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\t\tcombined = (prompt + (\"\\n\" + code if code else \"\")).strip()\n\t\t\tif combined:\n\t\t\t\ttexts.append(combined)\n\t\t\t\tcount += 1\n\t\t\tif max_n is not None and count >= int(max_n):\n\t\t\t\tbreak\n\t\treturn texts\n\t# non-streaming\n\ttotal = len(ds) # type: ignore\n\tfor i in range(total):\n\t\trow = ds[i] # type: ignore\n\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\tcombined = (prompt + (\"\\n\" + code if code else \"\")).strip()\n\t\tif combined:\n\t\t\ttexts.append(combined)\n\t\tif max_n is not None and len(texts) >= int(max_n):\n\t\t\tbreak\n\treturn texts\n\n\ndef load_program_texts(example_dir: str, config_path: str | None = None, train_new_only: bool = False, state_path: str | None = None) -> Tuple[List[str], List[str]]:\n\t\"\"\"\n\tLoad training texts from a config that can reference local and HF datasets.\n\tPriority: local first; if missing, fetch from HF (optionally streaming) and store under local_dir.\n\n\tReturns (texts, loaded_source_names)\n\t\"\"\"\n\tex_dir = Path(example_dir).resolve()\n\t# Config discovery\n\tcfg_path = Path(config_path) if config_path else (ex_dir / \"datasets\" / \"config.json\")\n\tcfg = _read_json(cfg_path) if cfg_path.exists() else {}\n\tlocal_dir = Path(cfg.get(\"local_dir\") or (ex_dir / \"datasets\")).resolve()\n\tlocal_dir.mkdir(parents=True, 
exist_ok=True)\n\tcache_dir = local_dir / \"hf_cache\"\n\tcache_dir.mkdir(parents=True, exist_ok=True)\n\tsources: List[Dict[str, Any]] = list(cfg.get(\"sources\") or [])\n\tstreaming = os.getenv(\"HF_DATASETS_STREAMING\", \"1\") == \"1\"\n\tfull_datasets = os.getenv(\"FULL_DATASETS\", \"1\") == \"1\"","source_hash":"a2681e7cd4a6637fe7e308616893d0611160a5134ef5301a3c142c385e994bee","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.datasets_loader.load_program_texts","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.datasets_loader.load_program_texts#L77-L153","kind":"function","name":"load_program_texts","path":"examples/scripts/datasets_loader.py","language":"python","start_line":77,"end_line":153,"context_start_line":57,"context_end_line":173,"code":"\t\t\tif combined:\n\t\t\t\ttexts.append(combined)\n\t\t\t\tcount += 1\n\t\t\tif max_n is not None and count >= int(max_n):\n\t\t\t\tbreak\n\t\treturn texts\n\t# non-streaming\n\ttotal = len(ds) # type: ignore\n\tfor i in range(total):\n\t\trow = ds[i] # type: ignore\n\t\tprompt = str(row.get(\"prompt\") or row.get(\"question\") or row.get(\"text\") or \"\").strip()\n\t\tcode = str(row.get(\"code\") or row.get(\"solution\") or \"\").strip()\n\t\tcombined = (prompt + (\"\\n\" + code if code else \"\")).strip()\n\t\tif combined:\n\t\t\ttexts.append(combined)\n\t\tif max_n is not None and len(texts) >= int(max_n):\n\t\t\tbreak\n\treturn texts\n\n\ndef load_program_texts(example_dir: str, config_path: str | None = None, train_new_only: bool = False, state_path: str | None = None) -> Tuple[List[str], List[str]]:\n\t\"\"\"\n\tLoad training texts from a config that can reference local and HF datasets.\n\tPriority: local first; if missing, fetch from HF (optionally streaming) and store under local_dir.\n\n\tReturns (texts, loaded_source_names)\n\t\"\"\"\n\tex_dir = Path(example_dir).resolve()\n\t# Config discovery\n\tcfg_path = Path(config_path) if config_path else (ex_dir / \"datasets\" / \"config.json\")\n\tcfg = _read_json(cfg_path) if cfg_path.exists() else {}\n\tlocal_dir = Path(cfg.get(\"local_dir\") or (ex_dir / \"datasets\")).resolve()\n\tlocal_dir.mkdir(parents=True, exist_ok=True)\n\tcache_dir = local_dir / \"hf_cache\"\n\tcache_dir.mkdir(parents=True, exist_ok=True)\n\tsources: List[Dict[str, Any]] = list(cfg.get(\"sources\") or [])\n\tstreaming = os.getenv(\"HF_DATASETS_STREAMING\", \"1\") == \"1\"\n\tfull_datasets = os.getenv(\"FULL_DATASETS\", \"1\") == \"1\"\n\t# Empty/0 -> unlimited by default\n\t_default_env = os.getenv(\"DATASET_MAX_N_PER_SOURCE\", \"\") or \"0\"\n\ttry:\n\t\tdefault_max_n_val = int(_default_env)\n\texcept Exception:\n\t\tdefault_max_n_val = 0\n\n\t# Optional program state to support \"train new only\"\n\tseen: List[str] = []\n\tif train_new_only and state_path:\n\t\ttry:\n\t\t\tst = _read_json(Path(state_path))\n\t\t\tseen = list(st.get(\"datasets_seen\") or [])\n\t\texcept Exception:\n\t\t\tseen = []\n\n\ttexts_acc: List[str] = []\n\tloaded_names: List[str] = []\n\tfor src in sources:\n\t\tname = str(src.get(\"name\") or \"\").strip()\n\t\tif not name:\n\t\t\tcontinue\n\t\tif train_new_only and name in seen:\n\t\t\tcontinue\n\t\tif full_datasets:\n\t\t\tmax_n = None\n\t\telse:\n\t\t\tmax_n = src.get(\"max_n\")\n\t\t\tif max_n is None and default_max_n_val > 0:\n\t\t\t\tmax_n = default_max_n_val\n\t\ttext_key = str(src.get(\"text_key\") or \"text\").strip()\n\t\tsubset = src.get(\"subset\")\n\t\tsplit = str(src.get(\"split\") or 
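load_program_texts is steered by environment flags, parsed tolerantly as shown above: HF_DATASETS_STREAMING and FULL_DATASETS default to on, and an empty or "0" DATASET_MAX_N_PER_SOURCE means unlimited. A small sketch of that parsing in isolation:

import os

def env_default_max_n() -> int:
    # Mirrors the tolerant parse above: empty or "0" -> unlimited (0).
    raw = os.getenv("DATASET_MAX_N_PER_SOURCE", "") or "0"
    try:
        return int(raw)
    except Exception:
        return 0

streaming = os.getenv("HF_DATASETS_STREAMING", "1") == "1"   # on by default
full_datasets = os.getenv("FULL_DATASETS", "1") == "1"       # ignore per-source caps
print(streaming, full_datasets, env_default_max_n())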
\"train\").strip()\n\t\t# Determine local file path: explicit 'path' or derived from name+split\n\t\trel = str(src.get(\"path\") or \"\").strip()\n\t\tif rel:\n\t\t\tfp = Path(rel)\n\t\t\tif not fp.is_absolute():\n\t\t\t\tfp = (local_dir / rel).resolve()\n\t\telse:\n\t\t\tsuffix = f\"_{split}\" if split else \"\"\n\t\t\tfp = (local_dir / f\"{name.replace('/', '_')}{suffix}.jsonl\").resolve()\n\t\t# Try local first\n\t\tout_texts: List[str] = _load_local_jsonl(fp, text_key=text_key, max_n=max_n)\n\t\t# If not present, try HF using 'name' as repo id\n\t\tif not out_texts:\n\t\t\tout_texts = _load_hf(name, subset, split, max_n, cache_dir, streaming=streaming)\n\t\t\t# Persist to local for future runs\n\t\t\tif out_texts:\n\t\t\t\ttry:\n\t\t\t\t\twith open(fp, \"w\", encoding=\"utf-8\") as fh:\n\t\t\t\t\t\tfor t in out_texts:\n\t\t\t\t\t\t\tfh.write(json.dumps({\"text\": t}) + \"\\n\")\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\tif out_texts:\n\t\t\ttexts_acc.extend(out_texts)\n\t\t\tloaded_names.append(f\"{name}:{split}\" if split else name)\n\treturn texts_acc, loaded_names\n\n\ndef iter_program_texts(example_dir: str, config_path: str | None = None, train_new_only: bool = False, state_path: str | None = None) -> Iterable[Tuple[List[str], str]]:\n\t\"\"\"\n\tYields (texts, source_name) one dataset at a time using the same resolution rules as load_program_texts.\n\tRespects HF streaming and per-source max limits.\n\t\"\"\"\n\tex_dir = Path(example_dir).resolve()\n\tcfg_path = Path(config_path) if config_path else (ex_dir / \"datasets\" / \"config.json\")\n\tcfg = _read_json(cfg_path) if cfg_path.exists() else {}\n\tlocal_dir = Path(cfg.get(\"local_dir\") or (ex_dir / \"datasets\")).resolve()\n\tlocal_dir.mkdir(parents=True, exist_ok=True)\n\tcache_dir = local_dir / \"hf_cache\"\n\tcache_dir.mkdir(parents=True, exist_ok=True)\n\tsources: List[Dict[str, Any]] = list(cfg.get(\"sources\") or [])\n\tstreaming = os.getenv(\"HF_DATASETS_STREAMING\", \"1\") == \"1\"\n\tfull_datasets = os.getenv(\"FULL_DATASETS\", \"1\") == \"1\"\n\t_default_env = os.getenv(\"DATASET_MAX_N_PER_SOURCE\", \"\") or \"0\"\n\ttry:\n\t\tdefault_max_n_val = int(_default_env)","source_hash":"a2681e7cd4a6637fe7e308616893d0611160a5134ef5301a3c142c385e994bee","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.scripts.datasets_loader.iter_program_texts","uri":"program://Program_Conditioned_Adapter/function/examples.scripts.datasets_loader.iter_program_texts#L156-L224","kind":"function","name":"iter_program_texts","path":"examples/scripts/datasets_loader.py","language":"python","start_line":156,"end_line":224,"context_start_line":136,"context_end_line":225,"code":"\t\t\tfp = (local_dir / f\"{name.replace('/', '_')}{suffix}.jsonl\").resolve()\n\t\t# Try local first\n\t\tout_texts: List[str] = _load_local_jsonl(fp, text_key=text_key, max_n=max_n)\n\t\t# If not present, try HF using 'name' as repo id\n\t\tif not out_texts:\n\t\t\tout_texts = _load_hf(name, subset, split, max_n, cache_dir, streaming=streaming)\n\t\t\t# Persist to local for future runs\n\t\t\tif out_texts:\n\t\t\t\ttry:\n\t\t\t\t\twith open(fp, \"w\", encoding=\"utf-8\") as fh:\n\t\t\t\t\t\tfor t in out_texts:\n\t\t\t\t\t\t\tfh.write(json.dumps({\"text\": t}) + \"\\n\")\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\tif out_texts:\n\t\t\ttexts_acc.extend(out_texts)\n\t\t\tloaded_names.append(f\"{name}:{split}\" if split else name)\n\treturn texts_acc, loaded_names\n\n\ndef iter_program_texts(example_dir: str, config_path: str | 
None = None, train_new_only: bool = False, state_path: str | None = None) -> Iterable[Tuple[List[str], str]]:\n\t\"\"\"\n\tYields (texts, source_name) one dataset at a time using the same resolution rules as load_program_texts.\n\tRespects HF streaming and per-source max limits.\n\t\"\"\"\n\tex_dir = Path(example_dir).resolve()\n\tcfg_path = Path(config_path) if config_path else (ex_dir / \"datasets\" / \"config.json\")\n\tcfg = _read_json(cfg_path) if cfg_path.exists() else {}\n\tlocal_dir = Path(cfg.get(\"local_dir\") or (ex_dir / \"datasets\")).resolve()\n\tlocal_dir.mkdir(parents=True, exist_ok=True)\n\tcache_dir = local_dir / \"hf_cache\"\n\tcache_dir.mkdir(parents=True, exist_ok=True)\n\tsources: List[Dict[str, Any]] = list(cfg.get(\"sources\") or [])\n\tstreaming = os.getenv(\"HF_DATASETS_STREAMING\", \"1\") == \"1\"\n\tfull_datasets = os.getenv(\"FULL_DATASETS\", \"1\") == \"1\"\n\t_default_env = os.getenv(\"DATASET_MAX_N_PER_SOURCE\", \"\") or \"0\"\n\ttry:\n\t\tdefault_max_n_val = int(_default_env)\n\texcept Exception:\n\t\tdefault_max_n_val = 0\n\tmax_sources = int(os.getenv(\"DATASET_LIMIT_SOURCES\", \"0\") or \"0\") # 0 means no limit\n\n\tseen: List[str] = []\n\tif train_new_only and state_path:\n\t\ttry:\n\t\t\tst = _read_json(Path(state_path))\n\t\t\tseen = list(st.get(\"datasets_seen\") or [])\n\t\texcept Exception:\n\t\t\tseen = []\n\n\tcount_sources = 0\n\tfor src in sources:\n\t\tname = str(src.get(\"name\") or \"\").strip()\n\t\tif not name:\n\t\t\tcontinue\n\t\tif train_new_only and name in seen:\n\t\t\tcontinue\n\t\tif full_datasets:\n\t\t\tmax_n = None\n\t\telse:\n\t\t\tmax_n = src.get(\"max_n\")\n\t\t\tif max_n is None and default_max_n_val > 0:\n\t\t\t\tmax_n = default_max_n_val\n\t\ttext_key = str(src.get(\"text_key\") or \"text\").strip()\n\t\tsubset = src.get(\"subset\")\n\t\tsplit = str(src.get(\"split\") or \"train\").strip()\n\t\trel = str(src.get(\"path\") or \"\").strip()\n\t\tif rel:\n\t\t\tfp = Path(rel)\n\t\t\tif not fp.is_absolute():\n\t\t\t\tfp = (local_dir / rel).resolve()\n\t\telse:\n\t\t\tsuffix = f\"_{split}\" if split else \"\"\n\t\t\tfp = (local_dir / f\"{name.replace('/', '_')}{suffix}.jsonl\").resolve()\n\t\tout_texts: List[str] = _load_local_jsonl(fp, text_key=text_key, max_n=max_n)\n\t\tif not out_texts:\n\t\t\tout_texts = _load_hf(name, subset, split, max_n, cache_dir, streaming=streaming)\n\t\t\tif out_texts:\n\t\t\t\ttry:\n\t\t\t\t\twith open(fp, \"w\", encoding=\"utf-8\") as fh:\n\t\t\t\t\t\tfor t in out_texts:\n\t\t\t\t\t\t\tfh.write(json.dumps({\"text\": t}) + \"\\n\")\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\tif out_texts:\n\t\t\tyield out_texts, (f\"{name}:{split}\" if split else name)\n\t\t\tcount_sources += 1\n\t\t\tif max_sources and count_sources >= max_sources:\n\t\t\t\tbreak\n","source_hash":"a2681e7cd4a6637fe7e308616893d0611160a5134ef5301a3c142c385e994bee","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.emit_repository_knowledge","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_qa.emit_repository_knowledge#L1-L103","kind":"module","name":"examples.python_repo_grounded_qa.emit_repository_knowledge","path":"examples/python_repo_grounded_qa/emit_repository_knowledge.py","language":"python","start_line":1,"end_line":103,"context_start_line":1,"context_end_line":103,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom datetime import datetime, timezone\nfrom typing import 
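iter_program_texts yields one (texts, source_name) pair per resolved source and honors DATASET_LIMIT_SOURCES as a stop condition. A hedged usage sketch, assuming it is run from the repository root so the examples package is importable:

import os
from examples.scripts.datasets_loader import iter_program_texts

os.environ["DATASET_LIMIT_SOURCES"] = "2"   # stop after two sources
for texts, source_name in iter_program_texts("examples/python_repo_grounded_qa"):
    print(source_name, len(texts))

The generator form lets training consume one dataset at a time instead of accumulating every source in memory, which is the main difference from load_program_texts.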
Dict, Any\n\n\ndef _resolve_pg_ctor(pg_backend: str):\n mod, _, attr = pg_backend.partition(\":\")\n if not mod or not attr:\n raise ValueError(f\"Invalid ProgramGraph backend '{pg_backend}', expected 'module:ClassName'\")\n m = __import__(mod, fromlist=[attr])\n return getattr(m, attr)\n\n\ndef emit_repository_knowledge(repo_root: str, out_path: str, pg_backend: str) -> str:\n \"\"\"Emit a consolidated repository_knowledge.json with entities, edges, and artifact spans.\"\"\"\n repo_root_abs = os.path.abspath(repo_root)\n ctor = _resolve_pg_ctor(pg_backend)\n pg = ctor(repo_root_abs, ignore=None)\n ents = []\n ents_by_id: Dict[str, Any] = {}\n # Entities\n for e in pg.entities():\n rec = {\n \"id\": e.id,\n \"name\": e.name,\n \"kind\": e.kind,\n \"owner\": e.owner,\n \"uri\": e.uri,\n }\n ents.append(rec)\n ents_by_id[e.id] = rec\n # Edges\n eds = []\n try:\n for ed in pg.edges():\n eds.append({\"src\": ed.src, \"dst\": ed.dst, \"type\": ed.type})\n except Exception:\n eds = []\n # Artifact anchors per entity (best-effort)\n anchors = {}\n for e in pg.entities():\n try:\n ra = pg.resolve(e.uri)\n except Exception:\n continue\n anchors[e.id] = {\n \"artifact_uri\": ra.artifact_uri,\n \"path\": ra.artifact_uri.split(\"/artifact/\", 1)[-1],\n \"span\": {\"start\": int(ra.span.start_line), \"end\": int(ra.span.end_line)},\n \"hash\": ra.hash,\n }\n # Artifacts list (lightweight)\n arts = []\n try:\n for a in pg.artifacts(\"source\"):\n arts.append({\n \"uri\": a.uri,\n \"type\": a.type,\n \"hash\": getattr(a, \"hash\", \"\"),\n \"path\": a.uri.split(\"/artifact/\", 1)[-1],\n })\n except Exception:\n arts = []\n obj = {\n \"schema_version\": 1,\n \"program_id\": getattr(pg, \"program_id\", Path(repo_root_abs).name),\n \"generated_at\": datetime.now(timezone.utc).isoformat().replace(\"+00:00\", \"Z\"),\n \"repo_root\": repo_root_abs,\n \"entities\": ents,\n \"edges\": eds,\n \"anchors\": anchors,\n \"artifacts\": arts,\n }\n os.makedirs(os.path.dirname(out_path), exist_ok=True)\n with open(out_path, \"w\", encoding=\"utf-8\") as fh:\n fh.write(json.dumps(obj, indent=2))\n return out_path\n\n\ndef main() -> None:\n # CLI: python emit_repository_knowledge.py \n if len(sys.argv) < 4:\n print(\n \"usage: python emit_repository_knowledge.py \",\n file=sys.stderr,\n )\n sys.exit(2)\n repo_root = sys.argv[1]\n out_path = sys.argv[2]\n pg_backend = sys.argv[3]\n out = emit_repository_knowledge(repo_root, out_path, pg_backend)\n print(out)\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"788036183675721d2a0538396deab39f9584c71e443b631b06d85b030c5bb4af","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.emit_repository_knowledge._resolve_pg_ctor","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.emit_repository_knowledge._resolve_pg_ctor#L11-L16","kind":"function","name":"_resolve_pg_ctor","path":"examples/python_repo_grounded_qa/emit_repository_knowledge.py","language":"python","start_line":11,"end_line":16,"context_start_line":1,"context_end_line":36,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom datetime import datetime, timezone\nfrom typing import Dict, Any\n\n\ndef _resolve_pg_ctor(pg_backend: str):\n mod, _, attr = pg_backend.partition(\":\")\n if not mod or not attr:\n raise ValueError(f\"Invalid ProgramGraph backend '{pg_backend}', expected 'module:ClassName'\")\n m = __import__(mod, fromlist=[attr])\n return getattr(m, 
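The "module:ClassName" convention resolved by `_resolve_pg_ctor` above can be restated with importlib, which is equivalent to the `__import__(mod, fromlist=[attr])` form in the source. The demo target is a stdlib class, chosen only so the sketch runs anywhere:

import importlib

def resolve_ctor(spec: str):
    mod, _, attr = spec.partition(":")
    if not mod or not attr:
        raise ValueError(f"Invalid ProgramGraph backend '{spec}', expected 'module:ClassName'")
    return getattr(importlib.import_module(mod), attr)

JSONDecoder = resolve_ctor("json:JSONDecoder")  # stdlib target, demo only
print(JSONDecoder)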
attr)\n\n\ndef emit_repository_knowledge(repo_root: str, out_path: str, pg_backend: str) -> str:\n \"\"\"Emit a consolidated repository_knowledge.json with entities, edges, and artifact spans.\"\"\"\n repo_root_abs = os.path.abspath(repo_root)\n ctor = _resolve_pg_ctor(pg_backend)\n pg = ctor(repo_root_abs, ignore=None)\n ents = []\n ents_by_id: Dict[str, Any] = {}\n # Entities\n for e in pg.entities():\n rec = {\n \"id\": e.id,\n \"name\": e.name,\n \"kind\": e.kind,\n \"owner\": e.owner,\n \"uri\": e.uri,\n }\n ents.append(rec)\n ents_by_id[e.id] = rec","source_hash":"788036183675721d2a0538396deab39f9584c71e443b631b06d85b030c5bb4af","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.emit_repository_knowledge.emit_repository_knowledge","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.emit_repository_knowledge.emit_repository_knowledge#L19-L82","kind":"function","name":"emit_repository_knowledge","path":"examples/python_repo_grounded_qa/emit_repository_knowledge.py","language":"python","start_line":19,"end_line":82,"context_start_line":1,"context_end_line":102,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom datetime import datetime, timezone\nfrom typing import Dict, Any\n\n\ndef _resolve_pg_ctor(pg_backend: str):\n mod, _, attr = pg_backend.partition(\":\")\n if not mod or not attr:\n raise ValueError(f\"Invalid ProgramGraph backend '{pg_backend}', expected 'module:ClassName'\")\n m = __import__(mod, fromlist=[attr])\n return getattr(m, attr)\n\n\ndef emit_repository_knowledge(repo_root: str, out_path: str, pg_backend: str) -> str:\n \"\"\"Emit a consolidated repository_knowledge.json with entities, edges, and artifact spans.\"\"\"\n repo_root_abs = os.path.abspath(repo_root)\n ctor = _resolve_pg_ctor(pg_backend)\n pg = ctor(repo_root_abs, ignore=None)\n ents = []\n ents_by_id: Dict[str, Any] = {}\n # Entities\n for e in pg.entities():\n rec = {\n \"id\": e.id,\n \"name\": e.name,\n \"kind\": e.kind,\n \"owner\": e.owner,\n \"uri\": e.uri,\n }\n ents.append(rec)\n ents_by_id[e.id] = rec\n # Edges\n eds = []\n try:\n for ed in pg.edges():\n eds.append({\"src\": ed.src, \"dst\": ed.dst, \"type\": ed.type})\n except Exception:\n eds = []\n # Artifact anchors per entity (best-effort)\n anchors = {}\n for e in pg.entities():\n try:\n ra = pg.resolve(e.uri)\n except Exception:\n continue\n anchors[e.id] = {\n \"artifact_uri\": ra.artifact_uri,\n \"path\": ra.artifact_uri.split(\"/artifact/\", 1)[-1],\n \"span\": {\"start\": int(ra.span.start_line), \"end\": int(ra.span.end_line)},\n \"hash\": ra.hash,\n }\n # Artifacts list (lightweight)\n arts = []\n try:\n for a in pg.artifacts(\"source\"):\n arts.append({\n \"uri\": a.uri,\n \"type\": a.type,\n \"hash\": getattr(a, \"hash\", \"\"),\n \"path\": a.uri.split(\"/artifact/\", 1)[-1],\n })\n except Exception:\n arts = []\n obj = {\n \"schema_version\": 1,\n \"program_id\": getattr(pg, \"program_id\", Path(repo_root_abs).name),\n \"generated_at\": datetime.now(timezone.utc).isoformat().replace(\"+00:00\", \"Z\"),\n \"repo_root\": repo_root_abs,\n \"entities\": ents,\n \"edges\": eds,\n \"anchors\": anchors,\n \"artifacts\": arts,\n }\n os.makedirs(os.path.dirname(out_path), exist_ok=True)\n with open(out_path, \"w\", encoding=\"utf-8\") as fh:\n fh.write(json.dumps(obj, indent=2))\n return out_path\n\n\ndef main() -> None:\n # CLI: python emit_repository_knowledge.py \n if len(sys.argv) < 
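On the consuming side, the object written by emit_repository_knowledge can be joined back together: entities by id, with anchors giving each entity's artifact path and line span. A sketch under the assumption that the file was emitted to an artifacts directory (the path is illustrative); the keys follow the structure built above:

import json

with open("artifacts/repository_knowledge.json", "r", encoding="utf-8") as fh:
    rk = json.load(fh)

by_id = {e["id"]: e for e in rk["entities"]}
for ent_id, anchor in rk["anchors"].items():
    span = anchor["span"]
    print(by_id[ent_id]["name"], f'{anchor["path"]}:{span["start"]}-{span["end"]}')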
4:\n print(\n \"usage: python emit_repository_knowledge.py \",\n file=sys.stderr,\n )\n sys.exit(2)\n repo_root = sys.argv[1]\n out_path = sys.argv[2]\n pg_backend = sys.argv[3]\n out = emit_repository_knowledge(repo_root, out_path, pg_backend)\n print(out)\n\n\nif __name__ == \"__main__\":\n main()\n","source_hash":"788036183675721d2a0538396deab39f9584c71e443b631b06d85b030c5bb4af","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.emit_repository_knowledge.main","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.emit_repository_knowledge.main#L85-L97","kind":"function","name":"main","path":"examples/python_repo_grounded_qa/emit_repository_knowledge.py","language":"python","start_line":85,"end_line":97,"context_start_line":65,"context_end_line":103,"code":" \"path\": a.uri.split(\"/artifact/\", 1)[-1],\n })\n except Exception:\n arts = []\n obj = {\n \"schema_version\": 1,\n \"program_id\": getattr(pg, \"program_id\", Path(repo_root_abs).name),\n \"generated_at\": datetime.now(timezone.utc).isoformat().replace(\"+00:00\", \"Z\"),\n \"repo_root\": repo_root_abs,\n \"entities\": ents,\n \"edges\": eds,\n \"anchors\": anchors,\n \"artifacts\": arts,\n }\n os.makedirs(os.path.dirname(out_path), exist_ok=True)\n with open(out_path, \"w\", encoding=\"utf-8\") as fh:\n fh.write(json.dumps(obj, indent=2))\n return out_path\n\n\ndef main() -> None:\n # CLI: python emit_repository_knowledge.py \n if len(sys.argv) < 4:\n print(\n \"usage: python emit_repository_knowledge.py \",\n file=sys.stderr,\n )\n sys.exit(2)\n repo_root = sys.argv[1]\n out_path = sys.argv[2]\n pg_backend = sys.argv[3]\n out = emit_repository_knowledge(repo_root, out_path, pg_backend)\n print(out)\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"788036183675721d2a0538396deab39f9584c71e443b631b06d85b030c5bb4af","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_qa.repo_state#L1-L183","kind":"module","name":"examples.python_repo_grounded_qa.repo_state","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":1,"end_line":183,"context_start_line":1,"context_end_line":183,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field, asdict\nfrom typing import Dict, List, Optional, Tuple, Set, Any\nimport os\nimport json\nimport time\nimport math\nimport numpy as np\n\n\ndef _now_ts() -> float:\n try:\n return time.time()\n except Exception:\n return 0.0\n\n\ndef _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n\n\n@dataclass\nclass RepoState:\n version: int = 1\n repo_root: str = \"\"\n # Monotone/idempotent components\n candidates_modules: Set[str] = field(default_factory=set)\n candidates_files: Set[str] = field(default_factory=set)\n facts: Set[Tuple[str, int, int]] = field(default_factory=set) # (rel_path, a, b)\n beh_events: List[Dict[str, Any]] = field(default_factory=list) # append-only\n # Vector view (anytime): keep a unit vector and a running weight\n vec: Optional[List[float]] = None\n vec_weight: float = 0.0\n # Capacity budget H\n H: float = 0.0\n # Last updated\n updated_at: float = 0.0\n\n def checksum(self) -> str:\n try:\n base = {\n \"m\": sorted(list(self.candidates_modules)),\n \"f\": sorted(list(self.candidates_files)),\n \"facts\": sorted([(p, 
int(a), int(b)) for (p, a, b) in self.facts]),\n \"vw\": float(self.vec_weight),\n \"vl\": len(self.vec or []),\n \"H\": float(self.H),\n }\n raw = json.dumps(base, sort_keys=True)\n import hashlib as _hh # local\n return _hh.sha1(raw.encode(\"utf-8\", errors=\"ignore\")).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _default_state_path(repo_root: str) -> str:\n try:\n return os.path.join(os.path.abspath(repo_root), \".repo_state.json\")\n except Exception:\n return \".repo_state.json\"\n\n\ndef load_repo_state(repo_root: str, path: Optional[str] = None) -> RepoState:\n p = path or _default_state_path(repo_root)\n try:\n if os.path.isfile(p):\n obj = json.loads(open(p, \"r\", encoding=\"utf-8\").read())\n st = RepoState(\n version=int(obj.get(\"version\", 1)),\n repo_root=str(obj.get(\"repo_root\") or repo_root),\n candidates_modules=set(obj.get(\"candidates_modules\", []) or []),\n candidates_files=set(obj.get(\"candidates_files\", []) or []),\n facts=set((t[0], int(t[1]), int(t[2])) for t in (obj.get(\"facts\", []) or [])),\n beh_events=list(obj.get(\"beh_events\", []) or []),\n vec=(obj.get(\"vec\") if isinstance(obj.get(\"vec\"), list) else None),\n vec_weight=float(obj.get(\"vec_weight\", 0.0) or 0.0),\n H=float(obj.get(\"H\", 0.0) or 0.0),\n updated_at=float(obj.get(\"updated_at\", 0.0) or 0.0),\n )\n return st\n except Exception:\n pass\n return RepoState(version=1, repo_root=os.path.abspath(repo_root))\n\n\ndef save_repo_state(state: RepoState, path: Optional[str] = None) -> None:\n p = path or _default_state_path(state.repo_root)\n try:\n state.updated_at = _now_ts()\n os.makedirs(os.path.dirname(p), exist_ok=True)\n except Exception:\n pass\n try:\n obj = {\n \"version\": state.version,\n \"repo_root\": state.repo_root,\n \"candidates_modules\": sorted(list(state.candidates_modules)),\n \"candidates_files\": sorted(list(state.candidates_files)),\n \"facts\": sorted([(p, int(a), int(b)) for (p, a, b) in state.facts]),\n \"beh_events\": state.beh_events,\n \"vec\": state.vec,\n \"vec_weight\": float(state.vec_weight),\n \"H\": float(state.H),\n \"updated_at\": float(state.updated_at),\n }\n open(p, \"w\", encoding=\"utf-8\").write(json.dumps(obj, indent=2))\n except Exception:\n pass\n\n\ndef join_repo_states(old: RepoState, new: RepoState) -> RepoState:\n st = RepoState(version=max(int(old.version), int(new.version)))\n st.repo_root = old.repo_root or new.repo_root\n st.candidates_modules = set(old.candidates_modules) | set(new.candidates_modules)\n st.candidates_files = set(old.candidates_files) | set(new.candidates_files)\n st.facts = set(old.facts) | set(new.facts)\n st.beh_events = list(old.beh_events) + list(new.beh_events)\n # Vector join: unit-sum with weights, monotone in weight\n zv_old = np.array(old.vec, dtype=np.float32) if old.vec is not None else None\n zv_new = np.array(new.vec, dtype=np.float32) if new.vec is not None else None\n w_old = float(max(0.0, old.vec_weight))\n w_new = float(max(0.0, new.vec_weight))\n if zv_old is None and zv_new is None:\n st.vec, st.vec_weight = None, float(w_old + w_new)\n elif zv_old is None:\n st.vec, st.vec_weight = list(_unit(zv_new).tolist()), float(w_new + w_old)\n elif zv_new is None:\n st.vec, st.vec_weight = list(_unit(zv_old).tolist()), float(w_old + w_new)\n else:\n try:\n z = (w_old * _unit(zv_old)) + (w_new * _unit(zv_new))\n st.vec = list(_unit(z).tolist())\n except Exception:\n st.vec = list(_unit(zv_old).tolist())\n st.vec_weight = float(w_old + w_new)\n st.H = max(float(old.H), float(new.H))\n st.updated_at = 
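RepoState.checksum, shown above, hashes a canonical JSON rendering in which every set is sorted first, so two logically equal states always produce the same digest regardless of insertion order. A standalone sketch of that pattern:

import hashlib
import json

def state_checksum(modules, files, facts) -> str:
    # Sets are sorted into lists before hashing, as in RepoState.checksum.
    base = {
        "m": sorted(modules),
        "f": sorted(files),
        "facts": sorted([(p, int(a), int(b)) for (p, a, b) in facts]),
    }
    raw = json.dumps(base, sort_keys=True)
    return hashlib.sha1(raw.encode("utf-8")).hexdigest()

a = state_checksum({"pkg.mod"}, {"pkg/mod.py"}, {("pkg/mod.py", 1, 10)})
b = state_checksum({"pkg.mod"}, {"pkg/mod.py"}, {("pkg/mod.py", 1, 10)})
assert a == b
print(a)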
max(float(old.updated_at), float(new.updated_at), _now_ts())\n return st\n\n\ndef changed_bits(prev: RepoState, cur: RepoState) -> bool:\n try:\n if len(cur.candidates_modules) > len(prev.candidates_modules):\n return True\n if len(cur.candidates_files) > len(prev.candidates_files):\n return True\n if len(cur.facts) > len(prev.facts):\n return True\n if float(cur.vec_weight) > float(prev.vec_weight):\n return True\n except Exception:\n return True\n return False\n\n\ndef new_state_from_run(\n repo_root: str,\n *,\n modules: List[str],\n files: List[str],\n citations: List[Tuple[str, int, int]],\n z_vec: Optional[np.ndarray],\n beh_event: Optional[Dict[str, Any]] = None,\n H_increment: float = 0.0,\n) -> RepoState:\n st = RepoState(version=1, repo_root=os.path.abspath(repo_root))\n st.candidates_modules = set([m for m in (modules or []) if m])\n st.candidates_files = set([f for f in (files or []) if f])\n st.facts = set([(p, int(a), int(b)) for (p, a, b) in (citations or [])])\n st.beh_events = ([beh_event] if beh_event else [])\n if z_vec is not None:\n try:\n st.vec = list(_unit(z_vec.astype(np.float32)).tolist())\n st.vec_weight = 1.0\n except Exception:\n st.vec, st.vec_weight = None, 0.0\n st.H = float(max(0.0, H_increment))\n st.updated_at = _now_ts()\n return st\n\n","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state._now_ts","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.repo_state._now_ts#L12-L16","kind":"function","name":"_now_ts","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":12,"end_line":16,"context_start_line":1,"context_end_line":36,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field, asdict\nfrom typing import Dict, List, Optional, Tuple, Set, Any\nimport os\nimport json\nimport time\nimport math\nimport numpy as np\n\n\ndef _now_ts() -> float:\n try:\n return time.time()\n except Exception:\n return 0.0\n\n\ndef _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n\n\n@dataclass\nclass RepoState:\n version: int = 1\n repo_root: str = \"\"\n # Monotone/idempotent components\n candidates_modules: Set[str] = field(default_factory=set)\n candidates_files: Set[str] = field(default_factory=set)\n facts: Set[Tuple[str, int, int]] = field(default_factory=set) # (rel_path, a, b)\n beh_events: List[Dict[str, Any]] = field(default_factory=list) # append-only\n # Vector view (anytime): keep a unit vector and a running weight\n vec: Optional[List[float]] = None\n vec_weight: float = 0.0\n # Capacity budget H","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state._unit","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.repo_state._unit#L19-L21","kind":"function","name":"_unit","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":19,"end_line":21,"context_start_line":1,"context_end_line":41,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field, asdict\nfrom typing import Dict, List, Optional, Tuple, Set, Any\nimport os\nimport json\nimport time\nimport math\nimport numpy as np\n\n\ndef _now_ts() -> float:\n try:\n return 
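The vector join in join_repo_states above weights each unit vector by its running weight, sums, and re-normalizes, while the weights themselves add; that makes the merge commutative and monotone in weight, which is what changed_bits relies on. A minimal numeric sketch of the same merge:

import numpy as np

def unit(x: np.ndarray) -> np.ndarray:
    n = float(np.linalg.norm(x))
    return x / n if n > 0 else x

v_old, w_old = unit(np.array([1.0, 0.0])), 2.0
v_new, w_new = unit(np.array([0.0, 1.0])), 1.0
merged = unit(w_old * v_old + w_new * v_new)
print(merged, w_old + w_new)  # direction leans toward the heavier vector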
time.time()\n except Exception:\n return 0.0\n\n\ndef _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n\n\n@dataclass\nclass RepoState:\n version: int = 1\n repo_root: str = \"\"\n # Monotone/idempotent components\n candidates_modules: Set[str] = field(default_factory=set)\n candidates_files: Set[str] = field(default_factory=set)\n facts: Set[Tuple[str, int, int]] = field(default_factory=set) # (rel_path, a, b)\n beh_events: List[Dict[str, Any]] = field(default_factory=list) # append-only\n # Vector view (anytime): keep a unit vector and a running weight\n vec: Optional[List[float]] = None\n vec_weight: float = 0.0\n # Capacity budget H\n H: float = 0.0\n # Last updated\n updated_at: float = 0.0\n\n def checksum(self) -> str:","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state.RepoState","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_qa.repo_state.RepoState#L25-L55","kind":"class","name":"RepoState","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":25,"end_line":55,"context_start_line":5,"context_end_line":75,"code":"import os\nimport json\nimport time\nimport math\nimport numpy as np\n\n\ndef _now_ts() -> float:\n try:\n return time.time()\n except Exception:\n return 0.0\n\n\ndef _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x\n\n\n@dataclass\nclass RepoState:\n version: int = 1\n repo_root: str = \"\"\n # Monotone/idempotent components\n candidates_modules: Set[str] = field(default_factory=set)\n candidates_files: Set[str] = field(default_factory=set)\n facts: Set[Tuple[str, int, int]] = field(default_factory=set) # (rel_path, a, b)\n beh_events: List[Dict[str, Any]] = field(default_factory=list) # append-only\n # Vector view (anytime): keep a unit vector and a running weight\n vec: Optional[List[float]] = None\n vec_weight: float = 0.0\n # Capacity budget H\n H: float = 0.0\n # Last updated\n updated_at: float = 0.0\n\n def checksum(self) -> str:\n try:\n base = {\n \"m\": sorted(list(self.candidates_modules)),\n \"f\": sorted(list(self.candidates_files)),\n \"facts\": sorted([(p, int(a), int(b)) for (p, a, b) in self.facts]),\n \"vw\": float(self.vec_weight),\n \"vl\": len(self.vec or []),\n \"H\": float(self.H),\n }\n raw = json.dumps(base, sort_keys=True)\n import hashlib as _hh # local\n return _hh.sha1(raw.encode(\"utf-8\", errors=\"ignore\")).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _default_state_path(repo_root: str) -> str:\n try:\n return os.path.join(os.path.abspath(repo_root), \".repo_state.json\")\n except Exception:\n return \".repo_state.json\"\n\n\ndef load_repo_state(repo_root: str, path: Optional[str] = None) -> RepoState:\n p = path or _default_state_path(repo_root)\n try:\n if os.path.isfile(p):\n obj = json.loads(open(p, \"r\", encoding=\"utf-8\").read())\n st = RepoState(\n version=int(obj.get(\"version\", 1)),\n repo_root=str(obj.get(\"repo_root\") or repo_root),\n candidates_modules=set(obj.get(\"candidates_modules\", []) or []),\n candidates_files=set(obj.get(\"candidates_files\", []) or []),\n facts=set((t[0], int(t[1]), int(t[2])) for t in (obj.get(\"facts\", []) or [])),","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state._default_state_path","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.repo_state._default_state_path#L58-L62","kind":"function","name":"_default_state_path","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":58,"end_line":62,"context_start_line":38,"context_end_line":82,"code":" # Last updated\n updated_at: float = 0.0\n\n def checksum(self) -> str:\n try:\n base = {\n \"m\": sorted(list(self.candidates_modules)),\n \"f\": sorted(list(self.candidates_files)),\n \"facts\": sorted([(p, int(a), int(b)) for (p, a, b) in self.facts]),\n \"vw\": float(self.vec_weight),\n \"vl\": len(self.vec or []),\n \"H\": float(self.H),\n }\n raw = json.dumps(base, sort_keys=True)\n import hashlib as _hh # local\n return _hh.sha1(raw.encode(\"utf-8\", errors=\"ignore\")).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _default_state_path(repo_root: str) -> str:\n try:\n return os.path.join(os.path.abspath(repo_root), \".repo_state.json\")\n except Exception:\n return \".repo_state.json\"\n\n\ndef load_repo_state(repo_root: str, path: Optional[str] = None) -> RepoState:\n p = path or _default_state_path(repo_root)\n try:\n if os.path.isfile(p):\n obj = json.loads(open(p, \"r\", encoding=\"utf-8\").read())\n st = RepoState(\n version=int(obj.get(\"version\", 1)),\n repo_root=str(obj.get(\"repo_root\") or repo_root),\n candidates_modules=set(obj.get(\"candidates_modules\", []) or []),\n candidates_files=set(obj.get(\"candidates_files\", []) or []),\n facts=set((t[0], int(t[1]), int(t[2])) for t in (obj.get(\"facts\", []) or [])),\n beh_events=list(obj.get(\"beh_events\", []) or []),\n vec=(obj.get(\"vec\") if isinstance(obj.get(\"vec\"), list) else None),\n vec_weight=float(obj.get(\"vec_weight\", 0.0) or 0.0),\n H=float(obj.get(\"H\", 0.0) or 0.0),\n updated_at=float(obj.get(\"updated_at\", 0.0) or 0.0),\n )\n return st","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state.load_repo_state","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.repo_state.load_repo_state#L65-L85","kind":"function","name":"load_repo_state","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":65,"end_line":85,"context_start_line":45,"context_end_line":105,"code":" \"f\": sorted(list(self.candidates_files)),\n \"facts\": sorted([(p, int(a), int(b)) for (p, a, b) in self.facts]),\n \"vw\": float(self.vec_weight),\n \"vl\": len(self.vec or []),\n \"H\": float(self.H),\n }\n raw = json.dumps(base, sort_keys=True)\n import hashlib as _hh # local\n return _hh.sha1(raw.encode(\"utf-8\", errors=\"ignore\")).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _default_state_path(repo_root: str) -> str:\n try:\n return os.path.join(os.path.abspath(repo_root), \".repo_state.json\")\n except Exception:\n return \".repo_state.json\"\n\n\ndef load_repo_state(repo_root: str, path: Optional[str] = None) -> RepoState:\n p = path or _default_state_path(repo_root)\n try:\n if os.path.isfile(p):\n obj = json.loads(open(p, \"r\", encoding=\"utf-8\").read())\n st = RepoState(\n version=int(obj.get(\"version\", 1)),\n repo_root=str(obj.get(\"repo_root\") or repo_root),\n candidates_modules=set(obj.get(\"candidates_modules\", []) or []),\n 
candidates_files=set(obj.get(\"candidates_files\", []) or []),\n facts=set((t[0], int(t[1]), int(t[2])) for t in (obj.get(\"facts\", []) or [])),\n beh_events=list(obj.get(\"beh_events\", []) or []),\n vec=(obj.get(\"vec\") if isinstance(obj.get(\"vec\"), list) else None),\n vec_weight=float(obj.get(\"vec_weight\", 0.0) or 0.0),\n H=float(obj.get(\"H\", 0.0) or 0.0),\n updated_at=float(obj.get(\"updated_at\", 0.0) or 0.0),\n )\n return st\n except Exception:\n pass\n return RepoState(version=1, repo_root=os.path.abspath(repo_root))\n\n\ndef save_repo_state(state: RepoState, path: Optional[str] = None) -> None:\n p = path or _default_state_path(state.repo_root)\n try:\n state.updated_at = _now_ts()\n os.makedirs(os.path.dirname(p), exist_ok=True)\n except Exception:\n pass\n try:\n obj = {\n \"version\": state.version,\n \"repo_root\": state.repo_root,\n \"candidates_modules\": sorted(list(state.candidates_modules)),\n \"candidates_files\": sorted(list(state.candidates_files)),\n \"facts\": sorted([(p, int(a), int(b)) for (p, a, b) in state.facts]),\n \"beh_events\": state.beh_events,\n \"vec\": state.vec,\n \"vec_weight\": float(state.vec_weight),\n \"H\": float(state.H),","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state.save_repo_state","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.repo_state.save_repo_state#L88-L110","kind":"function","name":"save_repo_state","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":88,"end_line":110,"context_start_line":68,"context_end_line":130,"code":" if os.path.isfile(p):\n obj = json.loads(open(p, \"r\", encoding=\"utf-8\").read())\n st = RepoState(\n version=int(obj.get(\"version\", 1)),\n repo_root=str(obj.get(\"repo_root\") or repo_root),\n candidates_modules=set(obj.get(\"candidates_modules\", []) or []),\n candidates_files=set(obj.get(\"candidates_files\", []) or []),\n facts=set((t[0], int(t[1]), int(t[2])) for t in (obj.get(\"facts\", []) or [])),\n beh_events=list(obj.get(\"beh_events\", []) or []),\n vec=(obj.get(\"vec\") if isinstance(obj.get(\"vec\"), list) else None),\n vec_weight=float(obj.get(\"vec_weight\", 0.0) or 0.0),\n H=float(obj.get(\"H\", 0.0) or 0.0),\n updated_at=float(obj.get(\"updated_at\", 0.0) or 0.0),\n )\n return st\n except Exception:\n pass\n return RepoState(version=1, repo_root=os.path.abspath(repo_root))\n\n\ndef save_repo_state(state: RepoState, path: Optional[str] = None) -> None:\n p = path or _default_state_path(state.repo_root)\n try:\n state.updated_at = _now_ts()\n os.makedirs(os.path.dirname(p), exist_ok=True)\n except Exception:\n pass\n try:\n obj = {\n \"version\": state.version,\n \"repo_root\": state.repo_root,\n \"candidates_modules\": sorted(list(state.candidates_modules)),\n \"candidates_files\": sorted(list(state.candidates_files)),\n \"facts\": sorted([(p, int(a), int(b)) for (p, a, b) in state.facts]),\n \"beh_events\": state.beh_events,\n \"vec\": state.vec,\n \"vec_weight\": float(state.vec_weight),\n \"H\": float(state.H),\n \"updated_at\": float(state.updated_at),\n }\n open(p, \"w\", encoding=\"utf-8\").write(json.dumps(obj, indent=2))\n except Exception:\n pass\n\n\ndef join_repo_states(old: RepoState, new: RepoState) -> RepoState:\n st = RepoState(version=max(int(old.version), int(new.version)))\n st.repo_root = old.repo_root or new.repo_root\n st.candidates_modules = 
set(old.candidates_modules) | set(new.candidates_modules)\n st.candidates_files = set(old.candidates_files) | set(new.candidates_files)\n st.facts = set(old.facts) | set(new.facts)\n st.beh_events = list(old.beh_events) + list(new.beh_events)\n # Vector join: unit-sum with weights, monotone in weight\n zv_old = np.array(old.vec, dtype=np.float32) if old.vec is not None else None\n zv_new = np.array(new.vec, dtype=np.float32) if new.vec is not None else None\n w_old = float(max(0.0, old.vec_weight))\n w_new = float(max(0.0, new.vec_weight))\n if zv_old is None and zv_new is None:\n st.vec, st.vec_weight = None, float(w_old + w_new)\n elif zv_old is None:\n st.vec, st.vec_weight = list(_unit(zv_new).tolist()), float(w_new + w_old)\n elif zv_new is None:\n st.vec, st.vec_weight = list(_unit(zv_old).tolist()), float(w_old + w_new)","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state.join_repo_states","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.repo_state.join_repo_states#L113-L140","kind":"function","name":"join_repo_states","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":113,"end_line":140,"context_start_line":93,"context_end_line":160,"code":" except Exception:\n pass\n try:\n obj = {\n \"version\": state.version,\n \"repo_root\": state.repo_root,\n \"candidates_modules\": sorted(list(state.candidates_modules)),\n \"candidates_files\": sorted(list(state.candidates_files)),\n \"facts\": sorted([(p, int(a), int(b)) for (p, a, b) in state.facts]),\n \"beh_events\": state.beh_events,\n \"vec\": state.vec,\n \"vec_weight\": float(state.vec_weight),\n \"H\": float(state.H),\n \"updated_at\": float(state.updated_at),\n }\n open(p, \"w\", encoding=\"utf-8\").write(json.dumps(obj, indent=2))\n except Exception:\n pass\n\n\ndef join_repo_states(old: RepoState, new: RepoState) -> RepoState:\n st = RepoState(version=max(int(old.version), int(new.version)))\n st.repo_root = old.repo_root or new.repo_root\n st.candidates_modules = set(old.candidates_modules) | set(new.candidates_modules)\n st.candidates_files = set(old.candidates_files) | set(new.candidates_files)\n st.facts = set(old.facts) | set(new.facts)\n st.beh_events = list(old.beh_events) + list(new.beh_events)\n # Vector join: unit-sum with weights, monotone in weight\n zv_old = np.array(old.vec, dtype=np.float32) if old.vec is not None else None\n zv_new = np.array(new.vec, dtype=np.float32) if new.vec is not None else None\n w_old = float(max(0.0, old.vec_weight))\n w_new = float(max(0.0, new.vec_weight))\n if zv_old is None and zv_new is None:\n st.vec, st.vec_weight = None, float(w_old + w_new)\n elif zv_old is None:\n st.vec, st.vec_weight = list(_unit(zv_new).tolist()), float(w_new + w_old)\n elif zv_new is None:\n st.vec, st.vec_weight = list(_unit(zv_old).tolist()), float(w_old + w_new)\n else:\n try:\n z = (w_old * _unit(zv_old)) + (w_new * _unit(zv_new))\n st.vec = list(_unit(z).tolist())\n except Exception:\n st.vec = list(_unit(zv_old).tolist())\n st.vec_weight = float(w_old + w_new)\n st.H = max(float(old.H), float(new.H))\n st.updated_at = max(float(old.updated_at), float(new.updated_at), _now_ts())\n return st\n\n\ndef changed_bits(prev: RepoState, cur: RepoState) -> bool:\n try:\n if len(cur.candidates_modules) > len(prev.candidates_modules):\n return True\n if len(cur.candidates_files) > 
len(prev.candidates_files):\n return True\n if len(cur.facts) > len(prev.facts):\n return True\n if float(cur.vec_weight) > float(prev.vec_weight):\n return True\n except Exception:\n return True\n return False\n\n\ndef new_state_from_run(\n repo_root: str,\n *,","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state.changed_bits","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.repo_state.changed_bits#L143-L155","kind":"function","name":"changed_bits","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":143,"end_line":155,"context_start_line":123,"context_end_line":175,"code":" w_old = float(max(0.0, old.vec_weight))\n w_new = float(max(0.0, new.vec_weight))\n if zv_old is None and zv_new is None:\n st.vec, st.vec_weight = None, float(w_old + w_new)\n elif zv_old is None:\n st.vec, st.vec_weight = list(_unit(zv_new).tolist()), float(w_new + w_old)\n elif zv_new is None:\n st.vec, st.vec_weight = list(_unit(zv_old).tolist()), float(w_old + w_new)\n else:\n try:\n z = (w_old * _unit(zv_old)) + (w_new * _unit(zv_new))\n st.vec = list(_unit(z).tolist())\n except Exception:\n st.vec = list(_unit(zv_old).tolist())\n st.vec_weight = float(w_old + w_new)\n st.H = max(float(old.H), float(new.H))\n st.updated_at = max(float(old.updated_at), float(new.updated_at), _now_ts())\n return st\n\n\ndef changed_bits(prev: RepoState, cur: RepoState) -> bool:\n try:\n if len(cur.candidates_modules) > len(prev.candidates_modules):\n return True\n if len(cur.candidates_files) > len(prev.candidates_files):\n return True\n if len(cur.facts) > len(prev.facts):\n return True\n if float(cur.vec_weight) > float(prev.vec_weight):\n return True\n except Exception:\n return True\n return False\n\n\ndef new_state_from_run(\n repo_root: str,\n *,\n modules: List[str],\n files: List[str],\n citations: List[Tuple[str, int, int]],\n z_vec: Optional[np.ndarray],\n beh_event: Optional[Dict[str, Any]] = None,\n H_increment: float = 0.0,\n) -> RepoState:\n st = RepoState(version=1, repo_root=os.path.abspath(repo_root))\n st.candidates_modules = set([m for m in (modules or []) if m])\n st.candidates_files = set([f for f in (files or []) if f])\n st.facts = set([(p, int(a), int(b)) for (p, a, b) in (citations or [])])\n st.beh_events = ([beh_event] if beh_event else [])\n if z_vec is not None:\n try:\n st.vec = list(_unit(z_vec.astype(np.float32)).tolist())","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state.new_state_from_run","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.repo_state.new_state_from_run#L158-L181","kind":"function","name":"new_state_from_run","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":158,"end_line":181,"context_start_line":138,"context_end_line":183,"code":" st.H = max(float(old.H), float(new.H))\n st.updated_at = max(float(old.updated_at), float(new.updated_at), _now_ts())\n return st\n\n\ndef changed_bits(prev: RepoState, cur: RepoState) -> bool:\n try:\n if len(cur.candidates_modules) > len(prev.candidates_modules):\n return True\n if len(cur.candidates_files) > len(prev.candidates_files):\n return True\n if len(cur.facts) > len(prev.facts):\n return True\n if 
float(cur.vec_weight) > float(prev.vec_weight):\n return True\n except Exception:\n return True\n return False\n\n\ndef new_state_from_run(\n repo_root: str,\n *,\n modules: List[str],\n files: List[str],\n citations: List[Tuple[str, int, int]],\n z_vec: Optional[np.ndarray],\n beh_event: Optional[Dict[str, Any]] = None,\n H_increment: float = 0.0,\n) -> RepoState:\n st = RepoState(version=1, repo_root=os.path.abspath(repo_root))\n st.candidates_modules = set([m for m in (modules or []) if m])\n st.candidates_files = set([f for f in (files or []) if f])\n st.facts = set([(p, int(a), int(b)) for (p, a, b) in (citations or [])])\n st.beh_events = ([beh_event] if beh_event else [])\n if z_vec is not None:\n try:\n st.vec = list(_unit(z_vec.astype(np.float32)).tolist())\n st.vec_weight = 1.0\n except Exception:\n st.vec, st.vec_weight = None, 0.0\n st.H = float(max(0.0, H_increment))\n st.updated_at = _now_ts()\n return st\n\n","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.repo_state.checksum","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.repo_state.checksum#L41-L55","kind":"function","name":"checksum","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":41,"end_line":55,"context_start_line":21,"context_end_line":75,"code":" return (x / n) if n > 0 else x\n\n\n@dataclass\nclass RepoState:\n version: int = 1\n repo_root: str = \"\"\n # Monotone/idempotent components\n candidates_modules: Set[str] = field(default_factory=set)\n candidates_files: Set[str] = field(default_factory=set)\n facts: Set[Tuple[str, int, int]] = field(default_factory=set) # (rel_path, a, b)\n beh_events: List[Dict[str, Any]] = field(default_factory=list) # append-only\n # Vector view (anytime): keep a unit vector and a running weight\n vec: Optional[List[float]] = None\n vec_weight: float = 0.0\n # Capacity budget H\n H: float = 0.0\n # Last updated\n updated_at: float = 0.0\n\n def checksum(self) -> str:\n try:\n base = {\n \"m\": sorted(list(self.candidates_modules)),\n \"f\": sorted(list(self.candidates_files)),\n \"facts\": sorted([(p, int(a), int(b)) for (p, a, b) in self.facts]),\n \"vw\": float(self.vec_weight),\n \"vl\": len(self.vec or []),\n \"H\": float(self.H),\n }\n raw = json.dumps(base, sort_keys=True)\n import hashlib as _hh # local\n return _hh.sha1(raw.encode(\"utf-8\", errors=\"ignore\")).hexdigest()\n except Exception:\n return \"\"\n\n\ndef _default_state_path(repo_root: str) -> str:\n try:\n return os.path.join(os.path.abspath(repo_root), \".repo_state.json\")\n except Exception:\n return \".repo_state.json\"\n\n\ndef load_repo_state(repo_root: str, path: Optional[str] = None) -> RepoState:\n p = path or _default_state_path(repo_root)\n try:\n if os.path.isfile(p):\n obj = json.loads(open(p, \"r\", encoding=\"utf-8\").read())\n st = RepoState(\n version=int(obj.get(\"version\", 1)),\n repo_root=str(obj.get(\"repo_root\") or repo_root),\n candidates_modules=set(obj.get(\"candidates_modules\", []) or []),\n candidates_files=set(obj.get(\"candidates_files\", []) or []),\n facts=set((t[0], int(t[1]), int(t[2])) for t in (obj.get(\"facts\", []) or [])),","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} 
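Taken together, load_repo_state, save_repo_state, join_repo_states, and changed_bits implement a CRDT-style merge: set components union, beh_events concatenate, H and updated_at take the max, and the vector view renormalizes a weight-blended sum so vec stays unit-length while vec_weight accumulates. A small sketch of those join properties follows, assuming the definitions above; the asserted invariants are inferred from the code rather than stated in it.

import numpy as np

# Two hypothetical single-run states over the same repo root.
a = new_state_from_run(
    "/tmp/demo", modules=["pkg.mod"], files=["pkg/mod.py"],
    citations=[("pkg/mod.py", 1, 5)],
    z_vec=np.array([1.0, 0.0], dtype=np.float32), H_increment=0.5,
)
b = new_state_from_run(
    "/tmp/demo", modules=["pkg.other"], files=["pkg/other.py"],
    citations=[("pkg/other.py", 3, 9)],
    z_vec=np.array([0.0, 1.0], dtype=np.float32), H_increment=0.2,
)

ab = join_repo_states(a, b)
assert ab.facts == a.facts | b.facts                   # set components union
assert ab.vec_weight == a.vec_weight + b.vec_weight    # weights accumulate
assert abs(float(np.linalg.norm(np.array(ab.vec))) - 1.0) < 1e-5  # vec stays unit
assert ab.H == max(a.H, b.H)                           # capacity takes the max
assert changed_bits(a, ab)                             # growth is observable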
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.run_smoke_example","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_qa.run_smoke_example#L1-L92","kind":"module","name":"examples.python_repo_grounded_qa.run_smoke_example","path":"examples/python_repo_grounded_qa/run_smoke_example.py","language":"python","start_line":1,"end_line":92,"context_start_line":1,"context_end_line":92,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n# Resolve program_conditioned_adapter directory directly from this file\nEX_DIR = Path(__file__).resolve().parents[2]\nSMOKE_REPO = EX_DIR / \"smoke_repo\"\n# Backend class name (implemented in this example package)\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n# Program configuration loader\nfrom .program_config import load_program_config # noqa: E402\n\n\ndef _run(cmd: list[str]) -> int:\n print(\"[run]\", \" \".join(cmd))\n return subprocess.call(cmd)\n\n\ndef main() -> None:\n cfg = load_program_config(str(SMOKE_REPO))\n adapters_dir = cfg.paths.adapters_dir\n adapters_dir.mkdir(parents=True, exist_ok=True)\n # Resolve backend module path if a bare class name is provided\n pg_backend = PG_BACKEND\n if \":\" not in pg_backend:\n pg_backend = cfg.pg_backend\n # Emit consolidated repository knowledge (entities/edges/anchors) for grounding\n knowledge_path = cfg.paths.knowledge_path\n _run([\n sys.executable,\n \"-m\",\n \"examples.python_repo_grounded_qa.emit_repository_knowledge\",\n str(SMOKE_REPO),\n str(knowledge_path),\n pg_backend,\n ])\n # Build adapters and caches for the smoke repo (PCA-agnostic; PG injected via --pg-backend)\n rc = _run([\n sys.executable,\n \"-m\",\n \"build\",\n \"--sources\", str(SMOKE_REPO),\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(adapters_dir),\n \"--embed-dim\", \"256\",\n \"--include-text\",\n \"--text-max-bytes\", \"20000\",\n \"--pg-backend\", pg_backend,\n \"--contracts-require-citations\",\n \"--contracts-retrieval-policy\", str(cfg.contracts.retrieval_policy),\n \"--contracts-retrieval-temp\", str(cfg.contracts.retrieval_temp),\n \"--init-program-state\",\n \"--program-state-path\", str(cfg.paths.program_state_path),\n \"--seed\", \"0\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n # Run grounded QA (selection and stamping via ProgramGraph plugin)\n prompt = \"Where is the add function defined and how is it used? 
Provide citations.\"\n rc = _run([\n sys.executable,\n \"-m\",\n \"run\",\n \"--sources\", str(SMOKE_REPO),\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(adapters_dir),\n \"--prompt\", prompt,\n \"--of-sources\", \"question\",\n \"--pack-context\",\n \"--pack-mode\", \"windows\",\n \"--context-tokens\", \"800\",\n \"--require-citations\",\n \"--structured\",\n \"--citations-enforce\",\n \"--pg-backend\", pg_backend,\n \"--retrieval-policy\", str(cfg.contracts.retrieval_policy),\n \"--retrieval-temp\", str(cfg.contracts.retrieval_temp),\n \"--program-state\", str(cfg.paths.program_state_path),\n \"--delta-cap\", \"0.05\",\n \"--verbose\",\n ])\n sys.exit(rc)\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"92ed0427a371a73d25260fec5c834daa2157daf6718fb2ceb563e5669b425dc0","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.run_smoke_example._run","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.run_smoke_example._run#L17-L19","kind":"function","name":"_run","path":"examples/python_repo_grounded_qa/run_smoke_example.py","language":"python","start_line":17,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n# Resolve program_conditioned_adapter directory directly from this file\nEX_DIR = Path(__file__).resolve().parents[2]\nSMOKE_REPO = EX_DIR / \"smoke_repo\"\n# Backend class name (implemented in this example package)\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n# Program configuration loader\nfrom .program_config import load_program_config # noqa: E402\n\n\ndef _run(cmd: list[str]) -> int:\n print(\"[run]\", \" \".join(cmd))\n return subprocess.call(cmd)\n\n\ndef main() -> None:\n cfg = load_program_config(str(SMOKE_REPO))\n adapters_dir = cfg.paths.adapters_dir\n adapters_dir.mkdir(parents=True, exist_ok=True)\n # Resolve backend module path if a bare class name is provided\n pg_backend = PG_BACKEND\n if \":\" not in pg_backend:\n pg_backend = cfg.pg_backend\n # Emit consolidated repository knowledge (entities/edges/anchors) for grounding\n knowledge_path = cfg.paths.knowledge_path\n _run([\n sys.executable,\n \"-m\",\n \"examples.python_repo_grounded_qa.emit_repository_knowledge\",\n str(SMOKE_REPO),\n str(knowledge_path),\n pg_backend,\n ])","source_hash":"92ed0427a371a73d25260fec5c834daa2157daf6718fb2ceb563e5669b425dc0","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.run_smoke_example.main","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.run_smoke_example.main#L22-L86","kind":"function","name":"main","path":"examples/python_repo_grounded_qa/run_smoke_example.py","language":"python","start_line":22,"end_line":86,"context_start_line":2,"context_end_line":92,"code":"\nimport os\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n# Resolve program_conditioned_adapter directory directly from this file\nEX_DIR = Path(__file__).resolve().parents[2]\nSMOKE_REPO = EX_DIR / \"smoke_repo\"\n# Backend class name (implemented in this example package)\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n# Program configuration loader\nfrom .program_config import load_program_config # noqa: E402\n\n\ndef _run(cmd: list[str]) -> int:\n print(\"[run]\", \" \".join(cmd))\n return 
subprocess.call(cmd)\n\n\ndef main() -> None:\n cfg = load_program_config(str(SMOKE_REPO))\n adapters_dir = cfg.paths.adapters_dir\n adapters_dir.mkdir(parents=True, exist_ok=True)\n # Resolve backend module path if a bare class name is provided\n pg_backend = PG_BACKEND\n if \":\" not in pg_backend:\n pg_backend = cfg.pg_backend\n # Emit consolidated repository knowledge (entities/edges/anchors) for grounding\n knowledge_path = cfg.paths.knowledge_path\n _run([\n sys.executable,\n \"-m\",\n \"examples.python_repo_grounded_qa.emit_repository_knowledge\",\n str(SMOKE_REPO),\n str(knowledge_path),\n pg_backend,\n ])\n # Build adapters and caches for the smoke repo (PCA-agnostic; PG injected via --pg-backend)\n rc = _run([\n sys.executable,\n \"-m\",\n \"build\",\n \"--sources\", str(SMOKE_REPO),\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(adapters_dir),\n \"--embed-dim\", \"256\",\n \"--include-text\",\n \"--text-max-bytes\", \"20000\",\n \"--pg-backend\", pg_backend,\n \"--contracts-require-citations\",\n \"--contracts-retrieval-policy\", str(cfg.contracts.retrieval_policy),\n \"--contracts-retrieval-temp\", str(cfg.contracts.retrieval_temp),\n \"--init-program-state\",\n \"--program-state-path\", str(cfg.paths.program_state_path),\n \"--seed\", \"0\",\n \"--verbose\",\n ])\n if rc != 0:\n sys.exit(rc)\n # Run grounded QA (selection and stamping via ProgramGraph plugin)\n prompt = \"Where is the add function defined and how is it used? Provide citations.\"\n rc = _run([\n sys.executable,\n \"-m\",\n \"run\",\n \"--sources\", str(SMOKE_REPO),\n \"--model\", \"meta-llama/Llama-3.1-8B-Instruct\",\n \"--adapters-dir\", str(adapters_dir),\n \"--prompt\", prompt,\n \"--of-sources\", \"question\",\n \"--pack-context\",\n \"--pack-mode\", \"windows\",\n \"--context-tokens\", \"800\",\n \"--require-citations\",\n \"--structured\",\n \"--citations-enforce\",\n \"--pg-backend\", pg_backend,\n \"--retrieval-policy\", str(cfg.contracts.retrieval_policy),\n \"--retrieval-temp\", str(cfg.contracts.retrieval_temp),\n \"--program-state\", str(cfg.paths.program_state_path),\n \"--delta-cap\", \"0.05\",\n \"--verbose\",\n ])\n sys.exit(rc)\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"92ed0427a371a73d25260fec5c834daa2157daf6718fb2ceb563e5669b425dc0","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.program_config","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_qa.program_config#L1-L60","kind":"module","name":"examples.python_repo_grounded_qa.program_config","path":"examples/python_repo_grounded_qa/program_config.py","language":"python","start_line":1,"end_line":60,"context_start_line":1,"context_end_line":60,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_path: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(base_dir: Path) -> str:\n pkg = \"examples.python_repo_grounded_qa\"\n if (base_dir / 
\"python_repo_graph.py\").exists():\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n if (base_dir / \"repo_graph.py\").exists():\n return f\"{pkg}.repo_graph:PythonRepoGraph\"\n # Fallback to python_repo_graph by name\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n\n\ndef load_program_config(repo_root: str) -> ProgramConfig:\n repo = Path(repo_root).resolve()\n ex_dir = Path(__file__).resolve().parents[2]\n example_dir = Path(__file__).resolve().parent\n adapters_dir = ex_dir / \"artifacts\" / \"smoke_base\"\n knowledge_path = adapters_dir / \"repository_knowledge.json\"\n pg_backend = _detect_pg_backend(example_dir)\n program_id = repo.name or \"repo\"\n return ProgramConfig(\n program_id=program_id,\n pg_backend=pg_backend,\n paths=ProgramPaths(\n adapters_dir=adapters_dir,\n knowledge_path=knowledge_path,\n program_state_path=adapters_dir / \".program_state.json\",\n ),\n contracts=ProgramContracts(),\n )\n\n","source_hash":"478bd1b12009db72e078bf222582a9c2cdf8cdedc2c722fc22424d4953389d08","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.program_config.ProgramContracts","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_qa.program_config.ProgramContracts#L9-L13","kind":"class","name":"ProgramContracts","path":"examples/python_repo_grounded_qa/program_config.py","language":"python","start_line":9,"end_line":13,"context_start_line":1,"context_end_line":33,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_path: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(base_dir: Path) -> str:\n pkg = \"examples.python_repo_grounded_qa\"\n if (base_dir / \"python_repo_graph.py\").exists():","source_hash":"478bd1b12009db72e078bf222582a9c2cdf8cdedc2c722fc22424d4953389d08","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.program_config.ProgramPaths","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_qa.program_config.ProgramPaths#L17-L20","kind":"class","name":"ProgramPaths","path":"examples/python_repo_grounded_qa/program_config.py","language":"python","start_line":17,"end_line":20,"context_start_line":1,"context_end_line":40,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_path: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(base_dir: Path) -> str:\n pkg = 
\"examples.python_repo_grounded_qa\"\n if (base_dir / \"python_repo_graph.py\").exists():\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n if (base_dir / \"repo_graph.py\").exists():\n return f\"{pkg}.repo_graph:PythonRepoGraph\"\n # Fallback to python_repo_graph by name\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n\n","source_hash":"478bd1b12009db72e078bf222582a9c2cdf8cdedc2c722fc22424d4953389d08","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.program_config.ProgramConfig","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_qa.program_config.ProgramConfig#L24-L28","kind":"class","name":"ProgramConfig","path":"examples/python_repo_grounded_qa/program_config.py","language":"python","start_line":24,"end_line":28,"context_start_line":4,"context_end_line":48,"code":"from pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_path: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(base_dir: Path) -> str:\n pkg = \"examples.python_repo_grounded_qa\"\n if (base_dir / \"python_repo_graph.py\").exists():\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n if (base_dir / \"repo_graph.py\").exists():\n return f\"{pkg}.repo_graph:PythonRepoGraph\"\n # Fallback to python_repo_graph by name\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n\n\ndef load_program_config(repo_root: str) -> ProgramConfig:\n repo = Path(repo_root).resolve()\n ex_dir = Path(__file__).resolve().parents[2]\n example_dir = Path(__file__).resolve().parent\n adapters_dir = ex_dir / \"artifacts\" / \"smoke_base\"\n knowledge_path = adapters_dir / \"repository_knowledge.json\"\n pg_backend = _detect_pg_backend(example_dir)\n program_id = repo.name or \"repo\"","source_hash":"478bd1b12009db72e078bf222582a9c2cdf8cdedc2c722fc22424d4953389d08","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.program_config._detect_pg_backend","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.program_config._detect_pg_backend#L31-L38","kind":"function","name":"_detect_pg_backend","path":"examples/python_repo_grounded_qa/program_config.py","language":"python","start_line":31,"end_line":38,"context_start_line":11,"context_end_line":58,"code":" citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_path: Path\n program_state_path: Optional[Path] = None\n\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(base_dir: Path) -> str:\n pkg = \"examples.python_repo_grounded_qa\"\n if (base_dir / \"python_repo_graph.py\").exists():\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n if (base_dir / \"repo_graph.py\").exists():\n return f\"{pkg}.repo_graph:PythonRepoGraph\"\n # Fallback to python_repo_graph 
by name\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n\n\ndef load_program_config(repo_root: str) -> ProgramConfig:\n repo = Path(repo_root).resolve()\n ex_dir = Path(__file__).resolve().parents[2]\n example_dir = Path(__file__).resolve().parent\n adapters_dir = ex_dir / \"artifacts\" / \"smoke_base\"\n knowledge_path = adapters_dir / \"repository_knowledge.json\"\n pg_backend = _detect_pg_backend(example_dir)\n program_id = repo.name or \"repo\"\n return ProgramConfig(\n program_id=program_id,\n pg_backend=pg_backend,\n paths=ProgramPaths(\n adapters_dir=adapters_dir,\n knowledge_path=knowledge_path,\n program_state_path=adapters_dir / \".program_state.json\",\n ),\n contracts=ProgramContracts(),\n )","source_hash":"478bd1b12009db72e078bf222582a9c2cdf8cdedc2c722fc22424d4953389d08","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.program_config.load_program_config","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.program_config.load_program_config#L41-L58","kind":"function","name":"load_program_config","path":"examples/python_repo_grounded_qa/program_config.py","language":"python","start_line":41,"end_line":58,"context_start_line":21,"context_end_line":60,"code":"\n\n@dataclass(frozen=True)\nclass ProgramConfig:\n program_id: str\n pg_backend: str # dotted path module:Class\n paths: ProgramPaths\n contracts: ProgramContracts\n\n\ndef _detect_pg_backend(base_dir: Path) -> str:\n pkg = \"examples.python_repo_grounded_qa\"\n if (base_dir / \"python_repo_graph.py\").exists():\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n if (base_dir / \"repo_graph.py\").exists():\n return f\"{pkg}.repo_graph:PythonRepoGraph\"\n # Fallback to python_repo_graph by name\n return f\"{pkg}.python_repo_graph:PythonRepoGraph\"\n\n\ndef load_program_config(repo_root: str) -> ProgramConfig:\n repo = Path(repo_root).resolve()\n ex_dir = Path(__file__).resolve().parents[2]\n example_dir = Path(__file__).resolve().parent\n adapters_dir = ex_dir / \"artifacts\" / \"smoke_base\"\n knowledge_path = adapters_dir / \"repository_knowledge.json\"\n pg_backend = _detect_pg_backend(example_dir)\n program_id = repo.name or \"repo\"\n return ProgramConfig(\n program_id=program_id,\n pg_backend=pg_backend,\n paths=ProgramPaths(\n adapters_dir=adapters_dir,\n knowledge_path=knowledge_path,\n program_state_path=adapters_dir / \".program_state.json\",\n ),\n contracts=ProgramContracts(),\n )\n\n","source_hash":"478bd1b12009db72e078bf222582a9c2cdf8cdedc2c722fc22424d4953389d08","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_qa.modules.codegraph_core#L1-L200","kind":"module","name":"examples.python_repo_grounded_qa.modules.codegraph_core","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":1,"end_line":200,"context_start_line":1,"context_end_line":200,"code":"from __future__ import annotations\n\nimport os\nimport ast\nimport hashlib\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple, Iterable, Set\n\n\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # 
absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid\n ent = CGEntity(\n id=mid, kind=\"module\", name=mod, file=abs_fp, owner=None,\n start_line=1, end_line=self._safe_count_lines(abs_fp),\n )\n self.entities_by_id[mid] = ent\n self._ids_by_file.setdefault(abs_fp, []).append(mid)\n self._index_identifiers.setdefault(mod.lower(), []).append(mid)\n # Parse AST for defs/imports/calls\n try:\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n src = fh.read()\n tree = ast.parse(src)\n except Exception:\n tree = None\n if tree is None:\n continue\n # functions/classes\n for node in ast.walk(tree):\n if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n fid = f\"py:{mod}.{name}\"\n self.entities_by_id[fid] = CGEntity(\n id=fid, kind=\"function\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(fid)\n self.edges_list.append(CGEdge(src=mid, dst=fid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(fid)\n elif isinstance(node, ast.ClassDef):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n cid = f\"py:{mod}.{name}\"\n self.entities_by_id[cid] = CGEntity(\n id=cid, kind=\"class\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(cid)\n self.edges_list.append(CGEdge(src=mid, dst=cid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(cid)\n # imports (module-level)\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for alias in node.names:\n im = alias.name\n if not im:\n continue\n # record import edge to module-name id if present later; store raw as meta by name\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n elif isinstance(node, ast.ImportFrom):\n im = node.module or \"\"\n if not im:\n continue\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n except Exception:\n pass\n # calls (best-effort): record identifiers used in Call nodes\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Call):\n fn = getattr(node, \"func\", None)\n name = None\n if isinstance(fn, ast.Attribute):\n name = getattr(fn.attr, \"lower\", None)\n name = fn.attr if fn and hasattr(fn, \"attr\") else None\n elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = name.lower()\n for cand in self._index_identifiers.get(lid, []):\n 
self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.FileSpan","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_qa.modules.codegraph_core.FileSpan#L11-L14","kind":"class","name":"FileSpan","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":11,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"from __future__ import annotations\n\nimport os\nimport ast\nimport hashlib\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple, Iterable, Set\n\n\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} 
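One detail worth flagging in the codegraph_core.py snapshot: in the best-effort call extraction, name = getattr(fn.attr, "lower", None) binds a string method and is immediately overwritten by the following line, so for an ast.Attribute node the net effect is simply name = fn.attr. Under that reading, a usage sketch over a hypothetical two-file repo follows; note that calls edges are resolved by identifier name against whatever has been indexed so far, so they depend on file discovery order.

import os, tempfile

# Hypothetical two-file repo: util.add is defined in one module and
# called via an attribute from another.
root = tempfile.mkdtemp()
with open(os.path.join(root, "util.py"), "w", encoding="utf-8") as fh:
    fh.write("def add(a, b):\n    return a + b\n")
with open(os.path.join(root, "app.py"), "w", encoding="utf-8") as fh:
    fh.write("import util\n\nprint(util.add(1, 2))\n")

g = CodeGraph(root).build()
# Identifier lookup is lowercase-normalized on both sides.
assert "py:util.add" in g.find_identifier_ids("Add")
# The app -> util import edge survives edges() because both endpoints exist.
assert any(e.type == "imports" for e in g.edges())
# calls edges (app -> util.add) are best-effort: they are recorded only
# if util.py happened to be indexed before app.py was parsed.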
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.CGEntity","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_qa.modules.codegraph_core.CGEntity#L18-L25","kind":"class","name":"CGEntity","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":18,"end_line":25,"context_start_line":1,"context_end_line":45,"code":"from __future__ import annotations\n\nimport os\nimport ast\nimport hashlib\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple, Iterable, Set\n\n\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.CGEdge","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_qa.modules.codegraph_core.CGEdge#L29-L32","kind":"class","name":"CGEdge","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":29,"end_line":32,"context_start_line":9,"context_end_line":52,"code":"\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.CodeGraph","uri":"program://Program_Conditioned_Adapter/class/examples.python_repo_grounded_qa.modules.codegraph_core.CodeGraph#L35-L198","kind":"class","name":"CodeGraph","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":35,"end_line":198,"context_start_line":15,"context_end_line":200,"code":"\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid\n ent = CGEntity(\n id=mid, kind=\"module\", name=mod, file=abs_fp, owner=None,\n start_line=1, end_line=self._safe_count_lines(abs_fp),\n )\n self.entities_by_id[mid] = ent\n self._ids_by_file.setdefault(abs_fp, []).append(mid)\n self._index_identifiers.setdefault(mod.lower(), []).append(mid)\n # Parse AST for defs/imports/calls\n try:\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n src = fh.read()\n tree = ast.parse(src)\n except Exception:\n tree = None\n if tree is None:\n continue\n # functions/classes\n for node in ast.walk(tree):\n if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n fid = f\"py:{mod}.{name}\"\n self.entities_by_id[fid] = CGEntity(\n id=fid, kind=\"function\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(fid)\n self.edges_list.append(CGEdge(src=mid, dst=fid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(fid)\n elif isinstance(node, ast.ClassDef):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n cid = f\"py:{mod}.{name}\"\n self.entities_by_id[cid] = CGEntity(\n id=cid, kind=\"class\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(cid)\n self.edges_list.append(CGEdge(src=mid, dst=cid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(cid)\n # imports (module-level)\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for alias in node.names:\n im = alias.name\n if not im:\n continue\n # record import edge to module-name id if present later; store raw as meta by name\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n elif isinstance(node, ast.ImportFrom):\n im = node.module or \"\"\n if not im:\n continue\n 
tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n except Exception:\n pass\n # calls (best-effort): record identifiers used in Call nodes\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Call):\n fn = getattr(node, \"func\", None)\n name = None\n if isinstance(fn, ast.Attribute):\n name = getattr(fn.attr, \"lower\", None)\n name = fn.attr if fn and hasattr(fn, \"attr\") else None\n elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = name.lower()\n for cand in self._index_identifiers.get(lid, []):\n self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.__init__","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core.__init__#L36-L44","kind":"function","name":"__init__","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":36,"end_line":44,"context_start_line":16,"context_end_line":64,"code":"\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str\n file: str # absolute path\n owner: Optional[str]\n start_line: int\n end_line: 
int\n\n\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid\n ent = CGEntity(\n id=mid, kind=\"module\", name=mod, file=abs_fp, owner=None,\n start_line=1, end_line=self._safe_count_lines(abs_fp),\n )\n self.entities_by_id[mid] = ent\n self._ids_by_file.setdefault(abs_fp, []).append(mid)\n self._index_identifiers.setdefault(mod.lower(), []).append(mid)\n # Parse AST for defs/imports/calls\n try:\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n src = fh.read()\n tree = ast.parse(src)","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.build","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core.build#L47-L139","kind":"function","name":"build","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":47,"end_line":139,"context_start_line":27,"context_end_line":159,"code":"\n@dataclass\nclass CGEdge:\n src: str # CGEntity.id\n dst: str # CGEntity.id\n type: str # imports|calls|owns|tests\n\n\nclass CodeGraph:\n def __init__(self, repo_root: str, ignore: Optional[List[str]] = None):\n self.root = os.path.abspath(repo_root)\n self.ignore_rules = [s for s in (ignore or []) if s]\n self.entities_by_id: Dict[str, CGEntity] = {}\n self.edges_list: List[CGEdge] = []\n self._file_hash: Dict[str, str] = {}\n self._id_by_module: Dict[str, str] = {}\n self._ids_by_file: Dict[str, List[str]] = {}\n self._index_identifiers: Dict[str, List[str]] = {}\n\n # Build\n def build(self) -> \"CodeGraph\":\n py_files = self._discover_py_files(self.root, self.ignore_rules)\n for abs_fp in py_files:\n mod = self._module_name_for(abs_fp)\n mid = f\"py:{mod}\"\n self._id_by_module[mod] = mid\n ent = CGEntity(\n id=mid, kind=\"module\", name=mod, file=abs_fp, owner=None,\n start_line=1, end_line=self._safe_count_lines(abs_fp),\n )\n self.entities_by_id[mid] = ent\n self._ids_by_file.setdefault(abs_fp, []).append(mid)\n self._index_identifiers.setdefault(mod.lower(), []).append(mid)\n # Parse AST for defs/imports/calls\n try:\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n src = fh.read()\n tree = ast.parse(src)\n except Exception:\n tree = None\n if tree is None:\n continue\n # functions/classes\n for node in ast.walk(tree):\n if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n fid = f\"py:{mod}.{name}\"\n self.entities_by_id[fid] = CGEntity(\n id=fid, kind=\"function\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n 
self._ids_by_file.setdefault(abs_fp, []).append(fid)\n self.edges_list.append(CGEdge(src=mid, dst=fid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(fid)\n elif isinstance(node, ast.ClassDef):\n name = getattr(node, \"name\", \"\")\n a = int(getattr(node, \"lineno\", 1))\n b = int(getattr(node, \"end_lineno\", a))\n cid = f\"py:{mod}.{name}\"\n self.entities_by_id[cid] = CGEntity(\n id=cid, kind=\"class\", name=name, file=abs_fp, owner=mid,\n start_line=a, end_line=b,\n )\n self._ids_by_file.setdefault(abs_fp, []).append(cid)\n self.edges_list.append(CGEdge(src=mid, dst=cid, type=\"owns\"))\n self._index_identifiers.setdefault(name.lower(), []).append(cid)\n # imports (module-level)\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for alias in node.names:\n im = alias.name\n if not im:\n continue\n # record import edge to module-name id if present later; store raw as meta by name\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n elif isinstance(node, ast.ImportFrom):\n im = node.module or \"\"\n if not im:\n continue\n tgt_mod = im\n tid = f\"py:{tgt_mod}\"\n self.edges_list.append(CGEdge(src=mid, dst=tid, type=\"imports\"))\n except Exception:\n pass\n # calls (best-effort): record identifiers used in Call nodes\n try:\n for node in ast.walk(tree):\n if isinstance(node, ast.Call):\n fn = getattr(node, \"func\", None)\n name = None\n if isinstance(fn, ast.Attribute):\n name = getattr(fn.attr, \"lower\", None)\n name = fn.attr if fn and hasattr(fn, \"attr\") else None\n elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = name.lower()\n for cand in self._index_identifiers.get(lid, []):\n self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.entities","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core.entities#L142-L143","kind":"function","name":"entities","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":142,"end_line":143,"context_start_line":122,"context_end_line":163,"code":" if isinstance(fn, ast.Attribute):\n name = getattr(fn.attr, \"lower\", None)\n name = fn.attr if fn and hasattr(fn, \"attr\") else None\n elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = name.lower()\n for cand in 
self._index_identifiers.get(lid, []):\n self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.edges","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core.edges#L145-L150","kind":"function","name":"edges","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":145,"end_line":150,"context_start_line":125,"context_end_line":170,"code":" elif isinstance(fn, ast.Name):\n name = fn.id\n if name:\n lid = name.lower()\n for cand in self._index_identifiers.get(lid, []):\n self.edges_list.append(CGEdge(src=mid, dst=cand, type=\"calls\"))\n except Exception:\n pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} 
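The `build` records above walk each file's AST to register function/class entities, `owns`/`imports` edges, and best-effort `calls` edges keyed through a lowercased identifier index. A minimal, self-contained sketch of that pattern follows; `MiniEdge` and `scan_module` are illustrative names, not part of the repo, and for brevity the call edge targets `py:{mod}.{name}` directly instead of going through the identifier index. Note that the dumped source contains a dead assignment (`name = getattr(fn.attr, "lower", None)`) that the next line immediately overwrites; the sketch uses the corrected attribute/name extraction.

```python
# Illustrative sketch of the CodeGraph.build AST walk (made-up names:
# MiniEdge, scan_module). Simplification: call edges are named directly
# rather than resolved via the identifier index used in the dump.
import ast
from dataclasses import dataclass
from typing import Dict, List, Tuple


@dataclass
class MiniEdge:
    src: str
    dst: str
    type: str  # imports | calls | owns


def scan_module(mod: str, src: str) -> Tuple[Dict[str, Tuple[int, int]], List[MiniEdge]]:
    """Collect def/class line spans plus owns/imports/calls edges for one module."""
    mid = f"py:{mod}"
    entities: Dict[str, Tuple[int, int]] = {}
    edges: List[MiniEdge] = []
    tree = ast.parse(src)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
            fid = f"py:{mod}.{node.name}"
            entities[fid] = (node.lineno, getattr(node, "end_lineno", node.lineno))
            edges.append(MiniEdge(mid, fid, "owns"))
        elif isinstance(node, ast.Import):
            for alias in node.names:
                edges.append(MiniEdge(mid, f"py:{alias.name}", "imports"))
        elif isinstance(node, ast.ImportFrom) and node.module:
            edges.append(MiniEdge(mid, f"py:{node.module}", "imports"))
        elif isinstance(node, ast.Call):
            fn = node.func
            # Corrected extraction: attribute calls use fn.attr, bare calls fn.id
            # (the dump's dead `getattr(fn.attr, "lower", None)` line is dropped).
            name = fn.attr if isinstance(fn, ast.Attribute) else (
                fn.id if isinstance(fn, ast.Name) else None
            )
            if name:
                edges.append(MiniEdge(mid, f"py:{mod}.{name}", "calls"))
    return entities, edges


ents, edges = scan_module("demo", "import os\n\ndef f():\n    g()\n\ndef g():\n    pass\n")
assert "py:demo.f" in ents and any(e.type == "calls" and e.dst == "py:demo.g" for e in edges)
```

As in the dump, unresolved `imports` targets (e.g. `py:os` for a stdlib module) are tolerated at build time; the `edges()` accessor later drops any edge whose endpoint never became a known entity.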
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.file_hash","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core.file_hash#L152-L153","kind":"function","name":"file_hash","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":152,"end_line":153,"context_start_line":132,"context_end_line":173,"code":" pass\n # tests tag\n base = os.path.basename(abs_fp)\n if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.ids_for_file","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core.ids_for_file#L155-L156","kind":"function","name":"ids_for_file","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":155,"end_line":156,"context_start_line":135,"context_end_line":176,"code":" if base.startswith(\"test_\") or base.endswith(\"_test.py\"):\n self.entities_by_id[mid].kind = \"test_module\"\n # finalize file hashes\n self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n 
ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core.find_identifier_ids","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core.find_identifier_ids#L158-L159","kind":"function","name":"find_identifier_ids","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":158,"end_line":159,"context_start_line":138,"context_end_line":179,"code":" self._precompute_hashes(py_files)\n return self\n\n # Public accessors\n def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core._discover_py_files","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core._discover_py_files#L162-L173","kind":"function","name":"_discover_py_files","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":162,"end_line":173,"context_start_line":142,"context_end_line":193,"code":" def entities(self) -> Iterable[CGEntity]:\n return self.entities_by_id.values()\n\n def edges(self) -> Iterable[CGEdge]:\n # Filter edges whose endpoints are known (post totality)\n known = set(self.entities_by_id.keys())\n for e in self.edges_list:\n if (e.src in known) and (e.dst in known):\n yield e\n\n def file_hash(self, abs_path: str) -> str:\n return self._file_hash.get(abs_path) or \"\"\n\n def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments 
that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core._module_name_for","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core._module_name_for#L175-L181","kind":"function","name":"_module_name_for","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":175,"end_line":181,"context_start_line":155,"context_end_line":200,"code":" def ids_for_file(self, abs_path: str) -> List[str]:\n return list(self._ids_by_file.get(abs_path, []))\n\n def find_identifier_ids(self, token: str) -> List[str]:\n return list(self._index_identifiers.get(token.lower(), []))\n\n # Helpers\n def _discover_py_files(self, root: str, ignore: List[str]) -> List[str]:\n out: List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core._safe_count_lines","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core._safe_count_lines#L183-L188","kind":"function","name":"_safe_count_lines","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":183,"end_line":188,"context_start_line":163,"context_end_line":200,"code":" out: 
List[str] = []\n for dirpath, dirnames, filenames in os.walk(root):\n # naive ignore: drop segments that contain any ignore pattern\n if any(ig in dirpath for ig in ignore):\n continue\n for fn in filenames:\n if not fn.endswith(\".py\"):\n continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.codegraph_core._precompute_hashes","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.codegraph_core._precompute_hashes#L190-L198","kind":"function","name":"_precompute_hashes","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":190,"end_line":198,"context_start_line":170,"context_end_line":200,"code":" continue\n ap = os.path.abspath(os.path.join(dirpath, fn))\n out.append(ap)\n return out\n\n def _module_name_for(self, abs_file: str) -> str:\n # repo-relative without extension, path with dots\n rel = os.path.relpath(abs_file, self.root).replace(\"\\\\\", \"/\")\n if rel.endswith(\".py\"):\n rel = rel[:-3]\n parts = [p for p in rel.split(\"/\") if p and p != \"__init__\"]\n return \".\".join(parts) or os.path.splitext(os.path.basename(abs_file))[0]\n\n def _safe_count_lines(self, abs_file: str) -> int:\n try:\n with open(abs_file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n return sum(1 for _ in fh)\n except Exception:\n return 1\n\n def _precompute_hashes(self, files: List[str]) -> None:\n for fp in files:\n try:\n with open(fp, \"rb\") as fh:\n raw = fh.read()\n h = hashlib.sha256(raw).hexdigest()\n except Exception:\n h = \"\"\n self._file_hash[fp] = h\n\n","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.prompts","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_qa.modules.prompts#L1-L25","kind":"module","name":"examples.python_repo_grounded_qa.modules.prompts","path":"examples/python_repo_grounded_qa/modules/prompts.py","language":"python","start_line":1,"end_line":25,"context_start_line":1,"context_end_line":25,"code":"from typing import List\nfrom ..code_graph import CodeGraph\n\ndef build_prompts_for_module(g: CodeGraph, module: str, max_q: int = 3) -> List[str]:\n \"\"\"Construct a few simple, verifiable prompts for a module using symbol names and doc headers.\n\n Prefers questions that can be answered via local context/citations.\n \"\"\"\n prompts: List[str] = []\n # Symbols defined in the module\n 
defs = list(g.defs_in(module) or [])\n # Ask for an explanation of the module\n prompts.append(f\"Explain the key functions/classes in {module}. Cite path:line for each claim.\")\n # Ask about up to two concrete defs\n for fqn in defs[:2]:\n name = fqn.split(\".\")[-1]\n prompts.append(f\"What does `{name}` do in {module}? Show signature and cite path:line.\")\n # Unique and bounded\n uniq: List[str] = []\n for p in prompts:\n if p not in uniq:\n uniq.append(p)\n return uniq[: max(1, int(max_q))]\n\n","source_hash":"02441a59f7347f3704d9f17e1da09fb5bcbaabbc0806534a9e84a4f7b356d79f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.prompts.build_prompts_for_module","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.prompts.build_prompts_for_module#L4-L23","kind":"function","name":"build_prompts_for_module","path":"examples/python_repo_grounded_qa/modules/prompts.py","language":"python","start_line":4,"end_line":23,"context_start_line":1,"context_end_line":25,"code":"from typing import List\nfrom ..code_graph import CodeGraph\n\ndef build_prompts_for_module(g: CodeGraph, module: str, max_q: int = 3) -> List[str]:\n \"\"\"Construct a few simple, verifiable prompts for a module using symbol names and doc headers.\n\n Prefers questions that can be answered via local context/citations.\n \"\"\"\n prompts: List[str] = []\n # Symbols defined in the module\n defs = list(g.defs_in(module) or [])\n # Ask for an explanation of the module\n prompts.append(f\"Explain the key functions/classes in {module}. Cite path:line for each claim.\")\n # Ask about up to two concrete defs\n for fqn in defs[:2]:\n name = fqn.split(\".\")[-1]\n prompts.append(f\"What does `{name}` do in {module}? Show signature and cite path:line.\")\n # Unique and bounded\n uniq: List[str] = []\n for p in prompts:\n if p not in uniq:\n uniq.append(p)\n return uniq[: max(1, int(max_q))]\n\n","source_hash":"02441a59f7347f3704d9f17e1da09fb5bcbaabbc0806534a9e84a4f7b356d79f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_qa.modules.verify#L1-L314","kind":"module","name":"examples.python_repo_grounded_qa.modules.verify","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":1,"end_line":314,"context_start_line":1,"context_end_line":314,"code":"# citation regex check, pytest test runner\n\nfrom typing import Dict, List, Any\nimport subprocess\nfrom ..code_graph import CodeGraph\nimport os\n\n\ndef verify_with_tests(g: CodeGraph, module: str, *, repo_root: str, env: Dict[str, str]) -> bool:\n \"\"\"Run mapped tests for a module if available; return True if all selected tests pass.\n\n Uses CodeGraph's tests mapping (best-effort). 
If no tests mapped, returns True as a soft pass.\n \"\"\"\n nodes = g.pytest_nodes_by_module.get(module, [])\n # Also include module-level mapping\n if not nodes:\n nodes = g.tests_for_module(module)\n if not nodes:\n return True # no tests to run; accept\n # Build pytest command\n cmd = [\"pytest\", \"-q\"]\n # Prefer node ids if present (file::Class::test), else module paths\n for n in nodes[:8]: # cap for speed\n cmd.append(n)\n try:\n proc = subprocess.run(cmd, cwd=repo_root, env=env, capture_output=True, text=True)\n return int(proc.returncode) == 0\n except Exception:\n return False\n\n\ndef extract_citations(text: str) -> List[str]:\n try:\n import re\n rx = re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n return [m.group(1) for m in rx.finditer(text or \"\")]\n except Exception:\n return []\n\n\ndef has_citations(s: str, per_para: bool) -> bool:\n import re as _re\n rx = _re.compile(r\"(?:path:\\s*)?[A-Za-z0-9_./-]+?\\.\\w+:\\d+(?:-\\d+)?\")\n if not rx.search(s):\n return False\n if per_para:\n paras = [p.strip() for p in s.split(\"\\n\\n\") if p.strip()]\n return all(rx.search(p) for p in paras)\n return True\n\n\ndef normalize_citations(text: str) -> List[tuple[str, int, int]]:\n \"\"\"Return normalized (path, a, b) triples for all citations in text.\n\n If end is missing, b==a. Paths are returned as-is (caller can make them repo-relative).\n \"\"\"\n try:\n import re\n rx = re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n out: List[tuple[str, int, int]] = []\n for m in rx.finditer(text or \"\"):\n p = m.group(1)\n try:\n a = int(m.group(2) or \"0\")\n except Exception:\n a = 0\n try:\n b = int(m.group(3) or a)\n except Exception:\n b = a\n out.append((p, min(a, b), max(a, b)))\n return out\n except Exception:\n return []\n\n\ndef extract_typed_facts(text: str, g: CodeGraph) -> List[dict]:\n \"\"\"Best-effort, lightweight fact extraction from answer text.\n\n Emits dicts like {kind, symbol, span} where:\n - kind in {\"signature\", \"mentions\", \"returns\"}\n - symbol is best-effort FQN or name\n - span optionally refers to a cited (path,a-b) if the symbol can be mapped later\n \"\"\"\n out: List[dict] = []\n try:\n import re\n # signature claims: show name and parenthesized params\n for m in re.finditer(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(([^)]*)\\)\", text or \"\"):\n name = m.group(1)\n params = m.group(2)\n # map to FQN if possible (short name match)\n fqns = [s.fqn for s in g.find_symbol(name)] if name else []\n out.append({\n \"kind\": \"signature\",\n \"symbol\": fqns[0] if fqns else name,\n \"params\": params,\n })\n # explicit returns claims (very naive)\n for m in re.finditer(r\"returns\\s+([A-Za-z_][A-Za-z0-9_.]*)\", (text or \"\").lower()):\n typ = m.group(1)\n out.append({\"kind\": \"returns\", \"type\": typ})\n # code mentions in backticks\n for m in re.finditer(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\"):\n sym = m.group(1)\n out.append({\"kind\": \"mentions\", \"symbol\": sym})\n except Exception:\n return out\n return out\n\n\ndef extract_symbol_mentions(text: str) -> List[str]:\n \"\"\"Collect probable symbol names from text using simple patterns.\"\"\"\n names: List[str] = []\n try:\n import re\n # Backticked identifiers or dotted names\n for m in re.finditer(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\"):\n names.append(m.group(1).split(\".\")[-1])\n # Bare function/class name followed by '('\n for m in re.finditer(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\"):\n 
names.append(m.group(1))\n except Exception:\n return names\n # De-dup while preserving order\n seen = set()\n uniq: List[str] = []\n for n in names:\n if n and n not in seen:\n seen.add(n)\n uniq.append(n)\n return uniq\n\n\ndef _abs_paths_like(root: str, files: List[str]) -> List[str]:\n out: List[str] = []\n for rel in files or []:\n if not rel:\n continue\n if os.path.isabs(rel):\n out.append(os.path.abspath(rel))\n else:\n out.append(os.path.abspath(os.path.join(root, rel)))\n return out\n\n\ndef _to_repo_relative(root: str, abs_path: str) -> str:\n try:\n root_abs = os.path.abspath(root or \"\")\n p_abs = os.path.abspath(abs_path or \"\")\n if p_abs.startswith(root_abs):\n rel = p_abs[len(root_abs):].lstrip(os.sep)\n return rel.replace(\"\\\\\", \"/\")\n return abs_path.replace(\"\\\\\", \"/\")\n except Exception:\n return abs_path.replace(\"\\\\\", \"/\")\n\n\ndef _merge_overlapping_spans(spans: List[tuple[str, int, int]]) -> List[tuple[str, int, int]]:\n by_file: Dict[str, List[tuple[int, int]]] = {}\n for p, a, b in spans:\n by_file.setdefault(p, []).append((min(a, b), max(a, b)))\n merged: List[tuple[str, int, int]] = []\n for p, ranges in by_file.items():\n ranges.sort()\n cur_a, cur_b = ranges[0]\n for a, b in ranges[1:]:\n if a <= cur_b + 1:\n cur_b = max(cur_b, b)\n else:\n merged.append((p, cur_a, cur_b))\n cur_a, cur_b = a, b\n merged.append((p, cur_a, cur_b))\n return merged\n\n\ndef resolve_claim_spans(text: str, g: CodeGraph, idx: object, files_scope: List[str]) -> List[tuple[str, int, int]]:\n \"\"\"Resolve spans for claims using anchor-first mapping, then similarity fallback.\"\"\"\n try:\n scope_abs = set(_abs_paths_like(g.root, files_scope))\n except Exception:\n scope_abs = set()\n spans: List[tuple[str, int, int]] = []\n names = extract_symbol_mentions(text or \"\")\n # Anchor-first by symbol lookup\n for name in names:\n try:\n cands = [s for s in g.find_symbol(name) if (not scope_abs or os.path.abspath(s.file) in scope_abs)]\n except Exception:\n cands = []\n if cands:\n # Prefer shortest span (tighter definition)\n s = min(cands, key=lambda z: (int(getattr(z, \"end_line\", getattr(z, \"line\", 1))) - int(getattr(z, \"line\", 1))), default=None)\n if s:\n rel = _to_repo_relative(g.root, s.file)\n spans.append((rel, int(getattr(s, \"line\", 1)), int(getattr(s, \"end_line\", getattr(s, \"line\", 1)))))\n # Similarity fallback via SymbolIndex if nothing found\n if not spans and idx is not None:\n try:\n # Late import type to avoid hard dependency\n top = getattr(idx, \"query\")(text or \"\", 3)\n for it in (top or [])[:3]:\n rel = it.get(\"rel\") or _to_repo_relative(g.root, it.get(\"file\", \"\"))\n a = int(it.get(\"line\", 1)); b = int(it.get(\"end_line\", a))\n spans.append((rel, min(a, b), max(a, b)))\n except Exception:\n pass\n # Merge overlaps and de-dup\n seen = set()\n spans = _merge_overlapping_spans([s for s in spans if s not in seen and not seen.add(s)])\n return spans\n\n\ndef validate_spans(cites: List[tuple[str, int, int]], g: CodeGraph) -> List[tuple[str, int, int]]:\n \"\"\"Clamp to file bounds and ensure 1-based inclusive ranges.\"\"\"\n ok: List[tuple[str, int, int]] = []\n for p, a, b in cites:\n a0 = max(1, int(a)); b0 = max(a0, int(b))\n # Best-effort cap to file length\n try:\n abs_fp = os.path.join(g.root, p) if not os.path.isabs(p) else p\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n n = sum(1 for _ in fh)\n b0 = min(b0, int(n))\n except Exception:\n pass\n ok.append((p, a0, b0))\n return ok\n\n\ndef 
_file_from_citation_path(g: CodeGraph, files_scope: List[str], cite_path: str) -> str | None:\n \"\"\"Resolve a citation path to a repo-relative path within scope, allowing basename-only matches.\"\"\"\n if not cite_path:\n return None\n # If absolute, try to make it repo-relative\n if os.path.isabs(cite_path):\n return _to_repo_relative(g.root, cite_path)\n # Direct match in scope\n scope_rel = [f if not os.path.isabs(f) else _to_repo_relative(g.root, f) for f in (files_scope or [])]\n if cite_path in scope_rel:\n return cite_path\n # Basename match in scope\n base = os.path.basename(cite_path)\n cands = [r for r in scope_rel if os.path.basename(r) == base]\n if len(cands) == 1:\n return cands[0]\n # As a fallback, accept the path as-is (caller will attempt to open)\n return cite_path\n\n\ndef _nearest_symbol_span_for_line(g: CodeGraph, rel_path: str, a: int, b: int) -> tuple[int, int] | None:\n \"\"\"Find a symbol span in file that overlaps [a,b], else nearest by distance to midpoint.\"\"\"\n try:\n abs_fp = os.path.join(g.root, rel_path) if not os.path.isabs(rel_path) else rel_path\n # Filter symbols for this file\n syms: List[Any] = []\n try:\n syms_all = list(getattr(g, \"symbols_by_fqn\", {}).values())\n except Exception:\n syms_all = []\n for s in syms_all or []:\n try:\n if os.path.abspath(getattr(s, \"file\", \"\")) == os.path.abspath(abs_fp):\n syms.append(s)\n except Exception:\n continue\n if not syms:\n return None\n a0, b0 = int(a), int(b)\n # Overlap first\n overlaps = [s for s in syms if not (int(s.end_line) < a0 or int(s.line) > b0)]\n if overlaps:\n s = min(overlaps, key=lambda z: (int(z.end_line) - int(z.line)))\n return int(s.line), int(s.end_line)\n # Nearest by midpoint distance\n mid = (a0 + b0) // 2\n s = min(syms, key=lambda z: min(abs(mid - int(z.line)), abs(mid - int(z.end_line))))\n return int(s.line), int(s.end_line)\n except Exception:\n return None\n\n\ndef repair_citations(text: str, g: CodeGraph, files_scope: List[str]) -> List[tuple[str, int, int]]:\n \"\"\"Map cited path:a-b to nearest anchored symbol spans within scope, de-dup and validate.\"\"\"\n cites = normalize_citations(text or \"\")\n if not cites:\n return []\n repaired: List[tuple[str, int, int]] = []\n for (p_raw, a, b) in cites:\n try:\n rel = _file_from_citation_path(g, files_scope, p_raw)\n if not rel:\n continue\n span = _nearest_symbol_span_for_line(g, rel, a, b)\n if not span:\n # Keep original range if no symbol found\n repaired.append((rel, min(int(a), int(b)), max(int(a), int(b))))\n continue\n sa, sb = span\n repaired.append((rel, int(sa), int(sb)))\n except Exception:\n continue\n # Validate and merge\n repaired = validate_spans(repaired, g)\n # De-dup\n seen = set()\n uniq = []\n for r in repaired:\n if r not in seen:\n seen.add(r)\n uniq.append(r)\n return _merge_overlapping_spans(uniq)\n\n","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify.verify_with_tests","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify.verify_with_tests#L9-L29","kind":"function","name":"verify_with_tests","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":9,"end_line":29,"context_start_line":1,"context_end_line":49,"code":"# citation regex check, pytest test runner\n\nfrom typing import Dict, List, Any\nimport subprocess\nfrom ..code_graph import CodeGraph\nimport 
os\n\n\ndef verify_with_tests(g: CodeGraph, module: str, *, repo_root: str, env: Dict[str, str]) -> bool:\n \"\"\"Run mapped tests for a module if available; return True if all selected tests pass.\n\n Uses CodeGraph's tests mapping (best-effort). If no tests mapped, returns True as a soft pass.\n \"\"\"\n nodes = g.pytest_nodes_by_module.get(module, [])\n # Also include module-level mapping\n if not nodes:\n nodes = g.tests_for_module(module)\n if not nodes:\n return True # no tests to run; accept\n # Build pytest command\n cmd = [\"pytest\", \"-q\"]\n # Prefer node ids if present (file::Class::test), else module paths\n for n in nodes[:8]: # cap for speed\n cmd.append(n)\n try:\n proc = subprocess.run(cmd, cwd=repo_root, env=env, capture_output=True, text=True)\n return int(proc.returncode) == 0\n except Exception:\n return False\n\n\ndef extract_citations(text: str) -> List[str]:\n try:\n import re\n rx = re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n return [m.group(1) for m in rx.finditer(text or \"\")]\n except Exception:\n return []\n\n\ndef has_citations(s: str, per_para: bool) -> bool:\n import re as _re\n rx = _re.compile(r\"(?:path:\\s*)?[A-Za-z0-9_./-]+?\\.\\w+:\\d+(?:-\\d+)?\")\n if not rx.search(s):\n return False\n if per_para:\n paras = [p.strip() for p in s.split(\"\\n\\n\") if p.strip()]\n return all(rx.search(p) for p in paras)\n return True","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify.extract_citations","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify.extract_citations#L32-L38","kind":"function","name":"extract_citations","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":32,"end_line":38,"context_start_line":12,"context_end_line":58,"code":" Uses CodeGraph's tests mapping (best-effort). If no tests mapped, returns True as a soft pass.\n \"\"\"\n nodes = g.pytest_nodes_by_module.get(module, [])\n # Also include module-level mapping\n if not nodes:\n nodes = g.tests_for_module(module)\n if not nodes:\n return True # no tests to run; accept\n # Build pytest command\n cmd = [\"pytest\", \"-q\"]\n # Prefer node ids if present (file::Class::test), else module paths\n for n in nodes[:8]: # cap for speed\n cmd.append(n)\n try:\n proc = subprocess.run(cmd, cwd=repo_root, env=env, capture_output=True, text=True)\n return int(proc.returncode) == 0\n except Exception:\n return False\n\n\ndef extract_citations(text: str) -> List[str]:\n try:\n import re\n rx = re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n return [m.group(1) for m in rx.finditer(text or \"\")]\n except Exception:\n return []\n\n\ndef has_citations(s: str, per_para: bool) -> bool:\n import re as _re\n rx = _re.compile(r\"(?:path:\\s*)?[A-Za-z0-9_./-]+?\\.\\w+:\\d+(?:-\\d+)?\")\n if not rx.search(s):\n return False\n if per_para:\n paras = [p.strip() for p in s.split(\"\\n\\n\") if p.strip()]\n return all(rx.search(p) for p in paras)\n return True\n\n\ndef normalize_citations(text: str) -> List[tuple[str, int, int]]:\n \"\"\"Return normalized (path, a, b) triples for all citations in text.\n\n If end is missing, b==a. 
Paths are returned as-is (caller can make them repo-relative).\n \"\"\"\n try:\n import re","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify.has_citations","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify.has_citations#L41-L49","kind":"function","name":"has_citations","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":41,"end_line":49,"context_start_line":21,"context_end_line":69,"code":" cmd = [\"pytest\", \"-q\"]\n # Prefer node ids if present (file::Class::test), else module paths\n for n in nodes[:8]: # cap for speed\n cmd.append(n)\n try:\n proc = subprocess.run(cmd, cwd=repo_root, env=env, capture_output=True, text=True)\n return int(proc.returncode) == 0\n except Exception:\n return False\n\n\ndef extract_citations(text: str) -> List[str]:\n try:\n import re\n rx = re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n return [m.group(1) for m in rx.finditer(text or \"\")]\n except Exception:\n return []\n\n\ndef has_citations(s: str, per_para: bool) -> bool:\n import re as _re\n rx = _re.compile(r\"(?:path:\\s*)?[A-Za-z0-9_./-]+?\\.\\w+:\\d+(?:-\\d+)?\")\n if not rx.search(s):\n return False\n if per_para:\n paras = [p.strip() for p in s.split(\"\\n\\n\") if p.strip()]\n return all(rx.search(p) for p in paras)\n return True\n\n\ndef normalize_citations(text: str) -> List[tuple[str, int, int]]:\n \"\"\"Return normalized (path, a, b) triples for all citations in text.\n\n If end is missing, b==a. Paths are returned as-is (caller can make them repo-relative).\n \"\"\"\n try:\n import re\n rx = re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n out: List[tuple[str, int, int]] = []\n for m in rx.finditer(text or \"\"):\n p = m.group(1)\n try:\n a = int(m.group(2) or \"0\")\n except Exception:\n a = 0\n try:\n b = int(m.group(3) or a)\n except Exception:","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify.normalize_citations","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify.normalize_citations#L52-L74","kind":"function","name":"normalize_citations","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":52,"end_line":74,"context_start_line":32,"context_end_line":94,"code":"def extract_citations(text: str) -> List[str]:\n try:\n import re\n rx = re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n return [m.group(1) for m in rx.finditer(text or \"\")]\n except Exception:\n return []\n\n\ndef has_citations(s: str, per_para: bool) -> bool:\n import re as _re\n rx = _re.compile(r\"(?:path:\\s*)?[A-Za-z0-9_./-]+?\\.\\w+:\\d+(?:-\\d+)?\")\n if not rx.search(s):\n return False\n if per_para:\n paras = [p.strip() for p in s.split(\"\\n\\n\") if p.strip()]\n return all(rx.search(p) for p in paras)\n return True\n\n\ndef normalize_citations(text: str) -> List[tuple[str, int, int]]:\n \"\"\"Return normalized (path, a, b) triples for all citations in text.\n\n If end is missing, b==a. 
Paths are returned as-is (caller can make them repo-relative).\n \"\"\"\n try:\n import re\n rx = re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n out: List[tuple[str, int, int]] = []\n for m in rx.finditer(text or \"\"):\n p = m.group(1)\n try:\n a = int(m.group(2) or \"0\")\n except Exception:\n a = 0\n try:\n b = int(m.group(3) or a)\n except Exception:\n b = a\n out.append((p, min(a, b), max(a, b)))\n return out\n except Exception:\n return []\n\n\ndef extract_typed_facts(text: str, g: CodeGraph) -> List[dict]:\n \"\"\"Best-effort, lightweight fact extraction from answer text.\n\n Emits dicts like {kind, symbol, span} where:\n - kind in {\"signature\", \"mentions\", \"returns\"}\n - symbol is best-effort FQN or name\n - span optionally refers to a cited (path,a-b) if the symbol can be mapped later\n \"\"\"\n out: List[dict] = []\n try:\n import re\n # signature claims: show name and parenthesized params\n for m in re.finditer(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(([^)]*)\\)\", text or \"\"):\n name = m.group(1)\n params = m.group(2)\n # map to FQN if possible (short name match)\n fqns = [s.fqn for s in g.find_symbol(name)] if name else []\n out.append({","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify.extract_typed_facts","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify.extract_typed_facts#L77-L109","kind":"function","name":"extract_typed_facts","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":77,"end_line":109,"context_start_line":57,"context_end_line":129,"code":" try:\n import re\n rx = re.compile(r\"(?:path:\\s*)?([A-Za-z0-9_./\\-]+?\\.\\w+):(\\d+)(?:-(\\d+))?\")\n out: List[tuple[str, int, int]] = []\n for m in rx.finditer(text or \"\"):\n p = m.group(1)\n try:\n a = int(m.group(2) or \"0\")\n except Exception:\n a = 0\n try:\n b = int(m.group(3) or a)\n except Exception:\n b = a\n out.append((p, min(a, b), max(a, b)))\n return out\n except Exception:\n return []\n\n\ndef extract_typed_facts(text: str, g: CodeGraph) -> List[dict]:\n \"\"\"Best-effort, lightweight fact extraction from answer text.\n\n Emits dicts like {kind, symbol, span} where:\n - kind in {\"signature\", \"mentions\", \"returns\"}\n - symbol is best-effort FQN or name\n - span optionally refers to a cited (path,a-b) if the symbol can be mapped later\n \"\"\"\n out: List[dict] = []\n try:\n import re\n # signature claims: show name and parenthesized params\n for m in re.finditer(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(([^)]*)\\)\", text or \"\"):\n name = m.group(1)\n params = m.group(2)\n # map to FQN if possible (short name match)\n fqns = [s.fqn for s in g.find_symbol(name)] if name else []\n out.append({\n \"kind\": \"signature\",\n \"symbol\": fqns[0] if fqns else name,\n \"params\": params,\n })\n # explicit returns claims (very naive)\n for m in re.finditer(r\"returns\\s+([A-Za-z_][A-Za-z0-9_.]*)\", (text or \"\").lower()):\n typ = m.group(1)\n out.append({\"kind\": \"returns\", \"type\": typ})\n # code mentions in backticks\n for m in re.finditer(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\"):\n sym = m.group(1)\n out.append({\"kind\": \"mentions\", \"symbol\": sym})\n except Exception:\n return out\n return out\n\n\ndef extract_symbol_mentions(text: str) -> List[str]:\n \"\"\"Collect probable symbol names from text using 
simple patterns.\"\"\"\n names: List[str] = []\n try:\n import re\n # Backticked identifiers or dotted names\n for m in re.finditer(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\"):\n names.append(m.group(1).split(\".\")[-1])\n # Bare function/class name followed by '('\n for m in re.finditer(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\"):\n names.append(m.group(1))\n except Exception:\n return names\n # De-dup while preserving order\n seen = set()\n uniq: List[str] = []\n for n in names:\n if n and n not in seen:","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify.extract_symbol_mentions","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify.extract_symbol_mentions#L112-L132","kind":"function","name":"extract_symbol_mentions","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":112,"end_line":132,"context_start_line":92,"context_end_line":152,"code":" # map to FQN if possible (short name match)\n fqns = [s.fqn for s in g.find_symbol(name)] if name else []\n out.append({\n \"kind\": \"signature\",\n \"symbol\": fqns[0] if fqns else name,\n \"params\": params,\n })\n # explicit returns claims (very naive)\n for m in re.finditer(r\"returns\\s+([A-Za-z_][A-Za-z0-9_.]*)\", (text or \"\").lower()):\n typ = m.group(1)\n out.append({\"kind\": \"returns\", \"type\": typ})\n # code mentions in backticks\n for m in re.finditer(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\"):\n sym = m.group(1)\n out.append({\"kind\": \"mentions\", \"symbol\": sym})\n except Exception:\n return out\n return out\n\n\ndef extract_symbol_mentions(text: str) -> List[str]:\n \"\"\"Collect probable symbol names from text using simple patterns.\"\"\"\n names: List[str] = []\n try:\n import re\n # Backticked identifiers or dotted names\n for m in re.finditer(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\"):\n names.append(m.group(1).split(\".\")[-1])\n # Bare function/class name followed by '('\n for m in re.finditer(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\"):\n names.append(m.group(1))\n except Exception:\n return names\n # De-dup while preserving order\n seen = set()\n uniq: List[str] = []\n for n in names:\n if n and n not in seen:\n seen.add(n)\n uniq.append(n)\n return uniq\n\n\ndef _abs_paths_like(root: str, files: List[str]) -> List[str]:\n out: List[str] = []\n for rel in files or []:\n if not rel:\n continue\n if os.path.isabs(rel):\n out.append(os.path.abspath(rel))\n else:\n out.append(os.path.abspath(os.path.join(root, rel)))\n return out\n\n\ndef _to_repo_relative(root: str, abs_path: str) -> str:\n try:\n root_abs = os.path.abspath(root or \"\")\n p_abs = os.path.abspath(abs_path or \"\")\n if p_abs.startswith(root_abs):\n rel = p_abs[len(root_abs):].lstrip(os.sep)","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify._abs_paths_like","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify._abs_paths_like#L135-L144","kind":"function","name":"_abs_paths_like","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":135,"end_line":144,"context_start_line":115,"context_end_line":164,"code":" try:\n import re\n # Backticked 
identifiers or dotted names\n for m in re.finditer(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\"):\n names.append(m.group(1).split(\".\")[-1])\n # Bare function/class name followed by '('\n for m in re.finditer(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\"):\n names.append(m.group(1))\n except Exception:\n return names\n # De-dup while preserving order\n seen = set()\n uniq: List[str] = []\n for n in names:\n if n and n not in seen:\n seen.add(n)\n uniq.append(n)\n return uniq\n\n\ndef _abs_paths_like(root: str, files: List[str]) -> List[str]:\n out: List[str] = []\n for rel in files or []:\n if not rel:\n continue\n if os.path.isabs(rel):\n out.append(os.path.abspath(rel))\n else:\n out.append(os.path.abspath(os.path.join(root, rel)))\n return out\n\n\ndef _to_repo_relative(root: str, abs_path: str) -> str:\n try:\n root_abs = os.path.abspath(root or \"\")\n p_abs = os.path.abspath(abs_path or \"\")\n if p_abs.startswith(root_abs):\n rel = p_abs[len(root_abs):].lstrip(os.sep)\n return rel.replace(\"\\\\\", \"/\")\n return abs_path.replace(\"\\\\\", \"/\")\n except Exception:\n return abs_path.replace(\"\\\\\", \"/\")\n\n\ndef _merge_overlapping_spans(spans: List[tuple[str, int, int]]) -> List[tuple[str, int, int]]:\n by_file: Dict[str, List[tuple[int, int]]] = {}\n for p, a, b in spans:\n by_file.setdefault(p, []).append((min(a, b), max(a, b)))\n merged: List[tuple[str, int, int]] = []\n for p, ranges in by_file.items():","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify._to_repo_relative","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify._to_repo_relative#L147-L156","kind":"function","name":"_to_repo_relative","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":147,"end_line":156,"context_start_line":127,"context_end_line":176,"code":" uniq: List[str] = []\n for n in names:\n if n and n not in seen:\n seen.add(n)\n uniq.append(n)\n return uniq\n\n\ndef _abs_paths_like(root: str, files: List[str]) -> List[str]:\n out: List[str] = []\n for rel in files or []:\n if not rel:\n continue\n if os.path.isabs(rel):\n out.append(os.path.abspath(rel))\n else:\n out.append(os.path.abspath(os.path.join(root, rel)))\n return out\n\n\ndef _to_repo_relative(root: str, abs_path: str) -> str:\n try:\n root_abs = os.path.abspath(root or \"\")\n p_abs = os.path.abspath(abs_path or \"\")\n if p_abs.startswith(root_abs):\n rel = p_abs[len(root_abs):].lstrip(os.sep)\n return rel.replace(\"\\\\\", \"/\")\n return abs_path.replace(\"\\\\\", \"/\")\n except Exception:\n return abs_path.replace(\"\\\\\", \"/\")\n\n\ndef _merge_overlapping_spans(spans: List[tuple[str, int, int]]) -> List[tuple[str, int, int]]:\n by_file: Dict[str, List[tuple[int, int]]] = {}\n for p, a, b in spans:\n by_file.setdefault(p, []).append((min(a, b), max(a, b)))\n merged: List[tuple[str, int, int]] = []\n for p, ranges in by_file.items():\n ranges.sort()\n cur_a, cur_b = ranges[0]\n for a, b in ranges[1:]:\n if a <= cur_b + 1:\n cur_b = max(cur_b, b)\n else:\n merged.append((p, cur_a, cur_b))\n cur_a, cur_b = a, b\n merged.append((p, cur_a, cur_b))\n return merged\n\n","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} 
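The verify records above center on one citation grammar, `(?:path:\s*)?([A-Za-z0-9_./\-]+?\.\w+):(\d+)(?:-(\d+))?`, normalized into `(path, a, b)` triples with `b == a` when the end line is missing, then merged per file when ranges overlap or touch. The sketch below re-implements just that pair in isolation (`normalize` and `merge` are stand-in names; the demo string is made up) so the merge semantics, where adjacent ranges coalesce because of the `a <= cur_b + 1` test, are easy to check.

```python
# Standalone sketch of normalize_citations + _merge_overlapping_spans from
# modules/verify.py; function names and demo text are illustrative only.
import re
from typing import Dict, List, Tuple

RX = re.compile(r"(?:path:\s*)?([A-Za-z0-9_./\-]+?\.\w+):(\d+)(?:-(\d+))?")


def normalize(text: str) -> List[Tuple[str, int, int]]:
    out: List[Tuple[str, int, int]] = []
    for m in RX.finditer(text or ""):
        a = int(m.group(2))
        b = int(m.group(3) or a)  # missing end line -> b == a
        out.append((m.group(1), min(a, b), max(a, b)))
    return out


def merge(spans: List[Tuple[str, int, int]]) -> List[Tuple[str, int, int]]:
    by_file: Dict[str, List[Tuple[int, int]]] = {}
    for p, a, b in spans:
        by_file.setdefault(p, []).append((a, b))
    merged: List[Tuple[str, int, int]] = []
    for p, ranges in by_file.items():
        ranges.sort()
        cur_a, cur_b = ranges[0]
        for a, b in ranges[1:]:
            if a <= cur_b + 1:  # overlapping or directly adjacent -> extend
                cur_b = max(cur_b, b)
            else:
                merged.append((p, cur_a, cur_b))
                cur_a, cur_b = a, b
        merged.append((p, cur_a, cur_b))
    return merged


ans = "See path: core.py:10-20 and core.py:18-25; also util.py:3."
assert merge(normalize(ans)) == [("core.py", 10, 25), ("util.py", 3, 3)]
```

The same regex drives `extract_citations` and `has_citations`; the per-paragraph mode simply requires at least one match in every blank-line-separated paragraph.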
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify._merge_overlapping_spans","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify._merge_overlapping_spans#L159-L174","kind":"function","name":"_merge_overlapping_spans","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":159,"end_line":174,"context_start_line":139,"context_end_line":194,"code":" continue\n if os.path.isabs(rel):\n out.append(os.path.abspath(rel))\n else:\n out.append(os.path.abspath(os.path.join(root, rel)))\n return out\n\n\ndef _to_repo_relative(root: str, abs_path: str) -> str:\n try:\n root_abs = os.path.abspath(root or \"\")\n p_abs = os.path.abspath(abs_path or \"\")\n if p_abs.startswith(root_abs):\n rel = p_abs[len(root_abs):].lstrip(os.sep)\n return rel.replace(\"\\\\\", \"/\")\n return abs_path.replace(\"\\\\\", \"/\")\n except Exception:\n return abs_path.replace(\"\\\\\", \"/\")\n\n\ndef _merge_overlapping_spans(spans: List[tuple[str, int, int]]) -> List[tuple[str, int, int]]:\n by_file: Dict[str, List[tuple[int, int]]] = {}\n for p, a, b in spans:\n by_file.setdefault(p, []).append((min(a, b), max(a, b)))\n merged: List[tuple[str, int, int]] = []\n for p, ranges in by_file.items():\n ranges.sort()\n cur_a, cur_b = ranges[0]\n for a, b in ranges[1:]:\n if a <= cur_b + 1:\n cur_b = max(cur_b, b)\n else:\n merged.append((p, cur_a, cur_b))\n cur_a, cur_b = a, b\n merged.append((p, cur_a, cur_b))\n return merged\n\n\ndef resolve_claim_spans(text: str, g: CodeGraph, idx: object, files_scope: List[str]) -> List[tuple[str, int, int]]:\n \"\"\"Resolve spans for claims using anchor-first mapping, then similarity fallback.\"\"\"\n try:\n scope_abs = set(_abs_paths_like(g.root, files_scope))\n except Exception:\n scope_abs = set()\n spans: List[tuple[str, int, int]] = []\n names = extract_symbol_mentions(text or \"\")\n # Anchor-first by symbol lookup\n for name in names:\n try:\n cands = [s for s in g.find_symbol(name) if (not scope_abs or os.path.abspath(s.file) in scope_abs)]\n except Exception:\n cands = []\n if cands:\n # Prefer shortest span (tighter definition)\n s = min(cands, key=lambda z: (int(getattr(z, \"end_line\", getattr(z, \"line\", 1))) - int(getattr(z, \"line\", 1))), default=None)\n if s:","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify.resolve_claim_spans","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify.resolve_claim_spans#L177-L211","kind":"function","name":"resolve_claim_spans","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":177,"end_line":211,"context_start_line":157,"context_end_line":231,"code":"\n\ndef _merge_overlapping_spans(spans: List[tuple[str, int, int]]) -> List[tuple[str, int, int]]:\n by_file: Dict[str, List[tuple[int, int]]] = {}\n for p, a, b in spans:\n by_file.setdefault(p, []).append((min(a, b), max(a, b)))\n merged: List[tuple[str, int, int]] = []\n for p, ranges in by_file.items():\n ranges.sort()\n cur_a, cur_b = ranges[0]\n for a, b in ranges[1:]:\n if a <= cur_b + 1:\n cur_b = max(cur_b, b)\n else:\n merged.append((p, cur_a, cur_b))\n cur_a, cur_b = a, b\n merged.append((p, cur_a, cur_b))\n return merged\n\n\ndef resolve_claim_spans(text: str, g: CodeGraph, idx: object, files_scope: 
List[str]) -> List[tuple[str, int, int]]:\n \"\"\"Resolve spans for claims using anchor-first mapping, then similarity fallback.\"\"\"\n try:\n scope_abs = set(_abs_paths_like(g.root, files_scope))\n except Exception:\n scope_abs = set()\n spans: List[tuple[str, int, int]] = []\n names = extract_symbol_mentions(text or \"\")\n # Anchor-first by symbol lookup\n for name in names:\n try:\n cands = [s for s in g.find_symbol(name) if (not scope_abs or os.path.abspath(s.file) in scope_abs)]\n except Exception:\n cands = []\n if cands:\n # Prefer shortest span (tighter definition)\n s = min(cands, key=lambda z: (int(getattr(z, \"end_line\", getattr(z, \"line\", 1))) - int(getattr(z, \"line\", 1))), default=None)\n if s:\n rel = _to_repo_relative(g.root, s.file)\n spans.append((rel, int(getattr(s, \"line\", 1)), int(getattr(s, \"end_line\", getattr(s, \"line\", 1)))))\n # Similarity fallback via SymbolIndex if nothing found\n if not spans and idx is not None:\n try:\n # Late import type to avoid hard dependency\n top = getattr(idx, \"query\")(text or \"\", 3)\n for it in (top or [])[:3]:\n rel = it.get(\"rel\") or _to_repo_relative(g.root, it.get(\"file\", \"\"))\n a = int(it.get(\"line\", 1)); b = int(it.get(\"end_line\", a))\n spans.append((rel, min(a, b), max(a, b)))\n except Exception:\n pass\n # Merge overlaps and de-dup\n seen = set()\n spans = _merge_overlapping_spans([s for s in spans if s not in seen and not seen.add(s)])\n return spans\n\n\ndef validate_spans(cites: List[tuple[str, int, int]], g: CodeGraph) -> List[tuple[str, int, int]]:\n \"\"\"Clamp to file bounds and ensure 1-based inclusive ranges.\"\"\"\n ok: List[tuple[str, int, int]] = []\n for p, a, b in cites:\n a0 = max(1, int(a)); b0 = max(a0, int(b))\n # Best-effort cap to file length\n try:\n abs_fp = os.path.join(g.root, p) if not os.path.isabs(p) else p\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n n = sum(1 for _ in fh)\n b0 = min(b0, int(n))\n except Exception:\n pass\n ok.append((p, a0, b0))\n return ok\n\n\ndef _file_from_citation_path(g: CodeGraph, files_scope: List[str], cite_path: str) -> str | None:","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify.validate_spans","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify.validate_spans#L214-L228","kind":"function","name":"validate_spans","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":214,"end_line":228,"context_start_line":194,"context_end_line":248,"code":" if s:\n rel = _to_repo_relative(g.root, s.file)\n spans.append((rel, int(getattr(s, \"line\", 1)), int(getattr(s, \"end_line\", getattr(s, \"line\", 1)))))\n # Similarity fallback via SymbolIndex if nothing found\n if not spans and idx is not None:\n try:\n # Late import type to avoid hard dependency\n top = getattr(idx, \"query\")(text or \"\", 3)\n for it in (top or [])[:3]:\n rel = it.get(\"rel\") or _to_repo_relative(g.root, it.get(\"file\", \"\"))\n a = int(it.get(\"line\", 1)); b = int(it.get(\"end_line\", a))\n spans.append((rel, min(a, b), max(a, b)))\n except Exception:\n pass\n # Merge overlaps and de-dup\n seen = set()\n spans = _merge_overlapping_spans([s for s in spans if s not in seen and not seen.add(s)])\n return spans\n\n\ndef validate_spans(cites: List[tuple[str, int, int]], g: CodeGraph) -> List[tuple[str, int, int]]:\n 
\"\"\"Clamp to file bounds and ensure 1-based inclusive ranges.\"\"\"\n ok: List[tuple[str, int, int]] = []\n for p, a, b in cites:\n a0 = max(1, int(a)); b0 = max(a0, int(b))\n # Best-effort cap to file length\n try:\n abs_fp = os.path.join(g.root, p) if not os.path.isabs(p) else p\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n n = sum(1 for _ in fh)\n b0 = min(b0, int(n))\n except Exception:\n pass\n ok.append((p, a0, b0))\n return ok\n\n\ndef _file_from_citation_path(g: CodeGraph, files_scope: List[str], cite_path: str) -> str | None:\n \"\"\"Resolve a citation path to a repo-relative path within scope, allowing basename-only matches.\"\"\"\n if not cite_path:\n return None\n # If absolute, try to make it repo-relative\n if os.path.isabs(cite_path):\n return _to_repo_relative(g.root, cite_path)\n # Direct match in scope\n scope_rel = [f if not os.path.isabs(f) else _to_repo_relative(g.root, f) for f in (files_scope or [])]\n if cite_path in scope_rel:\n return cite_path\n # Basename match in scope\n base = os.path.basename(cite_path)\n cands = [r for r in scope_rel if os.path.basename(r) == base]\n if len(cands) == 1:\n return cands[0]\n # As a fallback, accept the path as-is (caller will attempt to open)\n return cite_path","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify._file_from_citation_path","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify._file_from_citation_path#L231-L248","kind":"function","name":"_file_from_citation_path","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":231,"end_line":248,"context_start_line":211,"context_end_line":268,"code":" return spans\n\n\ndef validate_spans(cites: List[tuple[str, int, int]], g: CodeGraph) -> List[tuple[str, int, int]]:\n \"\"\"Clamp to file bounds and ensure 1-based inclusive ranges.\"\"\"\n ok: List[tuple[str, int, int]] = []\n for p, a, b in cites:\n a0 = max(1, int(a)); b0 = max(a0, int(b))\n # Best-effort cap to file length\n try:\n abs_fp = os.path.join(g.root, p) if not os.path.isabs(p) else p\n with open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\") as fh:\n n = sum(1 for _ in fh)\n b0 = min(b0, int(n))\n except Exception:\n pass\n ok.append((p, a0, b0))\n return ok\n\n\ndef _file_from_citation_path(g: CodeGraph, files_scope: List[str], cite_path: str) -> str | None:\n \"\"\"Resolve a citation path to a repo-relative path within scope, allowing basename-only matches.\"\"\"\n if not cite_path:\n return None\n # If absolute, try to make it repo-relative\n if os.path.isabs(cite_path):\n return _to_repo_relative(g.root, cite_path)\n # Direct match in scope\n scope_rel = [f if not os.path.isabs(f) else _to_repo_relative(g.root, f) for f in (files_scope or [])]\n if cite_path in scope_rel:\n return cite_path\n # Basename match in scope\n base = os.path.basename(cite_path)\n cands = [r for r in scope_rel if os.path.basename(r) == base]\n if len(cands) == 1:\n return cands[0]\n # As a fallback, accept the path as-is (caller will attempt to open)\n return cite_path\n\n\ndef _nearest_symbol_span_for_line(g: CodeGraph, rel_path: str, a: int, b: int) -> tuple[int, int] | None:\n \"\"\"Find a symbol span in file that overlaps [a,b], else nearest by distance to midpoint.\"\"\"\n try:\n abs_fp = os.path.join(g.root, rel_path) if not os.path.isabs(rel_path) 
else rel_path\n # Filter symbols for this file\n syms: List[Any] = []\n try:\n syms_all = list(getattr(g, \"symbols_by_fqn\", {}).values())\n except Exception:\n syms_all = []\n for s in syms_all or []:\n try:\n if os.path.abspath(getattr(s, \"file\", \"\")) == os.path.abspath(abs_fp):\n syms.append(s)\n except Exception:\n continue\n if not syms:\n return None","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify._nearest_symbol_span_for_line","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify._nearest_symbol_span_for_line#L251-L280","kind":"function","name":"_nearest_symbol_span_for_line","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":251,"end_line":280,"context_start_line":231,"context_end_line":300,"code":"def _file_from_citation_path(g: CodeGraph, files_scope: List[str], cite_path: str) -> str | None:\n \"\"\"Resolve a citation path to a repo-relative path within scope, allowing basename-only matches.\"\"\"\n if not cite_path:\n return None\n # If absolute, try to make it repo-relative\n if os.path.isabs(cite_path):\n return _to_repo_relative(g.root, cite_path)\n # Direct match in scope\n scope_rel = [f if not os.path.isabs(f) else _to_repo_relative(g.root, f) for f in (files_scope or [])]\n if cite_path in scope_rel:\n return cite_path\n # Basename match in scope\n base = os.path.basename(cite_path)\n cands = [r for r in scope_rel if os.path.basename(r) == base]\n if len(cands) == 1:\n return cands[0]\n # As a fallback, accept the path as-is (caller will attempt to open)\n return cite_path\n\n\ndef _nearest_symbol_span_for_line(g: CodeGraph, rel_path: str, a: int, b: int) -> tuple[int, int] | None:\n \"\"\"Find a symbol span in file that overlaps [a,b], else nearest by distance to midpoint.\"\"\"\n try:\n abs_fp = os.path.join(g.root, rel_path) if not os.path.isabs(rel_path) else rel_path\n # Filter symbols for this file\n syms: List[Any] = []\n try:\n syms_all = list(getattr(g, \"symbols_by_fqn\", {}).values())\n except Exception:\n syms_all = []\n for s in syms_all or []:\n try:\n if os.path.abspath(getattr(s, \"file\", \"\")) == os.path.abspath(abs_fp):\n syms.append(s)\n except Exception:\n continue\n if not syms:\n return None\n a0, b0 = int(a), int(b)\n # Overlap first\n overlaps = [s for s in syms if not (int(s.end_line) < a0 or int(s.line) > b0)]\n if overlaps:\n s = min(overlaps, key=lambda z: (int(z.end_line) - int(z.line)))\n return int(s.line), int(s.end_line)\n # Nearest by midpoint distance\n mid = (a0 + b0) // 2\n s = min(syms, key=lambda z: min(abs(mid - int(z.line)), abs(mid - int(z.end_line))))\n return int(s.line), int(s.end_line)\n except Exception:\n return None\n\n\ndef repair_citations(text: str, g: CodeGraph, files_scope: List[str]) -> List[tuple[str, int, int]]:\n \"\"\"Map cited path:a-b to nearest anchored symbol spans within scope, de-dup and validate.\"\"\"\n cites = normalize_citations(text or \"\")\n if not cites:\n return []\n repaired: List[tuple[str, int, int]] = []\n for (p_raw, a, b) in cites:\n try:\n rel = _file_from_citation_path(g, files_scope, p_raw)\n if not rel:\n continue\n span = _nearest_symbol_span_for_line(g, rel, a, b)\n if not span:\n # Keep original range if no symbol found\n repaired.append((rel, min(int(a), int(b)), max(int(a), int(b))))\n continue\n sa, sb = span\n repaired.append((rel, 
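The selection in `_nearest_symbol_span_for_line` is two-stage: prefer the tightest symbol span overlapping the cited range, else fall back to the span whose boundary lies closest to the range's midpoint. A sketch with plain `(start, end)` tuples standing in for graph symbols:

```python
def nearest_span(spans: list[tuple[int, int]], a: int, b: int) -> tuple[int, int] | None:
    if not spans:
        return None
    # [s,e] overlaps [a,b] unless it ends before a or starts after b.
    overlaps = [(s, e) for (s, e) in spans if not (e < a or s > b)]
    if overlaps:
        return min(overlaps, key=lambda z: z[1] - z[0])  # tightest definition wins
    mid = (a + b) // 2
    return min(spans, key=lambda z: min(abs(mid - z[0]), abs(mid - z[1])))

assert nearest_span([(1, 5), (10, 40), (12, 20)], 11, 13) == (12, 20)
assert nearest_span([(1, 5), (30, 40)], 18, 19) == (30, 40)  # nearest by midpoint
```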
int(sa), int(sb)))","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.verify.repair_citations","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.verify.repair_citations#L283-L312","kind":"function","name":"repair_citations","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":283,"end_line":312,"context_start_line":263,"context_end_line":314,"code":" if os.path.abspath(getattr(s, \"file\", \"\")) == os.path.abspath(abs_fp):\n syms.append(s)\n except Exception:\n continue\n if not syms:\n return None\n a0, b0 = int(a), int(b)\n # Overlap first\n overlaps = [s for s in syms if not (int(s.end_line) < a0 or int(s.line) > b0)]\n if overlaps:\n s = min(overlaps, key=lambda z: (int(z.end_line) - int(z.line)))\n return int(s.line), int(s.end_line)\n # Nearest by midpoint distance\n mid = (a0 + b0) // 2\n s = min(syms, key=lambda z: min(abs(mid - int(z.line)), abs(mid - int(z.end_line))))\n return int(s.line), int(s.end_line)\n except Exception:\n return None\n\n\ndef repair_citations(text: str, g: CodeGraph, files_scope: List[str]) -> List[tuple[str, int, int]]:\n \"\"\"Map cited path:a-b to nearest anchored symbol spans within scope, de-dup and validate.\"\"\"\n cites = normalize_citations(text or \"\")\n if not cites:\n return []\n repaired: List[tuple[str, int, int]] = []\n for (p_raw, a, b) in cites:\n try:\n rel = _file_from_citation_path(g, files_scope, p_raw)\n if not rel:\n continue\n span = _nearest_symbol_span_for_line(g, rel, a, b)\n if not span:\n # Keep original range if no symbol found\n repaired.append((rel, min(int(a), int(b)), max(int(a), int(b))))\n continue\n sa, sb = span\n repaired.append((rel, int(sa), int(sb)))\n except Exception:\n continue\n # Validate and merge\n repaired = validate_spans(repaired, g)\n # De-dup\n seen = set()\n uniq = []\n for r in repaired:\n if r not in seen:\n seen.add(r)\n uniq.append(r)\n return _merge_overlapping_spans(uniq)\n\n","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_qa.modules.selection#L1-L224","kind":"module","name":"examples.python_repo_grounded_qa.modules.selection","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":1,"end_line":224,"context_start_line":1,"context_end_line":224,"code":"# question-aware selection, zoom-by-symbol, function-first candidate collection, name-match helpers\nimport os\nimport re\nfrom typing import Dict, List, Optional, Tuple\n\nfrom ..code_graph import CodeGraph\n\n\ndef _re_escape(s: str) -> str:\n try:\n import re as _re\n return _re.escape(s)\n except Exception:\n return s\n\n\ndef modules_from_symbols(repo_root: str, seeds: List[str], *, radius: int = 1, top_k: int = 8, ignore: Optional[List[str]] = None) -> Tuple[List[str], List[str]]:\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n modules_set: Dict[str, bool] = {}\n files_set: Dict[str, bool] = {}\n for s in seeds:\n try:\n for sym in g.find_symbol(s):\n modules_set[sym.module] = True\n files_set[os.path.relpath(sym.file, g.root)] = True\n except Exception:\n continue\n if radius > 0 and modules_set:\n 
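`repair_citations` is a fixed-order pipeline: parse citations, resolve each path into scope, snap line ranges to the nearest symbol span, then clamp, de-dup, and merge. A minimal skeleton of that ordering, with the repo-specific helpers passed in as callables (stand-ins, not the source API):

```python
def repair(text, parse, resolve_file, snap_to_symbol, validate, merge):
    repaired = []
    for path, a, b in parse(text):            # normalize_citations
        rel = resolve_file(path)              # _file_from_citation_path
        if not rel:
            continue
        span = snap_to_symbol(rel, a, b)      # _nearest_symbol_span_for_line
        lo, hi = span if span else (min(a, b), max(a, b))
        repaired.append((rel, lo, hi))
    repaired = validate(repaired)             # clamp to file bounds
    seen: set = set()
    uniq = [r for r in repaired if r not in seen and not seen.add(r)]
    return merge(uniq)                        # _merge_overlapping_spans
```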
cur = list(modules_set.keys())\n seen = set(cur)\n for _ in range(max(0, int(radius))):\n nxt: List[str] = []\n for m in cur:\n for dep in g.module_imports.get(m, []):\n if dep not in seen:\n seen.add(dep)\n nxt.append(dep)\n cur = nxt\n for m in seen:\n modules_set[m] = True\n f = g.file_for_module(m)\n if f:\n files_set[os.path.relpath(f, g.root)] = True\n modules = sorted(modules_set.keys())[: top_k]\n files = sorted(files_set.keys())[: max(top_k, 8)]\n return modules, files\n\n\ndef question_aware_modules_and_files(repo_root: str, prompt: str, *, top_k: int = 8, ignore: Optional[List[str]] = None) -> Tuple[List[str], List[str]]:\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n try:\n toks = [t for t in re.findall(r\"[A-Za-z0-9_]+\", (prompt or \"\").lower()) if len(t) >= 3]\n except Exception:\n toks = []\n score_by_file: Dict[str, int] = {}\n for t in toks[:10]:\n try:\n for (rel, _ln, _txt) in g.search_refs(_re_escape(t)):\n score_by_file[rel] = score_by_file.get(rel, 0) + 1\n except Exception:\n continue\n ranked_files = [fp for fp, _ in sorted(score_by_file.items(), key=lambda x: x[1], reverse=True)]\n files = ranked_files[: max(top_k, 8)]\n modules_set: Dict[str, bool] = {}\n for f in files:\n m = g.module_for_file(f)\n if m:\n modules_set[m] = True\n modules = sorted(modules_set.keys())[: top_k]\n return modules, files\n\n\ndef name_matches_prompt(name: Optional[str], prompt_tokens: set) -> bool:\n if not name:\n return False\n n = name.lower()\n if n in \" \".join(sorted(prompt_tokens)):\n return True\n parts = [p for p in n.replace(\"_\", \" \").split() if p]\n if not parts:\n return False\n inter = sum(1 for p in parts if p in prompt_tokens)\n need = 2 if len(parts) >= 2 else 1\n cat = \"\".join(parts)\n return bool(inter >= need or cat in prompt_tokens)\n\n\ndef prompt_token_set(prompt_q: str) -> set:\n try:\n import re as _re\n toks = _re.findall(r\"[A-Za-z0-9_]+\", (prompt_q or \"\").lower())\n return set(toks)\n except Exception:\n return set()\n\n\ndef _signature_tokens(sig: Optional[str]) -> List[str]:\n if not sig:\n return []\n try:\n toks = [t for t in re.findall(r\"[A-Za-z0-9_]+\", sig) if len(t) >= 2]\n return toks\n except Exception:\n return []\n\n\ndef _doc_head_tokens(doc: Optional[str]) -> List[str]:\n if not doc:\n return []\n try:\n head = (doc or \"\").splitlines()[0].lower()[:200]\n return [t for t in re.findall(r\"[a-z0-9_]+\", head) if len(t) >= 3]\n except Exception:\n return []\n\n\ndef _load_self_queries(path: Optional[str]) -> Dict[str, List[str]]:\n out: Dict[str, List[str]] = {}\n if not path:\n return out\n try:\n import json\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n try:\n obj = json.loads(ln)\n sym = str(obj.get(\"symbol\", \"\"))\n qs = [str(q) for q in (obj.get(\"queries\") or [])]\n if sym and qs:\n out[sym] = qs[:5]\n except Exception:\n continue\n except Exception:\n return {}\n return out\n\n\ndef rerank_modules_and_files(\n repo_root: str,\n prompt: str,\n modules: List[str],\n files: List[str],\n *,\n ignore: Optional[List[str]] = None,\n self_queries_path: Optional[str] = None,\n weights: Tuple[float, float, float, float, float] = (0.35, 0.25, 0.15, 0.15, 0.10),\n) -> Tuple[List[str], List[str]]:\n \"\"\"Hybrid reranker over initial module/file candidates.\n\n weights: (w_sig, w_call, w_cov, w_doc, w_vis)\n \"\"\"\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n p_tokens = prompt_token_set(prompt)\n w_sig, w_call, w_cov, w_doc, w_vis = 
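The radius expansion in `modules_from_symbols` is a bounded breadth-first walk over the import graph: each radius step advances the frontier one hop. A self-contained sketch:

```python
def expand(imports: dict[str, list[str]], seeds: list[str], radius: int) -> set[str]:
    cur, seen = list(seeds), set(seeds)
    for _ in range(max(0, radius)):
        nxt = []
        for m in cur:
            for dep in imports.get(m, []):
                if dep not in seen:
                    seen.add(dep)
                    nxt.append(dep)
        cur = nxt
    return seen

imports = {"a": ["b"], "b": ["c"], "c": []}
assert expand(imports, ["a"], 1) == {"a", "b"}
assert expand(imports, ["a"], 2) == {"a", "b", "c"}
```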
[float(x) for x in weights]\n\n # Self-queries\n sq = _load_self_queries(self_queries_path)\n sq_tokens_by_sym: Dict[str, set] = {}\n for sym, qs in sq.items():\n toks = set()\n for q in qs:\n toks.update(prompt_token_set(q))\n sq_tokens_by_sym[sym] = toks\n\n # Pre-compute per-module features\n sig_by_mod: Dict[str, set] = {}\n doc_by_mod: Dict[str, set] = {}\n cov_by_mod: Dict[str, int] = {}\n call_neighbors: Dict[str, set] = {}\n vis_by_mod: Dict[str, float] = {}\n\n for fqn, s in g.symbols_by_fqn.items():\n m = s.module\n sig_by_mod.setdefault(m, set()).update(_signature_tokens(s.signature))\n doc_by_mod.setdefault(m, set()).update(_doc_head_tokens(s.doc))\n vis_by_mod[m] = 1.0 if (not m.endswith(\".__init__\") and not m.endswith(\".tests\")) else 0.8\n # coverage proxy: pytest nodes count\n for m, nodes in g.pytest_nodes_by_module.items():\n cov_by_mod[m] = len(nodes or [])\n # call neighborhoods\n for caller, callee in g.calls:\n cm = caller.rsplit(\".\", 1)[0] if \".\" in caller else caller\n call_neighbors.setdefault(cm, set()).add(callee)\n\n def _module_score(m: str) -> float:\n sig = sig_by_mod.get(m, set())\n doc = doc_by_mod.get(m, set())\n sig_overlap = len(sig & p_tokens) / float(max(1, len(sig))) if sig else 0.0\n doc_overlap = len(doc & p_tokens) / float(max(1, len(doc))) if doc else 0.0\n call_compat = float(len(call_neighbors.get(m, set()))) / 25.0 # crude proxy, cap later\n cov = float(min(1.0, (cov_by_mod.get(m, 0) / 5.0)))\n vis = float(vis_by_mod.get(m, 1.0))\n # self-query boost if any symbol under module matches prompt tokens\n sq_boost = 0.0\n for fqn in g.defs_in(m):\n toks = sq_tokens_by_sym.get(fqn)\n if toks and (toks & p_tokens):\n sq_boost = max(sq_boost, 0.15)\n break\n score = (\n w_sig * sig_overlap + w_call * min(1.0, call_compat) + w_cov * cov + w_doc * doc_overlap + w_vis * vis + sq_boost\n )\n return float(max(0.0, min(2.0, score)))\n\n mod_scored = [(m, _module_score(m)) for m in modules]\n mod_scored.sort(key=lambda x: x[1], reverse=True)\n modules_new = [m for m, _s in mod_scored][: max(1, len(modules))]\n\n # File scores inherit module score; break ties by filename token overlap\n file_scored: List[Tuple[str, float]] = []\n for f in files:\n m = g.module_for_file(f)\n base = 0.0\n if m:\n base = next((s for (mm, s) in mod_scored if mm == m), 0.0)\n fn_toks = set([t for t in re.findall(r\"[a-z0-9_]+\", os.path.basename(f).lower()) if len(t) >= 3])\n fn_overlap = len(fn_toks & p_tokens) / float(max(1, len(fn_toks))) if fn_toks else 0.0\n file_scored.append((f, float(base + 0.05 * fn_overlap)))\n file_scored.sort(key=lambda x: x[1], reverse=True)\n files_new = [f for f, _s in file_scored][: max(1, len(files))]\n\n return modules_new, files_new\n\n","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection._re_escape","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection._re_escape#L9-L14","kind":"function","name":"_re_escape","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":9,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"# question-aware selection, zoom-by-symbol, function-first candidate collection, name-match helpers\nimport os\nimport re\nfrom typing import Dict, List, Optional, Tuple\n\nfrom ..code_graph import CodeGraph\n\n\ndef _re_escape(s: str) -> str:\n try:\n import re as 
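A hypothetical invocation of the reranker with a custom weight mix; the tuple order follows the docstring above (`w_sig, w_call, w_cov, w_doc, w_vis`), and the repo path and candidate lists are illustrative, not from the source:

```python
from examples.python_repo_grounded_qa.modules.selection import rerank_modules_and_files

modules, files = rerank_modules_and_files(
    "/path/to/repo",                           # hypothetical checkout
    "how are citation spans validated?",
    modules=["pkg.verify", "pkg.selection"],   # initial candidates
    files=["pkg/verify.py", "pkg/selection.py"],
    weights=(0.5, 0.2, 0.1, 0.1, 0.1),         # emphasize signature overlap
)
```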
_re\n return _re.escape(s)\n except Exception:\n return s\n\n\ndef modules_from_symbols(repo_root: str, seeds: List[str], *, radius: int = 1, top_k: int = 8, ignore: Optional[List[str]] = None) -> Tuple[List[str], List[str]]:\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n modules_set: Dict[str, bool] = {}\n files_set: Dict[str, bool] = {}\n for s in seeds:\n try:\n for sym in g.find_symbol(s):\n modules_set[sym.module] = True\n files_set[os.path.relpath(sym.file, g.root)] = True\n except Exception:\n continue\n if radius > 0 and modules_set:\n cur = list(modules_set.keys())\n seen = set(cur)\n for _ in range(max(0, int(radius))):\n nxt: List[str] = []\n for m in cur:\n for dep in g.module_imports.get(m, []):","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection.modules_from_symbols","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection.modules_from_symbols#L17-L46","kind":"function","name":"modules_from_symbols","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":17,"end_line":46,"context_start_line":1,"context_end_line":66,"code":"# question-aware selection, zoom-by-symbol, function-first candidate collection, name-match helpers\nimport os\nimport re\nfrom typing import Dict, List, Optional, Tuple\n\nfrom ..code_graph import CodeGraph\n\n\ndef _re_escape(s: str) -> str:\n try:\n import re as _re\n return _re.escape(s)\n except Exception:\n return s\n\n\ndef modules_from_symbols(repo_root: str, seeds: List[str], *, radius: int = 1, top_k: int = 8, ignore: Optional[List[str]] = None) -> Tuple[List[str], List[str]]:\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n modules_set: Dict[str, bool] = {}\n files_set: Dict[str, bool] = {}\n for s in seeds:\n try:\n for sym in g.find_symbol(s):\n modules_set[sym.module] = True\n files_set[os.path.relpath(sym.file, g.root)] = True\n except Exception:\n continue\n if radius > 0 and modules_set:\n cur = list(modules_set.keys())\n seen = set(cur)\n for _ in range(max(0, int(radius))):\n nxt: List[str] = []\n for m in cur:\n for dep in g.module_imports.get(m, []):\n if dep not in seen:\n seen.add(dep)\n nxt.append(dep)\n cur = nxt\n for m in seen:\n modules_set[m] = True\n f = g.file_for_module(m)\n if f:\n files_set[os.path.relpath(f, g.root)] = True\n modules = sorted(modules_set.keys())[: top_k]\n files = sorted(files_set.keys())[: max(top_k, 8)]\n return modules, files\n\n\ndef question_aware_modules_and_files(repo_root: str, prompt: str, *, top_k: int = 8, ignore: Optional[List[str]] = None) -> Tuple[List[str], List[str]]:\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n try:\n toks = [t for t in re.findall(r\"[A-Za-z0-9_]+\", (prompt or \"\").lower()) if len(t) >= 3]\n except Exception:\n toks = []\n score_by_file: Dict[str, int] = {}\n for t in toks[:10]:\n try:\n for (rel, _ln, _txt) in g.search_refs(_re_escape(t)):\n score_by_file[rel] = score_by_file.get(rel, 0) + 1\n except Exception:\n continue\n ranked_files = [fp for fp, _ in sorted(score_by_file.items(), key=lambda x: x[1], reverse=True)]\n files = ranked_files[: max(top_k, 8)]\n modules_set: Dict[str, bool] = {}\n for f in files:\n m = 
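A hypothetical call to `modules_from_symbols` (the repo path is illustrative): seed symbol names are looked up in the `CodeGraph`, expanded `radius` hops along module imports, and truncated to `top_k` entries:

```python
from examples.python_repo_grounded_qa.modules.selection import modules_from_symbols

modules, files = modules_from_symbols(
    "/path/to/repo",        # hypothetical checkout
    ["validate_spans"],     # seed symbol names
    radius=1,               # one hop along module imports
    top_k=8,
)
```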
g.module_for_file(f)","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection.question_aware_modules_and_files","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection.question_aware_modules_and_files#L49-L70","kind":"function","name":"question_aware_modules_and_files","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":49,"end_line":70,"context_start_line":29,"context_end_line":90,"code":" cur = list(modules_set.keys())\n seen = set(cur)\n for _ in range(max(0, int(radius))):\n nxt: List[str] = []\n for m in cur:\n for dep in g.module_imports.get(m, []):\n if dep not in seen:\n seen.add(dep)\n nxt.append(dep)\n cur = nxt\n for m in seen:\n modules_set[m] = True\n f = g.file_for_module(m)\n if f:\n files_set[os.path.relpath(f, g.root)] = True\n modules = sorted(modules_set.keys())[: top_k]\n files = sorted(files_set.keys())[: max(top_k, 8)]\n return modules, files\n\n\ndef question_aware_modules_and_files(repo_root: str, prompt: str, *, top_k: int = 8, ignore: Optional[List[str]] = None) -> Tuple[List[str], List[str]]:\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n try:\n toks = [t for t in re.findall(r\"[A-Za-z0-9_]+\", (prompt or \"\").lower()) if len(t) >= 3]\n except Exception:\n toks = []\n score_by_file: Dict[str, int] = {}\n for t in toks[:10]:\n try:\n for (rel, _ln, _txt) in g.search_refs(_re_escape(t)):\n score_by_file[rel] = score_by_file.get(rel, 0) + 1\n except Exception:\n continue\n ranked_files = [fp for fp, _ in sorted(score_by_file.items(), key=lambda x: x[1], reverse=True)]\n files = ranked_files[: max(top_k, 8)]\n modules_set: Dict[str, bool] = {}\n for f in files:\n m = g.module_for_file(f)\n if m:\n modules_set[m] = True\n modules = sorted(modules_set.keys())[: top_k]\n return modules, files\n\n\ndef name_matches_prompt(name: Optional[str], prompt_tokens: set) -> bool:\n if not name:\n return False\n n = name.lower()\n if n in \" \".join(sorted(prompt_tokens)):\n return True\n parts = [p for p in n.replace(\"_\", \" \").split() if p]\n if not parts:\n return False\n inter = sum(1 for p in parts if p in prompt_tokens)\n need = 2 if len(parts) >= 2 else 1\n cat = \"\".join(parts)\n return bool(inter >= need or cat in prompt_tokens)\n\n\ndef prompt_token_set(prompt_q: str) -> set:\n try:\n import re as _re","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection.name_matches_prompt","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection.name_matches_prompt#L73-L85","kind":"function","name":"name_matches_prompt","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":73,"end_line":85,"context_start_line":53,"context_end_line":105,"code":" except Exception:\n toks = []\n score_by_file: Dict[str, int] = {}\n for t in toks[:10]:\n try:\n for (rel, _ln, _txt) in g.search_refs(_re_escape(t)):\n score_by_file[rel] = score_by_file.get(rel, 0) + 1\n except Exception:\n continue\n ranked_files = [fp for fp, _ in sorted(score_by_file.items(), key=lambda x: x[1], reverse=True)]\n files = ranked_files[: max(top_k, 8)]\n modules_set: Dict[str, bool] = {}\n for f in 
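The question-aware selection above is a simple hit-count ranking: each of the first ten prompt tokens adds one point per file that references it. A sketch with `refs_by_token` standing in for `g.search_refs` (an assumption, not the source API):

```python
import re

def score_files(prompt: str, refs_by_token) -> list[str]:
    toks = [t for t in re.findall(r"[A-Za-z0-9_]+", prompt.lower()) if len(t) >= 3]
    score: dict[str, int] = {}
    for t in toks[:10]:
        for rel in refs_by_token(t):            # yields files referencing the token
            score[rel] = score.get(rel, 0) + 1
    return [f for f, _ in sorted(score.items(), key=lambda x: x[1], reverse=True)]
```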
files:\n m = g.module_for_file(f)\n if m:\n modules_set[m] = True\n modules = sorted(modules_set.keys())[: top_k]\n return modules, files\n\n\ndef name_matches_prompt(name: Optional[str], prompt_tokens: set) -> bool:\n if not name:\n return False\n n = name.lower()\n if n in \" \".join(sorted(prompt_tokens)):\n return True\n parts = [p for p in n.replace(\"_\", \" \").split() if p]\n if not parts:\n return False\n inter = sum(1 for p in parts if p in prompt_tokens)\n need = 2 if len(parts) >= 2 else 1\n cat = \"\".join(parts)\n return bool(inter >= need or cat in prompt_tokens)\n\n\ndef prompt_token_set(prompt_q: str) -> set:\n try:\n import re as _re\n toks = _re.findall(r\"[A-Za-z0-9_]+\", (prompt_q or \"\").lower())\n return set(toks)\n except Exception:\n return set()\n\n\ndef _signature_tokens(sig: Optional[str]) -> List[str]:\n if not sig:\n return []\n try:\n toks = [t for t in re.findall(r\"[A-Za-z0-9_]+\", sig) if len(t) >= 2]\n return toks\n except Exception:\n return []\n","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection.prompt_token_set","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection.prompt_token_set#L88-L94","kind":"function","name":"prompt_token_set","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":88,"end_line":94,"context_start_line":68,"context_end_line":114,"code":" modules_set[m] = True\n modules = sorted(modules_set.keys())[: top_k]\n return modules, files\n\n\ndef name_matches_prompt(name: Optional[str], prompt_tokens: set) -> bool:\n if not name:\n return False\n n = name.lower()\n if n in \" \".join(sorted(prompt_tokens)):\n return True\n parts = [p for p in n.replace(\"_\", \" \").split() if p]\n if not parts:\n return False\n inter = sum(1 for p in parts if p in prompt_tokens)\n need = 2 if len(parts) >= 2 else 1\n cat = \"\".join(parts)\n return bool(inter >= need or cat in prompt_tokens)\n\n\ndef prompt_token_set(prompt_q: str) -> set:\n try:\n import re as _re\n toks = _re.findall(r\"[A-Za-z0-9_]+\", (prompt_q or \"\").lower())\n return set(toks)\n except Exception:\n return set()\n\n\ndef _signature_tokens(sig: Optional[str]) -> List[str]:\n if not sig:\n return []\n try:\n toks = [t for t in re.findall(r\"[A-Za-z0-9_]+\", sig) if len(t) >= 2]\n return toks\n except Exception:\n return []\n\n\ndef _doc_head_tokens(doc: Optional[str]) -> List[str]:\n if not doc:\n return []\n try:\n head = (doc or \"\").splitlines()[0].lower()[:200]\n return [t for t in re.findall(r\"[a-z0-9_]+\", head) if len(t) >= 3]\n except Exception:\n return []","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection._signature_tokens","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection._signature_tokens#L97-L104","kind":"function","name":"_signature_tokens","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":97,"end_line":104,"context_start_line":77,"context_end_line":124,"code":" if n in \" \".join(sorted(prompt_tokens)):\n return True\n parts = [p for p in n.replace(\"_\", \" \").split() if p]\n if not parts:\n return False\n inter = sum(1 for p in parts if p in prompt_tokens)\n need 
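The multi-part rule in `name_matches_prompt` requires at least two underscore-separated parts of the name in the prompt tokens (one part suffices for single-part names), or the concatenation of all parts as a single token. A compact restatement for intuition; the real function additionally accepts a substring hit against the joined token text:

```python
def parts_rule(name: str, tokens: set[str]) -> bool:
    parts = [p for p in name.lower().replace("_", " ").split() if p]
    need = 2 if len(parts) >= 2 else 1
    hits = sum(1 for p in parts if p in tokens)
    return hits >= need or "".join(parts) in tokens

tokens = {"validate", "spans", "citations"}
assert parts_rule("validate_spans", tokens)            # two parts match
assert not parts_rule("merge_spans", tokens)           # only one of two parts
assert parts_rule("validatespans", {"validatespans"})  # concatenation token
```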
= 2 if len(parts) >= 2 else 1\n cat = \"\".join(parts)\n return bool(inter >= need or cat in prompt_tokens)\n\n\ndef prompt_token_set(prompt_q: str) -> set:\n try:\n import re as _re\n toks = _re.findall(r\"[A-Za-z0-9_]+\", (prompt_q or \"\").lower())\n return set(toks)\n except Exception:\n return set()\n\n\ndef _signature_tokens(sig: Optional[str]) -> List[str]:\n if not sig:\n return []\n try:\n toks = [t for t in re.findall(r\"[A-Za-z0-9_]+\", sig) if len(t) >= 2]\n return toks\n except Exception:\n return []\n\n\ndef _doc_head_tokens(doc: Optional[str]) -> List[str]:\n if not doc:\n return []\n try:\n head = (doc or \"\").splitlines()[0].lower()[:200]\n return [t for t in re.findall(r\"[a-z0-9_]+\", head) if len(t) >= 3]\n except Exception:\n return []\n\n\ndef _load_self_queries(path: Optional[str]) -> Dict[str, List[str]]:\n out: Dict[str, List[str]] = {}\n if not path:\n return out\n try:\n import json\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection._doc_head_tokens","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection._doc_head_tokens#L107-L114","kind":"function","name":"_doc_head_tokens","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":107,"end_line":114,"context_start_line":87,"context_end_line":134,"code":"\ndef prompt_token_set(prompt_q: str) -> set:\n try:\n import re as _re\n toks = _re.findall(r\"[A-Za-z0-9_]+\", (prompt_q or \"\").lower())\n return set(toks)\n except Exception:\n return set()\n\n\ndef _signature_tokens(sig: Optional[str]) -> List[str]:\n if not sig:\n return []\n try:\n toks = [t for t in re.findall(r\"[A-Za-z0-9_]+\", sig) if len(t) >= 2]\n return toks\n except Exception:\n return []\n\n\ndef _doc_head_tokens(doc: Optional[str]) -> List[str]:\n if not doc:\n return []\n try:\n head = (doc or \"\").splitlines()[0].lower()[:200]\n return [t for t in re.findall(r\"[a-z0-9_]+\", head) if len(t) >= 3]\n except Exception:\n return []\n\n\ndef _load_self_queries(path: Optional[str]) -> Dict[str, List[str]]:\n out: Dict[str, List[str]] = {}\n if not path:\n return out\n try:\n import json\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n try:\n obj = json.loads(ln)\n sym = str(obj.get(\"symbol\", \"\"))\n qs = [str(q) for q in (obj.get(\"queries\") or [])]\n if sym and qs:\n out[sym] = qs[:5]\n except Exception:\n continue\n except Exception:\n return {}","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection._load_self_queries","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection._load_self_queries#L117-L135","kind":"function","name":"_load_self_queries","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":117,"end_line":135,"context_start_line":97,"context_end_line":155,"code":"def _signature_tokens(sig: Optional[str]) -> List[str]:\n if not sig:\n return []\n try:\n toks = [t for t in re.findall(r\"[A-Za-z0-9_]+\", sig) if len(t) >= 2]\n return toks\n except Exception:\n return []\n\n\ndef _doc_head_tokens(doc: Optional[str]) -> List[str]:\n if not doc:\n return []\n try:\n 
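`_load_self_queries` expects a JSONL file with one object per line carrying a `"symbol"` key and a `"queries"` list; only the first five queries per symbol are kept, and malformed lines are skipped. A writer for that shape (the file name is illustrative):

```python
import json

rows = [
    {"symbol": "pkg.mod.func", "queries": ["what does func return?",
                                           "where is func used?"]},
]
with open("self_queries.jsonl", "w", encoding="utf-8") as fh:
    for row in rows:
        fh.write(json.dumps(row) + "\n")
```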
head = (doc or \"\").splitlines()[0].lower()[:200]\n return [t for t in re.findall(r\"[a-z0-9_]+\", head) if len(t) >= 3]\n except Exception:\n return []\n\n\ndef _load_self_queries(path: Optional[str]) -> Dict[str, List[str]]:\n out: Dict[str, List[str]] = {}\n if not path:\n return out\n try:\n import json\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n try:\n obj = json.loads(ln)\n sym = str(obj.get(\"symbol\", \"\"))\n qs = [str(q) for q in (obj.get(\"queries\") or [])]\n if sym and qs:\n out[sym] = qs[:5]\n except Exception:\n continue\n except Exception:\n return {}\n return out\n\n\ndef rerank_modules_and_files(\n repo_root: str,\n prompt: str,\n modules: List[str],\n files: List[str],\n *,\n ignore: Optional[List[str]] = None,\n self_queries_path: Optional[str] = None,\n weights: Tuple[float, float, float, float, float] = (0.35, 0.25, 0.15, 0.15, 0.10),\n) -> Tuple[List[str], List[str]]:\n \"\"\"Hybrid reranker over initial module/file candidates.\n\n weights: (w_sig, w_call, w_cov, w_doc, w_vis)\n \"\"\"\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n p_tokens = prompt_token_set(prompt)\n w_sig, w_call, w_cov, w_doc, w_vis = [float(x) for x in weights]\n","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection.rerank_modules_and_files","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection.rerank_modules_and_files#L138-L222","kind":"function","name":"rerank_modules_and_files","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":138,"end_line":222,"context_start_line":118,"context_end_line":224,"code":" out: Dict[str, List[str]] = {}\n if not path:\n return out\n try:\n import json\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n for ln in fh:\n try:\n obj = json.loads(ln)\n sym = str(obj.get(\"symbol\", \"\"))\n qs = [str(q) for q in (obj.get(\"queries\") or [])]\n if sym and qs:\n out[sym] = qs[:5]\n except Exception:\n continue\n except Exception:\n return {}\n return out\n\n\ndef rerank_modules_and_files(\n repo_root: str,\n prompt: str,\n modules: List[str],\n files: List[str],\n *,\n ignore: Optional[List[str]] = None,\n self_queries_path: Optional[str] = None,\n weights: Tuple[float, float, float, float, float] = (0.35, 0.25, 0.15, 0.15, 0.10),\n) -> Tuple[List[str], List[str]]:\n \"\"\"Hybrid reranker over initial module/file candidates.\n\n weights: (w_sig, w_call, w_cov, w_doc, w_vis)\n \"\"\"\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n p_tokens = prompt_token_set(prompt)\n w_sig, w_call, w_cov, w_doc, w_vis = [float(x) for x in weights]\n\n # Self-queries\n sq = _load_self_queries(self_queries_path)\n sq_tokens_by_sym: Dict[str, set] = {}\n for sym, qs in sq.items():\n toks = set()\n for q in qs:\n toks.update(prompt_token_set(q))\n sq_tokens_by_sym[sym] = toks\n\n # Pre-compute per-module features\n sig_by_mod: Dict[str, set] = {}\n doc_by_mod: Dict[str, set] = {}\n cov_by_mod: Dict[str, int] = {}\n call_neighbors: Dict[str, set] = {}\n vis_by_mod: Dict[str, float] = {}\n\n for fqn, s in g.symbols_by_fqn.items():\n m = s.module\n sig_by_mod.setdefault(m, set()).update(_signature_tokens(s.signature))\n doc_by_mod.setdefault(m, set()).update(_doc_head_tokens(s.doc))\n vis_by_mod[m] = 1.0 if (not m.endswith(\".__init__\") and not 
m.endswith(\".tests\")) else 0.8\n # coverage proxy: pytest nodes count\n for m, nodes in g.pytest_nodes_by_module.items():\n cov_by_mod[m] = len(nodes or [])\n # call neighborhoods\n for caller, callee in g.calls:\n cm = caller.rsplit(\".\", 1)[0] if \".\" in caller else caller\n call_neighbors.setdefault(cm, set()).add(callee)\n\n def _module_score(m: str) -> float:\n sig = sig_by_mod.get(m, set())\n doc = doc_by_mod.get(m, set())\n sig_overlap = len(sig & p_tokens) / float(max(1, len(sig))) if sig else 0.0\n doc_overlap = len(doc & p_tokens) / float(max(1, len(doc))) if doc else 0.0\n call_compat = float(len(call_neighbors.get(m, set()))) / 25.0 # crude proxy, cap later\n cov = float(min(1.0, (cov_by_mod.get(m, 0) / 5.0)))\n vis = float(vis_by_mod.get(m, 1.0))\n # self-query boost if any symbol under module matches prompt tokens\n sq_boost = 0.0\n for fqn in g.defs_in(m):\n toks = sq_tokens_by_sym.get(fqn)\n if toks and (toks & p_tokens):\n sq_boost = max(sq_boost, 0.15)\n break\n score = (\n w_sig * sig_overlap + w_call * min(1.0, call_compat) + w_cov * cov + w_doc * doc_overlap + w_vis * vis + sq_boost\n )\n return float(max(0.0, min(2.0, score)))\n\n mod_scored = [(m, _module_score(m)) for m in modules]\n mod_scored.sort(key=lambda x: x[1], reverse=True)\n modules_new = [m for m, _s in mod_scored][: max(1, len(modules))]\n\n # File scores inherit module score; break ties by filename token overlap\n file_scored: List[Tuple[str, float]] = []\n for f in files:\n m = g.module_for_file(f)\n base = 0.0\n if m:\n base = next((s for (mm, s) in mod_scored if mm == m), 0.0)\n fn_toks = set([t for t in re.findall(r\"[a-z0-9_]+\", os.path.basename(f).lower()) if len(t) >= 3])\n fn_overlap = len(fn_toks & p_tokens) / float(max(1, len(fn_toks))) if fn_toks else 0.0\n file_scored.append((f, float(base + 0.05 * fn_overlap)))\n file_scored.sort(key=lambda x: x[1], reverse=True)\n files_new = [f for f, _s in file_scored][: max(1, len(files))]\n\n return modules_new, files_new\n\n","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.selection._module_score","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.selection._module_score#L185-L203","kind":"function","name":"_module_score","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":185,"end_line":203,"context_start_line":165,"context_end_line":223,"code":" # Pre-compute per-module features\n sig_by_mod: Dict[str, set] = {}\n doc_by_mod: Dict[str, set] = {}\n cov_by_mod: Dict[str, int] = {}\n call_neighbors: Dict[str, set] = {}\n vis_by_mod: Dict[str, float] = {}\n\n for fqn, s in g.symbols_by_fqn.items():\n m = s.module\n sig_by_mod.setdefault(m, set()).update(_signature_tokens(s.signature))\n doc_by_mod.setdefault(m, set()).update(_doc_head_tokens(s.doc))\n vis_by_mod[m] = 1.0 if (not m.endswith(\".__init__\") and not m.endswith(\".tests\")) else 0.8\n # coverage proxy: pytest nodes count\n for m, nodes in g.pytest_nodes_by_module.items():\n cov_by_mod[m] = len(nodes or [])\n # call neighborhoods\n for caller, callee in g.calls:\n cm = caller.rsplit(\".\", 1)[0] if \".\" in caller else caller\n call_neighbors.setdefault(cm, set()).add(callee)\n\n def _module_score(m: str) -> float:\n sig = sig_by_mod.get(m, set())\n doc = doc_by_mod.get(m, set())\n sig_overlap = len(sig & p_tokens) / float(max(1, len(sig))) if sig else 
0.0\n doc_overlap = len(doc & p_tokens) / float(max(1, len(doc))) if doc else 0.0\n call_compat = float(len(call_neighbors.get(m, set()))) / 25.0 # crude proxy, cap later\n cov = float(min(1.0, (cov_by_mod.get(m, 0) / 5.0)))\n vis = float(vis_by_mod.get(m, 1.0))\n # self-query boost if any symbol under module matches prompt tokens\n sq_boost = 0.0\n for fqn in g.defs_in(m):\n toks = sq_tokens_by_sym.get(fqn)\n if toks and (toks & p_tokens):\n sq_boost = max(sq_boost, 0.15)\n break\n score = (\n w_sig * sig_overlap + w_call * min(1.0, call_compat) + w_cov * cov + w_doc * doc_overlap + w_vis * vis + sq_boost\n )\n return float(max(0.0, min(2.0, score)))\n\n mod_scored = [(m, _module_score(m)) for m in modules]\n mod_scored.sort(key=lambda x: x[1], reverse=True)\n modules_new = [m for m, _s in mod_scored][: max(1, len(modules))]\n\n # File scores inherit module score; break ties by filename token overlap\n file_scored: List[Tuple[str, float]] = []\n for f in files:\n m = g.module_for_file(f)\n base = 0.0\n if m:\n base = next((s for (mm, s) in mod_scored if mm == m), 0.0)\n fn_toks = set([t for t in re.findall(r\"[a-z0-9_]+\", os.path.basename(f).lower()) if len(t) >= 3])\n fn_overlap = len(fn_toks & p_tokens) / float(max(1, len(fn_toks))) if fn_toks else 0.0\n file_scored.append((f, float(base + 0.05 * fn_overlap)))\n file_scored.sort(key=lambda x: x[1], reverse=True)\n files_new = [f for f, _s in file_scored][: max(1, len(files))]\n\n return modules_new, files_new\n","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.tune","uri":"program://Program_Conditioned_Adapter/module/examples.python_repo_grounded_qa.modules.tune#L1-L303","kind":"module","name":"examples.python_repo_grounded_qa.modules.tune","path":"examples/python_repo_grounded_qa/modules/tune.py","language":"python","start_line":1,"end_line":303,"context_start_line":1,"context_end_line":303,"code":"from __future__ import annotations\n\nimport os\nimport json\nimport time\nimport hashlib\nimport subprocess\nimport sys\nfrom typing import Dict, List, Tuple, Optional, Set\n\nfrom ..code_graph import CodeGraph\nfrom .prompts import build_prompts_for_module\nfrom .verify import verify_with_tests, extract_citations\nfrom modules.embedding import build_subgraph_embedding_from_graph\nfrom modules.adapter import (\n save_npz,\n generate_lora_from_embedding,\n)\nfrom model.inspect import detect_target_shapes_from_model_full, detect_target_shapes_from_model\n\n\ndef _run_repo_adapter_cli(\n model: str,\n adapters_dir: str,\n repo: str,\n prompt: str,\n *,\n cache_dir: Optional[str] = None,\n device: str = \"cpu\",\n gpu_ids: Optional[str] = None,\n context_tokens: int = 5000,\n timeout_sec: Optional[int] = None,\n) -> Tuple[int, str, str]:\n \"\"\"Minimal wrapper around the generic run CLI to obtain an answer text.\"\"\"\n cmd = [\n sys.executable,\n \"-m\",\n \"run\",\n \"--model\", model,\n \"--sources\", repo,\n \"--adapters-dir\", adapters_dir,\n \"--prompt\", prompt,\n \"--context-tokens\", str(int(context_tokens)),\n ]\n if cache_dir:\n cmd += [\"--cache-dir\", str(cache_dir)]\n if device and device != \"cpu\":\n cmd += [\"--device-map\", \"auto\"]\n if gpu_ids:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_ids)\n try:\n proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout_sec)\n return int(proc.returncode), proc.stdout or \"\", proc.stderr or \"\"\n except 
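Written out, the module score computed by `_module_score` is a clamped weighted sum, where $o_{\text{sig}}$ and $o_{\text{doc}}$ are token-overlap ratios $|T \cap P|/|T|$ against the prompt tokens $P$, $|N(m)|$ is the call-neighbor count, $c(m)$ the pytest-node count, $v(m) \in \{0.8, 1.0\}$ the visibility factor, and $b_{\text{sq}} \in \{0, 0.15\}$ the self-query boost:

```latex
\[
s(m) = \operatorname{clip}_{[0,2]}\!\Big(
  w_{\text{sig}}\, o_{\text{sig}}
  + w_{\text{call}} \min\!\big(1, \tfrac{|N(m)|}{25}\big)
  + w_{\text{cov}} \min\!\big(1, \tfrac{c(m)}{5}\big)
  + w_{\text{doc}}\, o_{\text{doc}}
  + w_{\text{vis}}\, v(m)
  + b_{\text{sq}} \Big)
\]
```

File scores then inherit the score of their containing module plus a small filename-overlap tie-break, $s(f) = s(\text{mod}(f)) + 0.05\, o_{\text{fn}}$, as in the loop above.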
subprocess.TimeoutExpired as te:\n return 124, \"\", str(te)\n except Exception as e:\n return 1, \"\", str(e)\n\n\ndef distill_repo(\n repo: str,\n model: str,\n adapters: str,\n out_dir: str,\n *,\n ignore: Optional[List[str]] = None,\n max_prompts: int = 3,\n cache_dir: Optional[str] = None,\n device: str = \"cpu\",\n gpu_ids: Optional[str] = None,\n context_tokens: int = 5000,\n timeout_sec: Optional[int] = None,\n resume: bool = False,\n log_every: int = 25,\n citations_per_paragraph: bool = False,\n) -> Tuple[Set[str], str]:\n out_dir = os.path.abspath(os.path.expanduser(os.path.expandvars(out_dir)))\n os.makedirs(out_dir, exist_ok=True)\n g = CodeGraph.load_or_build(repo, ignore=[s for s in (ignore or []) if s])\n\n buffer_path = os.path.join(out_dir, \"distill.jsonl\")\n verified_modules: Dict[str, bool] = {}\n processed: Dict[Tuple[str, str], bool] = {}\n if resume and os.path.isfile(buffer_path):\n try:\n with open(buffer_path, \"r\", encoding=\"utf-8\") as fh:\n for line in fh:\n try:\n row = json.loads(line)\n k = (str(row.get(\"module\", \"\")), str(row.get(\"prompt\", \"\")))\n processed[k] = True\n except Exception:\n continue\n except Exception:\n processed = {}\n\n mode = (\"a\" if resume and os.path.isfile(buffer_path) else \"w\")\n t0 = time.time()\n logs_dir = os.path.join(out_dir, \"logs\")\n try:\n os.makedirs(logs_dir, exist_ok=True)\n except Exception:\n pass\n\n env = os.environ.copy()\n with open(buffer_path, mode, encoding=\"utf-8\") as fh:\n for idx, module in enumerate(sorted(g.modules.keys()), start=1):\n mi = g.modules[module]\n if mi.is_test:\n continue\n prompts = build_prompts_for_module(g, module, max_q=int(max_prompts))\n module_verified = False\n for pidx, q in enumerate(prompts, start=1):\n if resume and ((module, q) in processed):\n continue\n try:\n print(json.dumps({\n \"event\": \"start_prompt\",\n \"module\": module,\n \"prompt_index\": pidx,\n \"context_tokens\": int(context_tokens),\n }), flush=True)\n except Exception:\n pass\n rc, ans_text, err_text = _run_repo_adapter_cli(\n model,\n adapters,\n repo,\n q,\n cache_dir=cache_dir,\n device=device,\n gpu_ids=gpu_ids,\n context_tokens=int(context_tokens),\n timeout_sec=(int(timeout_sec) if (timeout_sec and int(timeout_sec) > 0) else None),\n )\n ok = (rc == 0)\n if ok:\n cites = extract_citations(ans_text)\n ok = bool(cites)\n if ok:\n ok = verify_with_tests(g, module, repo_root=repo, env=env)\n try:\n h = hashlib.sha1((module + \"\\n\" + q).encode(\"utf-8\", errors=\"ignore\")).hexdigest()[:12]\n log_fp = os.path.join(logs_dir, f\"{module.replace('/', '_')}.{pidx}.{h}.log\")\n with open(log_fp, \"w\", encoding=\"utf-8\") as lf:\n lf.write(ans_text)\n lf.write(\"\\n\\n[stderr]\\n\")\n lf.write(err_text or \"\")\n except Exception:\n pass\n fh.write(json.dumps({\n \"module\": module,\n \"prompt\": q,\n \"answer\": ans_text,\n \"verified\": bool(ok),\n \"citations\": extract_citations(ans_text),\n \"ignore\": [s for s in (ignore or []) if s],\n \"device\": device,\n \"context_tokens\": int(context_tokens),\n \"rc\": int(rc),\n }) + \"\\n\")\n module_verified = module_verified or bool(ok)\n if module_verified:\n verified_modules[module] = True\n if (idx % max(1, int(log_every))) == 0:\n try:\n elapsed = time.time() - t0\n except Exception:\n elapsed = -1.0\n print(json.dumps({\"progress\": {\"done\": idx, \"total\": len(g.modules), \"elapsed_sec\": elapsed}}))\n\n return set(verified_modules.keys()), buffer_path\n\n\ndef export_tuned_adapters(\n repo: str,\n model: str,\n verified_modules: 
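`distill_repo` accepts an answer only when three short-circuited checks pass: the CLI exits cleanly, the answer carries at least one citation, and the module's tests verify. As a standalone predicate (note the wrapper above maps timeouts to exit code 124):

```python
def accepted(rc: int, citations: list, tests_pass: bool) -> bool:
    return rc == 0 and bool(citations) and tests_pass

assert not accepted(124, [("a.py", 1, 2)], True)  # timeout exit code
assert not accepted(0, [], True)                  # no citations extracted
assert accepted(0, [("a.py", 1, 2)], True)
```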
List[str],\n out_dir: str,\n *,\n per_module_adapters: bool = False,\n include_deps: bool = False,\n max_deps: int = 4,\n rank: int = 8,\n cache_dir: Optional[str] = None,\n) -> None:\n os.makedirs(out_dir, exist_ok=True)\n g = CodeGraph.load_or_build(repo)\n emb = build_subgraph_embedding_from_graph(\n g,\n dim=1536,\n seed=0,\n include_modules=sorted(list(verified_modules)),\n include_files=None,\n include_text=True,\n text_max_bytes=250000,\n max_text_tokens=0,\n text_weight=0.25,\n graph_prop_hops=0,\n graph_prop_damp=0.85,\n )\n targets = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"up_proj\", \"down_proj\", \"gate_proj\"]\n adapters = {\"layers\": [], \"rank\": 8, \"d_model\": 0, \"targets\": targets, \"gates\": []}\n manifest = {\n \"repo\": os.path.abspath(repo),\n \"verified_modules\": sorted(list(verified_modules)),\n \"buffer\": os.path.join(out_dir, \"distill.jsonl\"),\n \"schema_version\": 1,\n \"note\": \"Embedding from verified modules; adapter layers empty for downstream mixing.\",\n }\n save_npz(out_dir, embedding=emb, adapters=adapters, manifest=manifest)\n\n if not per_module_adapters:\n return\n\n # Try to infer model dims and target shapes for per-module export\n num_layers: int = 0\n d_model: int = 0\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = None\n try:\n target_shapes = detect_target_shapes_from_model_full(model, target_regex=None)\n except Exception:\n target_shapes = None\n if not target_shapes:\n try:\n target_shapes = detect_target_shapes_from_model(model)\n except Exception:\n target_shapes = None\n try:\n from transformers import AutoConfig # type: ignore\n cfg = AutoConfig.from_pretrained(model, cache_dir=cache_dir)\n num_layers = int(getattr(cfg, \"num_hidden_layers\", 0) or 0)\n d_model = int(getattr(cfg, \"hidden_size\", 0) or 0)\n except Exception:\n num_layers, d_model = 0, 0\n\n sub_root = os.path.join(out_dir, \"sub_adapters\")\n os.makedirs(sub_root, exist_ok=True)\n for module in sorted(list(verified_modules)):\n mods = [module]\n if include_deps:\n deps = list(g.module_imports.get(module, []) or [])\n if int(max_deps) > 0:\n deps = deps[: int(max_deps)]\n mods.extend([m for m in deps if m])\n mods_unique = sorted({m for m in mods})\n emb_m = build_subgraph_embedding_from_graph(\n g,\n dim=1536,\n seed=0,\n include_modules=mods_unique,\n include_files=None,\n include_text=True,\n text_max_bytes=250000,\n max_text_tokens=0,\n text_weight=0.25,\n graph_prop_hops=0,\n graph_prop_damp=0.85,\n )\n if (num_layers <= 0) or (d_model <= 0):\n sub_dir = os.path.join(sub_root, module.replace(\"/\", \"_\"))\n os.makedirs(sub_dir, exist_ok=True)\n save_npz(\n sub_dir,\n embedding=emb_m,\n adapters={\"layers\": [], \"rank\": rank, \"d_model\": 0, \"targets\": [], \"gates\": []},\n manifest={\n \"module\": module,\n \"include_modules\": mods_unique,\n \"note\": \"Embedding-only; shapes not inferred\",\n },\n )\n continue\n try:\n adapters_m = generate_lora_from_embedding(\n emb_m[\"z\"],\n d_model=int(d_model),\n num_layers=int(num_layers),\n rank=int(rank),\n seed=0,\n targets=list((target_shapes or {}).keys()) if target_shapes else None,\n target_shapes=target_shapes,\n layer_gate=\"zmean\",\n target_weights=None,\n )\n sub_dir = os.path.join(sub_root, module.replace(\"/\", \"_\"))\n os.makedirs(sub_dir, exist_ok=True)\n save_npz(\n sub_dir,\n embedding=emb_m,\n adapters=adapters_m,\n manifest={\n \"module\": module,\n \"include_modules\": mods_unique,\n \"rank\": int(rank),\n \"d_model\": int(d_model),\n \"layers\": int(num_layers),\n },\n )\n 
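Per-module export groups each verified module with at most `max_deps` of its direct imports before embedding, de-duplicated and sorted. A sketch of that grouping in isolation:

```python
def module_group(module: str, imports: dict[str, list[str]],
                 include_deps: bool, max_deps: int) -> list[str]:
    mods = [module]
    if include_deps:
        deps = list(imports.get(module, []) or [])
        if max_deps > 0:
            deps = deps[:max_deps]     # bound the dependency fan-out
        mods.extend(m for m in deps if m)
    return sorted(set(mods))

assert module_group("a", {"a": ["c", "b", "d"]}, True, 2) == ["a", "b", "c"]
```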
except Exception:\n continue\n\n","source_hash":"c1e3ec462512de538f0c41140ec4e75fcdbe445da18db02db5814709ced92da6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.tune._run_repo_adapter_cli","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.tune._run_repo_adapter_cli#L22-L57","kind":"function","name":"_run_repo_adapter_cli","path":"examples/python_repo_grounded_qa/modules/tune.py","language":"python","start_line":22,"end_line":57,"context_start_line":2,"context_end_line":77,"code":"\nimport os\nimport json\nimport time\nimport hashlib\nimport subprocess\nimport sys\nfrom typing import Dict, List, Tuple, Optional, Set\n\nfrom ..code_graph import CodeGraph\nfrom .prompts import build_prompts_for_module\nfrom .verify import verify_with_tests, extract_citations\nfrom modules.embedding import build_subgraph_embedding_from_graph\nfrom modules.adapter import (\n save_npz,\n generate_lora_from_embedding,\n)\nfrom model.inspect import detect_target_shapes_from_model_full, detect_target_shapes_from_model\n\n\ndef _run_repo_adapter_cli(\n model: str,\n adapters_dir: str,\n repo: str,\n prompt: str,\n *,\n cache_dir: Optional[str] = None,\n device: str = \"cpu\",\n gpu_ids: Optional[str] = None,\n context_tokens: int = 5000,\n timeout_sec: Optional[int] = None,\n) -> Tuple[int, str, str]:\n \"\"\"Minimal wrapper around the generic run CLI to obtain an answer text.\"\"\"\n cmd = [\n sys.executable,\n \"-m\",\n \"run\",\n \"--model\", model,\n \"--sources\", repo,\n \"--adapters-dir\", adapters_dir,\n \"--prompt\", prompt,\n \"--context-tokens\", str(int(context_tokens)),\n ]\n if cache_dir:\n cmd += [\"--cache-dir\", str(cache_dir)]\n if device and device != \"cpu\":\n cmd += [\"--device-map\", \"auto\"]\n if gpu_ids:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_ids)\n try:\n proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout_sec)\n return int(proc.returncode), proc.stdout or \"\", proc.stderr or \"\"\n except subprocess.TimeoutExpired as te:\n return 124, \"\", str(te)\n except Exception as e:\n return 1, \"\", str(e)\n\n\ndef distill_repo(\n repo: str,\n model: str,\n adapters: str,\n out_dir: str,\n *,\n ignore: Optional[List[str]] = None,\n max_prompts: int = 3,\n cache_dir: Optional[str] = None,\n device: str = \"cpu\",\n gpu_ids: Optional[str] = None,\n context_tokens: int = 5000,\n timeout_sec: Optional[int] = None,\n resume: bool = False,\n log_every: int = 25,\n citations_per_paragraph: bool = False,\n) -> Tuple[Set[str], str]:\n out_dir = os.path.abspath(os.path.expanduser(os.path.expandvars(out_dir)))","source_hash":"c1e3ec462512de538f0c41140ec4e75fcdbe445da18db02db5814709ced92da6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.tune.distill_repo","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.tune.distill_repo#L60-L172","kind":"function","name":"distill_repo","path":"examples/python_repo_grounded_qa/modules/tune.py","language":"python","start_line":60,"end_line":172,"context_start_line":40,"context_end_line":192,"code":" \"--sources\", repo,\n \"--adapters-dir\", adapters_dir,\n \"--prompt\", prompt,\n \"--context-tokens\", str(int(context_tokens)),\n ]\n if cache_dir:\n cmd += [\"--cache-dir\", str(cache_dir)]\n if device and device != \"cpu\":\n cmd += [\"--device-map\", \"auto\"]\n if gpu_ids:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = 
str(gpu_ids)\n try:\n proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout_sec)\n return int(proc.returncode), proc.stdout or \"\", proc.stderr or \"\"\n except subprocess.TimeoutExpired as te:\n return 124, \"\", str(te)\n except Exception as e:\n return 1, \"\", str(e)\n\n\ndef distill_repo(\n repo: str,\n model: str,\n adapters: str,\n out_dir: str,\n *,\n ignore: Optional[List[str]] = None,\n max_prompts: int = 3,\n cache_dir: Optional[str] = None,\n device: str = \"cpu\",\n gpu_ids: Optional[str] = None,\n context_tokens: int = 5000,\n timeout_sec: Optional[int] = None,\n resume: bool = False,\n log_every: int = 25,\n citations_per_paragraph: bool = False,\n) -> Tuple[Set[str], str]:\n out_dir = os.path.abspath(os.path.expanduser(os.path.expandvars(out_dir)))\n os.makedirs(out_dir, exist_ok=True)\n g = CodeGraph.load_or_build(repo, ignore=[s for s in (ignore or []) if s])\n\n buffer_path = os.path.join(out_dir, \"distill.jsonl\")\n verified_modules: Dict[str, bool] = {}\n processed: Dict[Tuple[str, str], bool] = {}\n if resume and os.path.isfile(buffer_path):\n try:\n with open(buffer_path, \"r\", encoding=\"utf-8\") as fh:\n for line in fh:\n try:\n row = json.loads(line)\n k = (str(row.get(\"module\", \"\")), str(row.get(\"prompt\", \"\")))\n processed[k] = True\n except Exception:\n continue\n except Exception:\n processed = {}\n\n mode = (\"a\" if resume and os.path.isfile(buffer_path) else \"w\")\n t0 = time.time()\n logs_dir = os.path.join(out_dir, \"logs\")\n try:\n os.makedirs(logs_dir, exist_ok=True)\n except Exception:\n pass\n\n env = os.environ.copy()\n with open(buffer_path, mode, encoding=\"utf-8\") as fh:\n for idx, module in enumerate(sorted(g.modules.keys()), start=1):\n mi = g.modules[module]\n if mi.is_test:\n continue\n prompts = build_prompts_for_module(g, module, max_q=int(max_prompts))\n module_verified = False\n for pidx, q in enumerate(prompts, start=1):\n if resume and ((module, q) in processed):\n continue\n try:\n print(json.dumps({\n \"event\": \"start_prompt\",\n \"module\": module,\n \"prompt_index\": pidx,\n \"context_tokens\": int(context_tokens),\n }), flush=True)\n except Exception:\n pass\n rc, ans_text, err_text = _run_repo_adapter_cli(\n model,\n adapters,\n repo,\n q,\n cache_dir=cache_dir,\n device=device,\n gpu_ids=gpu_ids,\n context_tokens=int(context_tokens),\n timeout_sec=(int(timeout_sec) if (timeout_sec and int(timeout_sec) > 0) else None),\n )\n ok = (rc == 0)\n if ok:\n cites = extract_citations(ans_text)\n ok = bool(cites)\n if ok:\n ok = verify_with_tests(g, module, repo_root=repo, env=env)\n try:\n h = hashlib.sha1((module + \"\\n\" + q).encode(\"utf-8\", errors=\"ignore\")).hexdigest()[:12]\n log_fp = os.path.join(logs_dir, f\"{module.replace('/', '_')}.{pidx}.{h}.log\")\n with open(log_fp, \"w\", encoding=\"utf-8\") as lf:\n lf.write(ans_text)\n lf.write(\"\\n\\n[stderr]\\n\")\n lf.write(err_text or \"\")\n except Exception:\n pass\n fh.write(json.dumps({\n \"module\": module,\n \"prompt\": q,\n \"answer\": ans_text,\n \"verified\": bool(ok),\n \"citations\": extract_citations(ans_text),\n \"ignore\": [s for s in (ignore or []) if s],\n \"device\": device,\n \"context_tokens\": int(context_tokens),\n \"rc\": int(rc),\n }) + \"\\n\")\n module_verified = module_verified or bool(ok)\n if module_verified:\n verified_modules[module] = True\n if (idx % max(1, int(log_every))) == 0:\n try:\n elapsed = time.time() - t0\n except Exception:\n elapsed = -1.0\n print(json.dumps({\"progress\": {\"done\": idx, \"total\": 
len(g.modules), \"elapsed_sec\": elapsed}}))\n\n return set(verified_modules.keys()), buffer_path\n\n\ndef export_tuned_adapters(\n repo: str,\n model: str,\n verified_modules: List[str],\n out_dir: str,\n *,\n per_module_adapters: bool = False,\n include_deps: bool = False,\n max_deps: int = 4,\n rank: int = 8,\n cache_dir: Optional[str] = None,\n) -> None:\n os.makedirs(out_dir, exist_ok=True)\n g = CodeGraph.load_or_build(repo)\n emb = build_subgraph_embedding_from_graph(\n g,\n dim=1536,\n seed=0,","source_hash":"c1e3ec462512de538f0c41140ec4e75fcdbe445da18db02db5814709ced92da6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:examples.python_repo_grounded_qa.modules.tune.export_tuned_adapters","uri":"program://Program_Conditioned_Adapter/function/examples.python_repo_grounded_qa.modules.tune.export_tuned_adapters#L175-L301","kind":"function","name":"export_tuned_adapters","path":"examples/python_repo_grounded_qa/modules/tune.py","language":"python","start_line":175,"end_line":301,"context_start_line":155,"context_end_line":303,"code":" \"verified\": bool(ok),\n \"citations\": extract_citations(ans_text),\n \"ignore\": [s for s in (ignore or []) if s],\n \"device\": device,\n \"context_tokens\": int(context_tokens),\n \"rc\": int(rc),\n }) + \"\\n\")\n module_verified = module_verified or bool(ok)\n if module_verified:\n verified_modules[module] = True\n if (idx % max(1, int(log_every))) == 0:\n try:\n elapsed = time.time() - t0\n except Exception:\n elapsed = -1.0\n print(json.dumps({\"progress\": {\"done\": idx, \"total\": len(g.modules), \"elapsed_sec\": elapsed}}))\n\n return set(verified_modules.keys()), buffer_path\n\n\ndef export_tuned_adapters(\n repo: str,\n model: str,\n verified_modules: List[str],\n out_dir: str,\n *,\n per_module_adapters: bool = False,\n include_deps: bool = False,\n max_deps: int = 4,\n rank: int = 8,\n cache_dir: Optional[str] = None,\n) -> None:\n os.makedirs(out_dir, exist_ok=True)\n g = CodeGraph.load_or_build(repo)\n emb = build_subgraph_embedding_from_graph(\n g,\n dim=1536,\n seed=0,\n include_modules=sorted(list(verified_modules)),\n include_files=None,\n include_text=True,\n text_max_bytes=250000,\n max_text_tokens=0,\n text_weight=0.25,\n graph_prop_hops=0,\n graph_prop_damp=0.85,\n )\n targets = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"up_proj\", \"down_proj\", \"gate_proj\"]\n adapters = {\"layers\": [], \"rank\": 8, \"d_model\": 0, \"targets\": targets, \"gates\": []}\n manifest = {\n \"repo\": os.path.abspath(repo),\n \"verified_modules\": sorted(list(verified_modules)),\n \"buffer\": os.path.join(out_dir, \"distill.jsonl\"),\n \"schema_version\": 1,\n \"note\": \"Embedding from verified modules; adapter layers empty for downstream mixing.\",\n }\n save_npz(out_dir, embedding=emb, adapters=adapters, manifest=manifest)\n\n if not per_module_adapters:\n return\n\n # Try to infer model dims and target shapes for per-module export\n num_layers: int = 0\n d_model: int = 0\n target_shapes: Optional[Dict[str, Tuple[int, int]]] = None\n try:\n target_shapes = detect_target_shapes_from_model_full(model, target_regex=None)\n except Exception:\n target_shapes = None\n if not target_shapes:\n try:\n target_shapes = detect_target_shapes_from_model(model)\n except Exception:\n target_shapes = None\n try:\n from transformers import AutoConfig # type: ignore\n cfg = AutoConfig.from_pretrained(model, cache_dir=cache_dir)\n num_layers = int(getattr(cfg, \"num_hidden_layers\", 0) or 0)\n d_model = int(getattr(cfg, 
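The resume path keys completed work by `(module, prompt)` pairs read back from `distill.jsonl`, skipping malformed lines and treating a missing buffer as an empty set. Extracted as a helper:

```python
import json

def processed_keys(buffer_path: str) -> set[tuple[str, str]]:
    keys: set[tuple[str, str]] = set()
    try:
        with open(buffer_path, "r", encoding="utf-8") as fh:
            for line in fh:
                try:
                    row = json.loads(line)
                    keys.add((str(row.get("module", "")), str(row.get("prompt", ""))))
                except json.JSONDecodeError:
                    continue
    except OSError:
        return set()
    return keys
```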
\"hidden_size\", 0) or 0)\n except Exception:\n num_layers, d_model = 0, 0\n\n sub_root = os.path.join(out_dir, \"sub_adapters\")\n os.makedirs(sub_root, exist_ok=True)\n for module in sorted(list(verified_modules)):\n mods = [module]\n if include_deps:\n deps = list(g.module_imports.get(module, []) or [])\n if int(max_deps) > 0:\n deps = deps[: int(max_deps)]\n mods.extend([m for m in deps if m])\n mods_unique = sorted({m for m in mods})\n emb_m = build_subgraph_embedding_from_graph(\n g,\n dim=1536,\n seed=0,\n include_modules=mods_unique,\n include_files=None,\n include_text=True,\n text_max_bytes=250000,\n max_text_tokens=0,\n text_weight=0.25,\n graph_prop_hops=0,\n graph_prop_damp=0.85,\n )\n if (num_layers <= 0) or (d_model <= 0):\n sub_dir = os.path.join(sub_root, module.replace(\"/\", \"_\"))\n os.makedirs(sub_dir, exist_ok=True)\n save_npz(\n sub_dir,\n embedding=emb_m,\n adapters={\"layers\": [], \"rank\": rank, \"d_model\": 0, \"targets\": [], \"gates\": []},\n manifest={\n \"module\": module,\n \"include_modules\": mods_unique,\n \"note\": \"Embedding-only; shapes not inferred\",\n },\n )\n continue\n try:\n adapters_m = generate_lora_from_embedding(\n emb_m[\"z\"],\n d_model=int(d_model),\n num_layers=int(num_layers),\n rank=int(rank),\n seed=0,\n targets=list((target_shapes or {}).keys()) if target_shapes else None,\n target_shapes=target_shapes,\n layer_gate=\"zmean\",\n target_weights=None,\n )\n sub_dir = os.path.join(sub_root, module.replace(\"/\", \"_\"))\n os.makedirs(sub_dir, exist_ok=True)\n save_npz(\n sub_dir,\n embedding=emb_m,\n adapters=adapters_m,\n manifest={\n \"module\": module,\n \"include_modules\": mods_unique,\n \"rank\": int(rank),\n \"d_model\": int(d_model),\n \"layers\": int(num_layers),\n },\n )\n except Exception:\n continue\n\n","source_hash":"c1e3ec462512de538f0c41140ec4e75fcdbe445da18db02db5814709ced92da6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.trace_llama_parity","uri":"program://Program_Conditioned_Adapter/module/scripts.trace_llama_parity#L1-L211","kind":"module","name":"scripts.trace_llama_parity","path":"scripts/trace_llama_parity.py","language":"python","start_line":1,"end_line":211,"context_start_line":1,"context_end_line":211,"code":"import os\nimport json\nimport argparse\nfrom typing import Dict, Any, List\n\nimport torch\n\n\n@torch.no_grad()\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--prompt\", default=\"Hello\")\n p.add_argument(\"--device\", default=\"cuda\" if torch.cuda.is_available() else \"cpu\")\n p.add_argument(\"--dtype\", default=\"bfloat16\")\n p.add_argument(\"--max-layers\", type=int, default=8, help=\"Limit layers to trace for brevity\")\n p.add_argument(\"--save-logits\", action=\"store_true\", help=\"Save HF/local logits to disk to minimize RAM/VRAM use\")\n p.add_argument(\"--out-dir\", default=\"\", help=\"Directory to write saved tensors (defaults to cache-dir/trace)\")\n args = p.parse_args()\n\n os.makedirs(args.cache_dir, exist_ok=True)\n os.environ.setdefault(\"ATTN_BACKEND\", \"torch\")\n\n from transformers import AutoTokenizer, AutoModelForCausalLM # type: ignore\n tok = AutoTokenizer.from_pretrained(args.model, use_fast=True, cache_dir=args.cache_dir)\n x = tok(args.prompt, return_tensors=\"pt\")\n x = {k: v.to(args.device) for k, v in x.items()}\n\n # HF model (match dtype)\n 
hf_dtype = getattr(torch, args.dtype) if hasattr(torch, args.dtype) else torch.float32\n hf = AutoModelForCausalLM.from_pretrained(\n args.model,\n torch_dtype=hf_dtype,\n device_map={\"\": args.device},\n low_cpu_mem_usage=True,\n cache_dir=args.cache_dir,\n ).eval()\n\n # Build masks and positions exactly once (no cache), compute HF RoPE and per-layer traces first\n input_ids = x[\"input_ids\"]\n attention_mask = x.get(\"attention_mask\", None)\n B, T = int(input_ids.shape[0]), int(input_ids.shape[1])\n\n # HF position_ids and mask\n position_ids_hf = torch.arange(T, device=args.device).unsqueeze(0)\n from transformers.models.llama.modeling_llama import LlamaModel # type: ignore\n # Prepare rotary cos/sin via HF module\n llama_model: LlamaModel = hf.model # type: ignore[attr-defined]\n cos_hf, sin_hf = llama_model.rotary_emb(\n llama_model.embed_tokens(input_ids), position_ids=position_ids_hf\n )\n\n # Run HF forward with hooks to collect per-layer hidden (store on CPU to save VRAM)\n hf_layers_out: List[torch.Tensor] = []\n\n def _hf_layer_hook(module, inputs, output):\n hs = output[0] if isinstance(output, tuple) else output\n hf_layers_out.append(hs.detach().to(device=\"cpu\", dtype=torch.float32))\n\n hooks = []\n try:\n for i, layer in enumerate(llama_model.layers[: int(args.max_layers) ]):\n hooks.append(layer.register_forward_hook(_hf_layer_hook))\n _ = hf(input_ids=input_ids, attention_mask=attention_mask)\n finally:\n for h in hooks:\n try:\n h.remove()\n except Exception:\n pass\n\n # Final HF logits (CPU copy)\n out_hf = hf(input_ids=input_ids, attention_mask=attention_mask)\n logits_hf = out_hf.logits[:, -1, :].to(\"cpu\", torch.float32)\n hf_logits_path = None\n if args.save_logits:\n out_dir = args.out_dir or os.path.join(args.cache_dir, \"trace\")\n os.makedirs(out_dir, exist_ok=True)\n hf_logits_path = os.path.join(out_dir, \"hf_logits.pt\")\n torch.save(logits_hf, hf_logits_path)\n del logits_hf\n logits_hf = None\n\n # Free HF before building local to avoid OOM\n del out_hf\n del llama_model\n del hf\n try:\n torch.cuda.empty_cache()\n except Exception:\n pass\n\n # Local model (now that HF is freed)\n from model.hf_snapshot import ensure_snapshot\n ckpt_dir = ensure_snapshot(args.model, args.cache_dir)\n from specs.config import ModelConfig\n from model.factory import build_causal_lm\n from model.hf_llama_loader import load_hf_llama_weights_into_local\n cfg = json.load(open(os.path.join(ckpt_dir, \"config.json\"), \"r\", encoding=\"utf-8\"))\n mc = ModelConfig(\n d_model=int(cfg.get(\"hidden_size\")),\n n_heads=int(cfg.get(\"num_attention_heads\")),\n n_layers=int(cfg.get(\"num_hidden_layers\")),\n d_ff=int(cfg.get(\"intermediate_size\")),\n vocab_size=int(cfg.get(\"vocab_size\")),\n head_dim=int(cfg.get(\"head_dim\", int(cfg.get(\"hidden_size\")) // int(cfg.get(\"num_attention_heads\")))),\n rope_theta=float(cfg.get(\"rope_parameters\", {}).get(\"rope_theta\", cfg.get(\"rope_theta\", 1e6))),\n dtype=(args.dtype if args.device.startswith(\"cuda\") else \"float32\"),\n attn_impl=\"sdpa\",\n rms_norm_eps=float(cfg.get(\"rms_norm_eps\", 1e-6)),\n rope_scaling_type=(cfg.get(\"rope_scaling\", {}) or {}).get(\"type\"),\n rope_scaling_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"factor\"),\n rope_scaling_original_max_position_embeddings=(cfg.get(\"rope_scaling\", {}) or {}).get(\"original_max_position_embeddings\"),\n rope_scaling_low_freq_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"low_freq_factor\"),\n rope_scaling_high_freq_factor=(cfg.get(\"rope_scaling\", 
{}) or {}).get(\"high_freq_factor\"),\n )\n n_kv_heads = int(cfg.get(\"num_key_value_heads\", mc.n_heads))\n tie_we = bool(cfg.get(\"tie_word_embeddings\", True))\n local = build_causal_lm(mc, block=\"llama\", n_kv_heads=n_kv_heads, tie_weights=tie_we)\n load_hf_llama_weights_into_local(local, ckpt_dir)\n local = local.to(device=args.device, dtype=hf_dtype).eval()\n\n # Local position_ids and mask/cos/sin\n from tensor.masking import create_causal_mask\n from tensor.positional import RotaryEmbeddingHF as LocalRotary\n position_ids_local = torch.arange(T, device=args.device).unsqueeze(0)\n add_mask_local = create_causal_mask(\n input_embeds=local.embed(input_ids),\n attention_mask=attention_mask,\n cache_position=None,\n position_ids=position_ids_local,\n past_key_values=None,\n ) # (B,1,T,S)\n rope = LocalRotary(\n head_dim=int(mc.head_dim or mc.d_model // mc.n_heads),\n base_theta=float(mc.rope_theta),\n attention_scaling=float(getattr(mc, \"rope_attention_scaling\", 1.0) or 1.0),\n device=input_ids.device,\n scaling_type=getattr(mc, \"rope_scaling_type\", None),\n scaling_factor=getattr(mc, \"rope_scaling_factor\", None),\n original_max_position_embeddings=getattr(mc, \"rope_scaling_original_max_position_embeddings\", None),\n low_freq_factor=getattr(mc, \"rope_scaling_low_freq_factor\", None),\n high_freq_factor=getattr(mc, \"rope_scaling_high_freq_factor\", None),\n )\n cos_loc, sin_loc = rope.forward(local.embed(input_ids), position_ids=position_ids_local)\n\n def _stats(a: torch.Tensor, b: torch.Tensor) -> Dict[str, float]:\n a32 = a.detach().to(torch.float32, copy=False).view(-1)\n b32 = b.detach().to(torch.float32, copy=False).view(-1)\n diff = (a32 - b32)\n l2 = float(diff.norm().item())\n max_abs = float(diff.abs().max().item())\n mean_abs = float(diff.abs().mean().item())\n denom = float(max(1e-12, a32.norm().item()))\n rel = float(l2 / denom)\n cos = float(torch.nn.functional.cosine_similarity(a32.view(1, -1), b32.view(1, -1)).item())\n return {\"l2\": l2, \"max_abs\": max_abs, \"mean_abs\": mean_abs, \"rel_l2\": rel, \"cos\": cos}\n\n report: Dict[str, Any] = {\"layers\": []}\n\n # Compare masks and RoPE\n report[\"mask\"] = {\n \"local_shape\": tuple(int(s) for s in add_mask_local.shape),\n \"local_inf_counts\": int(torch.isinf(add_mask_local).sum().item()),\n }\n report[\"rope\"] = {\n \"cos\": _stats(cos_loc, cos_hf),\n \"sin\": _stats(sin_loc, sin_hf),\n }\n\n # Run local forward step-by-step to collect per-layer hidden\n local_layers_out: List[torch.Tensor] = []\n x_loc = local.embed(input_ids)\n mask_loc = add_mask_local\n pos_ids = position_ids_local\n for i, blk in enumerate(local.blocks[: int(args.max_layers) ]):\n x_loc = blk(x_loc, mask_loc, None, (cos_loc, sin_loc), pos_ids)\n local_layers_out.append(x_loc.detach().to(device=\"cpu\", dtype=torch.float32))\n\n # Compare per-layer outputs\n for i in range(min(len(hf_layers_out), len(local_layers_out))):\n stats = _stats(local_layers_out[i], hf_layers_out[i])\n report[\"layers\"].append({\"index\": i, **stats})\n\n # Final logits comparison (one step)\n out_loc = local(input_ids=input_ids, attention_mask=attention_mask, return_dict=True)\n logits_loc = out_loc[\"logits\"][:, -1, :].to(\"cpu\", torch.float32)\n loc_logits_path = None\n if args.save_logits:\n out_dir = args.out_dir or os.path.join(args.cache_dir, \"trace\")\n os.makedirs(out_dir, exist_ok=True)\n loc_logits_path = os.path.join(out_dir, \"local_logits.pt\")\n torch.save(logits_loc, loc_logits_path)\n # Load from disk for stats if we freed memory\n if 
logits_hf is None and hf_logits_path:\n logits_hf = torch.load(hf_logits_path, map_location=\"cpu\").to(torch.float32)\n if args.save_logits and loc_logits_path is not None:\n # ensure we use the same on-disk tensor for consistency\n logits_loc = torch.load(loc_logits_path, map_location=\"cpu\").to(torch.float32)\n report[\"logits\"] = _stats(logits_loc, logits_hf)\n if args.save_logits:\n report[\"logits_paths\"] = {\"hf\": hf_logits_path, \"local\": loc_logits_path}\n\n print(json.dumps(report, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"5e465dee5c4e53b58b1b305eb07f5d6468152b1ae2c910110fe4d01ee8b1fbd1","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.trace_llama_parity.main","uri":"program://Program_Conditioned_Adapter/function/scripts.trace_llama_parity.main#L10-L205","kind":"function","name":"main","path":"scripts/trace_llama_parity.py","language":"python","start_line":10,"end_line":205,"context_start_line":1,"context_end_line":211,"code":"import os\nimport json\nimport argparse\nfrom typing import Dict, Any, List\n\nimport torch\n\n\n@torch.no_grad()\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--prompt\", default=\"Hello\")\n p.add_argument(\"--device\", default=\"cuda\" if torch.cuda.is_available() else \"cpu\")\n p.add_argument(\"--dtype\", default=\"bfloat16\")\n p.add_argument(\"--max-layers\", type=int, default=8, help=\"Limit layers to trace for brevity\")\n p.add_argument(\"--save-logits\", action=\"store_true\", help=\"Save HF/local logits to disk to minimize RAM/VRAM use\")\n p.add_argument(\"--out-dir\", default=\"\", help=\"Directory to write saved tensors (defaults to cache-dir/trace)\")\n args = p.parse_args()\n\n os.makedirs(args.cache_dir, exist_ok=True)\n os.environ.setdefault(\"ATTN_BACKEND\", \"torch\")\n\n from transformers import AutoTokenizer, AutoModelForCausalLM # type: ignore\n tok = AutoTokenizer.from_pretrained(args.model, use_fast=True, cache_dir=args.cache_dir)\n x = tok(args.prompt, return_tensors=\"pt\")\n x = {k: v.to(args.device) for k, v in x.items()}\n\n # HF model (match dtype)\n hf_dtype = getattr(torch, args.dtype) if hasattr(torch, args.dtype) else torch.float32\n hf = AutoModelForCausalLM.from_pretrained(\n args.model,\n torch_dtype=hf_dtype,\n device_map={\"\": args.device},\n low_cpu_mem_usage=True,\n cache_dir=args.cache_dir,\n ).eval()\n\n # Build masks and positions exactly once (no cache), compute HF RoPE and per-layer traces first\n input_ids = x[\"input_ids\"]\n attention_mask = x.get(\"attention_mask\", None)\n B, T = int(input_ids.shape[0]), int(input_ids.shape[1])\n\n # HF position_ids and mask\n position_ids_hf = torch.arange(T, device=args.device).unsqueeze(0)\n from transformers.models.llama.modeling_llama import LlamaModel # type: ignore\n # Prepare rotary cos/sin via HF module\n llama_model: LlamaModel = hf.model # type: ignore[attr-defined]\n cos_hf, sin_hf = llama_model.rotary_emb(\n llama_model.embed_tokens(input_ids), position_ids=position_ids_hf\n )\n\n # Run HF forward with hooks to collect per-layer hidden (store on CPU to save VRAM)\n hf_layers_out: List[torch.Tensor] = []\n\n def _hf_layer_hook(module, inputs, output):\n hs = output[0] if isinstance(output, tuple) else output\n hf_layers_out.append(hs.detach().to(device=\"cpu\", dtype=torch.float32))\n\n hooks = []\n try:\n for 
i, layer in enumerate(llama_model.layers[: int(args.max_layers) ]):\n hooks.append(layer.register_forward_hook(_hf_layer_hook))\n _ = hf(input_ids=input_ids, attention_mask=attention_mask)\n finally:\n for h in hooks:\n try:\n h.remove()\n except Exception:\n pass\n\n # Final HF logits (CPU copy)\n out_hf = hf(input_ids=input_ids, attention_mask=attention_mask)\n logits_hf = out_hf.logits[:, -1, :].to(\"cpu\", torch.float32)\n hf_logits_path = None\n if args.save_logits:\n out_dir = args.out_dir or os.path.join(args.cache_dir, \"trace\")\n os.makedirs(out_dir, exist_ok=True)\n hf_logits_path = os.path.join(out_dir, \"hf_logits.pt\")\n torch.save(logits_hf, hf_logits_path)\n del logits_hf\n logits_hf = None\n\n # Free HF before building local to avoid OOM\n del out_hf\n del llama_model\n del hf\n try:\n torch.cuda.empty_cache()\n except Exception:\n pass\n\n # Local model (now that HF is freed)\n from model.hf_snapshot import ensure_snapshot\n ckpt_dir = ensure_snapshot(args.model, args.cache_dir)\n from specs.config import ModelConfig\n from model.factory import build_causal_lm\n from model.hf_llama_loader import load_hf_llama_weights_into_local\n cfg = json.load(open(os.path.join(ckpt_dir, \"config.json\"), \"r\", encoding=\"utf-8\"))\n mc = ModelConfig(\n d_model=int(cfg.get(\"hidden_size\")),\n n_heads=int(cfg.get(\"num_attention_heads\")),\n n_layers=int(cfg.get(\"num_hidden_layers\")),\n d_ff=int(cfg.get(\"intermediate_size\")),\n vocab_size=int(cfg.get(\"vocab_size\")),\n head_dim=int(cfg.get(\"head_dim\", int(cfg.get(\"hidden_size\")) // int(cfg.get(\"num_attention_heads\")))),\n rope_theta=float(cfg.get(\"rope_parameters\", {}).get(\"rope_theta\", cfg.get(\"rope_theta\", 1e6))),\n dtype=(args.dtype if args.device.startswith(\"cuda\") else \"float32\"),\n attn_impl=\"sdpa\",\n rms_norm_eps=float(cfg.get(\"rms_norm_eps\", 1e-6)),\n rope_scaling_type=(cfg.get(\"rope_scaling\", {}) or {}).get(\"type\"),\n rope_scaling_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"factor\"),\n rope_scaling_original_max_position_embeddings=(cfg.get(\"rope_scaling\", {}) or {}).get(\"original_max_position_embeddings\"),\n rope_scaling_low_freq_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"low_freq_factor\"),\n rope_scaling_high_freq_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"high_freq_factor\"),\n )\n n_kv_heads = int(cfg.get(\"num_key_value_heads\", mc.n_heads))\n tie_we = bool(cfg.get(\"tie_word_embeddings\", True))\n local = build_causal_lm(mc, block=\"llama\", n_kv_heads=n_kv_heads, tie_weights=tie_we)\n load_hf_llama_weights_into_local(local, ckpt_dir)\n local = local.to(device=args.device, dtype=hf_dtype).eval()\n\n # Local position_ids and mask/cos/sin\n from tensor.masking import create_causal_mask\n from tensor.positional import RotaryEmbeddingHF as LocalRotary\n position_ids_local = torch.arange(T, device=args.device).unsqueeze(0)\n add_mask_local = create_causal_mask(\n input_embeds=local.embed(input_ids),\n attention_mask=attention_mask,\n cache_position=None,\n position_ids=position_ids_local,\n past_key_values=None,\n ) # (B,1,T,S)\n rope = LocalRotary(\n head_dim=int(mc.head_dim or mc.d_model // mc.n_heads),\n base_theta=float(mc.rope_theta),\n attention_scaling=float(getattr(mc, \"rope_attention_scaling\", 1.0) or 1.0),\n device=input_ids.device,\n scaling_type=getattr(mc, \"rope_scaling_type\", None),\n scaling_factor=getattr(mc, \"rope_scaling_factor\", None),\n original_max_position_embeddings=getattr(mc, \"rope_scaling_original_max_position_embeddings\", None),\n 
low_freq_factor=getattr(mc, \"rope_scaling_low_freq_factor\", None),\n high_freq_factor=getattr(mc, \"rope_scaling_high_freq_factor\", None),\n )\n cos_loc, sin_loc = rope.forward(local.embed(input_ids), position_ids=position_ids_local)\n\n def _stats(a: torch.Tensor, b: torch.Tensor) -> Dict[str, float]:\n a32 = a.detach().to(torch.float32, copy=False).view(-1)\n b32 = b.detach().to(torch.float32, copy=False).view(-1)\n diff = (a32 - b32)\n l2 = float(diff.norm().item())\n max_abs = float(diff.abs().max().item())\n mean_abs = float(diff.abs().mean().item())\n denom = float(max(1e-12, a32.norm().item()))\n rel = float(l2 / denom)\n cos = float(torch.nn.functional.cosine_similarity(a32.view(1, -1), b32.view(1, -1)).item())\n return {\"l2\": l2, \"max_abs\": max_abs, \"mean_abs\": mean_abs, \"rel_l2\": rel, \"cos\": cos}\n\n report: Dict[str, Any] = {\"layers\": []}\n\n # Compare masks and RoPE\n report[\"mask\"] = {\n \"local_shape\": tuple(int(s) for s in add_mask_local.shape),\n \"local_inf_counts\": int(torch.isinf(add_mask_local).sum().item()),\n }\n report[\"rope\"] = {\n \"cos\": _stats(cos_loc, cos_hf),\n \"sin\": _stats(sin_loc, sin_hf),\n }\n\n # Run local forward step-by-step to collect per-layer hidden\n local_layers_out: List[torch.Tensor] = []\n x_loc = local.embed(input_ids)\n mask_loc = add_mask_local\n pos_ids = position_ids_local\n for i, blk in enumerate(local.blocks[: int(args.max_layers) ]):\n x_loc = blk(x_loc, mask_loc, None, (cos_loc, sin_loc), pos_ids)\n local_layers_out.append(x_loc.detach().to(device=\"cpu\", dtype=torch.float32))\n\n # Compare per-layer outputs\n for i in range(min(len(hf_layers_out), len(local_layers_out))):\n stats = _stats(local_layers_out[i], hf_layers_out[i])\n report[\"layers\"].append({\"index\": i, **stats})\n\n # Final logits comparison (one step)\n out_loc = local(input_ids=input_ids, attention_mask=attention_mask, return_dict=True)\n logits_loc = out_loc[\"logits\"][:, -1, :].to(\"cpu\", torch.float32)\n loc_logits_path = None\n if args.save_logits:\n out_dir = args.out_dir or os.path.join(args.cache_dir, \"trace\")\n os.makedirs(out_dir, exist_ok=True)\n loc_logits_path = os.path.join(out_dir, \"local_logits.pt\")\n torch.save(logits_loc, loc_logits_path)\n # Load from disk for stats if we freed memory\n if logits_hf is None and hf_logits_path:\n logits_hf = torch.load(hf_logits_path, map_location=\"cpu\").to(torch.float32)\n if args.save_logits and loc_logits_path is not None:\n # ensure we use the same on-disk tensor for consistency\n logits_loc = torch.load(loc_logits_path, map_location=\"cpu\").to(torch.float32)\n report[\"logits\"] = _stats(logits_loc, logits_hf)\n if args.save_logits:\n report[\"logits_paths\"] = {\"hf\": hf_logits_path, \"local\": loc_logits_path}\n\n print(json.dumps(report, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"5e465dee5c4e53b58b1b305eb07f5d6468152b1ae2c910110fe4d01ee8b1fbd1","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.trace_llama_parity._hf_layer_hook","uri":"program://Program_Conditioned_Adapter/function/scripts.trace_llama_parity._hf_layer_hook#L57-L59","kind":"function","name":"_hf_layer_hook","path":"scripts/trace_llama_parity.py","language":"python","start_line":57,"end_line":59,"context_start_line":37,"context_end_line":79,"code":" cache_dir=args.cache_dir,\n ).eval()\n\n # Build masks and positions exactly once (no cache), compute HF RoPE and per-layer traces first\n input_ids = x[\"input_ids\"]\n attention_mask = 
x.get(\"attention_mask\", None)\n B, T = int(input_ids.shape[0]), int(input_ids.shape[1])\n\n # HF position_ids and mask\n position_ids_hf = torch.arange(T, device=args.device).unsqueeze(0)\n from transformers.models.llama.modeling_llama import LlamaModel # type: ignore\n # Prepare rotary cos/sin via HF module\n llama_model: LlamaModel = hf.model # type: ignore[attr-defined]\n cos_hf, sin_hf = llama_model.rotary_emb(\n llama_model.embed_tokens(input_ids), position_ids=position_ids_hf\n )\n\n # Run HF forward with hooks to collect per-layer hidden (store on CPU to save VRAM)\n hf_layers_out: List[torch.Tensor] = []\n\n def _hf_layer_hook(module, inputs, output):\n hs = output[0] if isinstance(output, tuple) else output\n hf_layers_out.append(hs.detach().to(device=\"cpu\", dtype=torch.float32))\n\n hooks = []\n try:\n for i, layer in enumerate(llama_model.layers[: int(args.max_layers) ]):\n hooks.append(layer.register_forward_hook(_hf_layer_hook))\n _ = hf(input_ids=input_ids, attention_mask=attention_mask)\n finally:\n for h in hooks:\n try:\n h.remove()\n except Exception:\n pass\n\n # Final HF logits (CPU copy)\n out_hf = hf(input_ids=input_ids, attention_mask=attention_mask)\n logits_hf = out_hf.logits[:, -1, :].to(\"cpu\", torch.float32)\n hf_logits_path = None\n if args.save_logits:\n out_dir = args.out_dir or os.path.join(args.cache_dir, \"trace\")\n os.makedirs(out_dir, exist_ok=True)","source_hash":"5e465dee5c4e53b58b1b305eb07f5d6468152b1ae2c910110fe4d01ee8b1fbd1","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.trace_llama_parity._stats","uri":"program://Program_Conditioned_Adapter/function/scripts.trace_llama_parity._stats#L148-L158","kind":"function","name":"_stats","path":"scripts/trace_llama_parity.py","language":"python","start_line":148,"end_line":158,"context_start_line":128,"context_end_line":178,"code":" add_mask_local = create_causal_mask(\n input_embeds=local.embed(input_ids),\n attention_mask=attention_mask,\n cache_position=None,\n position_ids=position_ids_local,\n past_key_values=None,\n ) # (B,1,T,S)\n rope = LocalRotary(\n head_dim=int(mc.head_dim or mc.d_model // mc.n_heads),\n base_theta=float(mc.rope_theta),\n attention_scaling=float(getattr(mc, \"rope_attention_scaling\", 1.0) or 1.0),\n device=input_ids.device,\n scaling_type=getattr(mc, \"rope_scaling_type\", None),\n scaling_factor=getattr(mc, \"rope_scaling_factor\", None),\n original_max_position_embeddings=getattr(mc, \"rope_scaling_original_max_position_embeddings\", None),\n low_freq_factor=getattr(mc, \"rope_scaling_low_freq_factor\", None),\n high_freq_factor=getattr(mc, \"rope_scaling_high_freq_factor\", None),\n )\n cos_loc, sin_loc = rope.forward(local.embed(input_ids), position_ids=position_ids_local)\n\n def _stats(a: torch.Tensor, b: torch.Tensor) -> Dict[str, float]:\n a32 = a.detach().to(torch.float32, copy=False).view(-1)\n b32 = b.detach().to(torch.float32, copy=False).view(-1)\n diff = (a32 - b32)\n l2 = float(diff.norm().item())\n max_abs = float(diff.abs().max().item())\n mean_abs = float(diff.abs().mean().item())\n denom = float(max(1e-12, a32.norm().item()))\n rel = float(l2 / denom)\n cos = float(torch.nn.functional.cosine_similarity(a32.view(1, -1), b32.view(1, -1)).item())\n return {\"l2\": l2, \"max_abs\": max_abs, \"mean_abs\": mean_abs, \"rel_l2\": rel, \"cos\": cos}\n\n report: Dict[str, Any] = {\"layers\": []}\n\n # Compare masks and RoPE\n report[\"mask\"] = {\n \"local_shape\": tuple(int(s) for s in add_mask_local.shape),\n 
\"local_inf_counts\": int(torch.isinf(add_mask_local).sum().item()),\n }\n report[\"rope\"] = {\n \"cos\": _stats(cos_loc, cos_hf),\n \"sin\": _stats(sin_loc, sin_hf),\n }\n\n # Run local forward step-by-step to collect per-layer hidden\n local_layers_out: List[torch.Tensor] = []\n x_loc = local.embed(input_ids)\n mask_loc = add_mask_local\n pos_ids = position_ids_local\n for i, blk in enumerate(local.blocks[: int(args.max_layers) ]):\n x_loc = blk(x_loc, mask_loc, None, (cos_loc, sin_loc), pos_ids)","source_hash":"5e465dee5c4e53b58b1b305eb07f5d6468152b1ae2c910110fe4d01ee8b1fbd1","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.compare_tokenizers","uri":"program://Program_Conditioned_Adapter/module/scripts.compare_tokenizers#L1-L100","kind":"module","name":"scripts.compare_tokenizers","path":"scripts/compare_tokenizers.py","language":"python","start_line":1,"end_line":100,"context_start_line":1,"context_end_line":100,"code":"import os\nimport json\nfrom typing import List, Dict\n\n\ndef main() -> None:\n import argparse\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", required=True, help=\"HF model id or local snapshot path\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--samples\", type=int, default=0, help=\"Sample count from a default suite (0 uses all)\")\n p.add_argument(\"--backend\", default=\"local\", choices=[\"hf\",\"local\",\"pure\"], help=\"Which local tokenizer backend to compare against HF\")\n args = p.parse_args()\n\n from model.hf_snapshot import ensure_snapshot\n snap = ensure_snapshot(args.model, args.cache_dir)\n\n # HF reference tokenizer\n try:\n from transformers import AutoTokenizer # type: ignore\n except Exception as e:\n raise RuntimeError(\"Install transformers to run the comparison script\") from e\n hf_tok = AutoTokenizer.from_pretrained(args.model, use_fast=True, cache_dir=args.cache_dir)\n if getattr(hf_tok, \"pad_token\", None) is None:\n try:\n hf_tok.pad_token = hf_tok.eos_token\n except Exception:\n pass\n\n # Local tokenizer\n if args.backend == \"pure\":\n from data.tokenizer import PureLlamaTokenizer\n local_tok = PureLlamaTokenizer(snap)\n else:\n from data.tokenizer import LocalLlamaTokenizer\n local_tok = LocalLlamaTokenizer(snap)\n\n tests: List[str] = [\n \"Hello, world\",\n \"The quick brown fox jumps over 13 lazy dogs.\",\n \" leading and multiple spaces \",\n \"tabs\\tand\\nnewlines\\n\",\n \"emoji 😀😃😄 😁🖖🏽\",\n \"中文測試 — mixed-width punctuation、!\",\n \"العَرَبِيَّةُ لغةٌ جميلةٌ\",\n \"कृत्रिम बुद्धिमत्ता से नमस्ते\",\n \"geschichten\u00119\u00119\u00119\",\n \"<|begin_of_text|> special token check <|end_of_text|>\",\n ]\n if args.samples and args.samples > 0:\n tests = tests[: int(args.samples)]\n\n results: Dict[str, Dict] = {}\n ok = 0\n for t in tests:\n hf_ids = list(hf_tok.encode(t, add_special_tokens=False))\n loc_ids = local_tok.encode(t)\n enc_eq = (hf_ids == loc_ids)\n # Decode both from HF ids for stable baseline\n hf_dec = hf_tok.decode(hf_ids)\n loc_dec_from_hf = local_tok.decode(hf_ids)\n # Decode both from local ids\n loc_dec = local_tok.decode(loc_ids)\n hf_dec_from_loc = hf_tok.decode(loc_ids)\n results[t] = {\n \"enc_equal\": enc_eq,\n \"hf_ids\": hf_ids[:64],\n \"loc_ids\": loc_ids[:64],\n \"hf_dec\": hf_dec,\n \"loc_dec_from_hf\": loc_dec_from_hf,\n \"loc_dec\": loc_dec,\n \"hf_dec_from_loc\": hf_dec_from_loc,\n }\n ok += int(enc_eq)\n\n out = {\n \"snapshot\": snap,\n \"hf_special\": {\n \"bos\": getattr(hf_tok, 
\"bos_token_id\", None),\n \"eos\": getattr(hf_tok, \"eos_token_id\", None),\n \"unk\": getattr(hf_tok, \"unk_token_id\", None),\n \"pad\": getattr(hf_tok, \"pad_token_id\", None),\n },\n \"local_special\": {\n \"bos\": local_tok.bos_token_id,\n \"eos\": local_tok.eos_token_id,\n \"unk\": local_tok.unk_token_id,\n \"pad\": local_tok.pad_token_id,\n },\n \"tests\": results,\n \"enc_equal_count\": ok,\n \"enc_total\": len(tests),\n }\n print(json.dumps(out, ensure_ascii=False, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"573051bcd0f55c9b3d6e603a11bea0d116be369fd5f8e3c2eca701df323b6e37","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.compare_tokenizers.main","uri":"program://Program_Conditioned_Adapter/function/scripts.compare_tokenizers.main#L6-L94","kind":"function","name":"main","path":"scripts/compare_tokenizers.py","language":"python","start_line":6,"end_line":94,"context_start_line":1,"context_end_line":100,"code":"import os\nimport json\nfrom typing import List, Dict\n\n\ndef main() -> None:\n import argparse\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", required=True, help=\"HF model id or local snapshot path\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--samples\", type=int, default=0, help=\"Sample count from a default suite (0 uses all)\")\n p.add_argument(\"--backend\", default=\"local\", choices=[\"hf\",\"local\",\"pure\"], help=\"Which local tokenizer backend to compare against HF\")\n args = p.parse_args()\n\n from model.hf_snapshot import ensure_snapshot\n snap = ensure_snapshot(args.model, args.cache_dir)\n\n # HF reference tokenizer\n try:\n from transformers import AutoTokenizer # type: ignore\n except Exception as e:\n raise RuntimeError(\"Install transformers to run the comparison script\") from e\n hf_tok = AutoTokenizer.from_pretrained(args.model, use_fast=True, cache_dir=args.cache_dir)\n if getattr(hf_tok, \"pad_token\", None) is None:\n try:\n hf_tok.pad_token = hf_tok.eos_token\n except Exception:\n pass\n\n # Local tokenizer\n if args.backend == \"pure\":\n from data.tokenizer import PureLlamaTokenizer\n local_tok = PureLlamaTokenizer(snap)\n else:\n from data.tokenizer import LocalLlamaTokenizer\n local_tok = LocalLlamaTokenizer(snap)\n\n tests: List[str] = [\n \"Hello, world\",\n \"The quick brown fox jumps over 13 lazy dogs.\",\n \" leading and multiple spaces \",\n \"tabs\\tand\\nnewlines\\n\",\n \"emoji 😀😃😄 😁🖖🏽\",\n \"中文測試 — mixed-width punctuation、!\",\n \"العَرَبِيَّةُ لغةٌ جميلةٌ\",\n \"कृत्रिम बुद्धिमत्ता से नमस्ते\",\n \"geschichten\u00119\u00119\u00119\",\n \"<|begin_of_text|> special token check <|end_of_text|>\",\n ]\n if args.samples and args.samples > 0:\n tests = tests[: int(args.samples)]\n\n results: Dict[str, Dict] = {}\n ok = 0\n for t in tests:\n hf_ids = list(hf_tok.encode(t, add_special_tokens=False))\n loc_ids = local_tok.encode(t)\n enc_eq = (hf_ids == loc_ids)\n # Decode both from HF ids for stable baseline\n hf_dec = hf_tok.decode(hf_ids)\n loc_dec_from_hf = local_tok.decode(hf_ids)\n # Decode both from local ids\n loc_dec = local_tok.decode(loc_ids)\n hf_dec_from_loc = hf_tok.decode(loc_ids)\n results[t] = {\n \"enc_equal\": enc_eq,\n \"hf_ids\": hf_ids[:64],\n \"loc_ids\": loc_ids[:64],\n \"hf_dec\": hf_dec,\n \"loc_dec_from_hf\": loc_dec_from_hf,\n \"loc_dec\": loc_dec,\n \"hf_dec_from_loc\": hf_dec_from_loc,\n }\n ok += int(enc_eq)\n\n out = {\n \"snapshot\": snap,\n \"hf_special\": {\n \"bos\": 
getattr(hf_tok, \"bos_token_id\", None),\n \"eos\": getattr(hf_tok, \"eos_token_id\", None),\n \"unk\": getattr(hf_tok, \"unk_token_id\", None),\n \"pad\": getattr(hf_tok, \"pad_token_id\", None),\n },\n \"local_special\": {\n \"bos\": local_tok.bos_token_id,\n \"eos\": local_tok.eos_token_id,\n \"unk\": local_tok.unk_token_id,\n \"pad\": local_tok.pad_token_id,\n },\n \"tests\": results,\n \"enc_equal_count\": ok,\n \"enc_total\": len(tests),\n }\n print(json.dumps(out, ensure_ascii=False, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"573051bcd0f55c9b3d6e603a11bea0d116be369fd5f8e3c2eca701df323b6e37","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.diff_hf_local_weights","uri":"program://Program_Conditioned_Adapter/module/scripts.diff_hf_local_weights#L1-L173","kind":"module","name":"scripts.diff_hf_local_weights","path":"scripts/diff_hf_local_weights.py","language":"python","start_line":1,"end_line":173,"context_start_line":1,"context_end_line":173,"code":"import os\nimport json\nimport argparse\nfrom typing import Dict, List, Tuple\n\nimport torch\n\n\ndef _ensure_snapshot(model_id: str, cache_dir: str) -> str:\n if os.path.isdir(model_id) and os.path.isfile(os.path.join(model_id, \"config.json\")):\n return model_id\n org_name = model_id.strip().split(\"/\")[-2:]\n if len(org_name) == 2:\n org, name = org_name\n dir1 = os.path.join(cache_dir, f\"models--{org}--{name}\", \"snapshots\")\n cands = []\n if os.path.isdir(dir1):\n cands.extend([os.path.join(dir1, d) for d in os.listdir(dir1)])\n cands = [p for p in cands if os.path.isfile(os.path.join(p, \"model.safetensors.index.json\"))]\n if cands:\n cands.sort(key=lambda p: os.path.getmtime(p), reverse=True)\n return cands[0]\n from huggingface_hub import snapshot_download # type: ignore\n return snapshot_download(repo_id=model_id, cache_dir=cache_dir)\n\n\ndef _index_shards(model_dir: str) -> Dict[str, List[str]]:\n index_fp = os.path.join(model_dir, \"model.safetensors.index.json\")\n if not os.path.isfile(index_fp):\n raise FileNotFoundError(f\"Missing index file: {index_fp}\")\n with open(index_fp, \"r\", encoding=\"utf-8\") as fh:\n index_obj = json.load(fh)\n weight_map: Dict[str, str] = index_obj.get(\"weight_map\", {})\n by_file: Dict[str, List[str]] = {}\n for k, fn in weight_map.items():\n by_file.setdefault(fn, []).append(k)\n return by_file\n\n\ndef _read_hf_tensor(model_dir: str, key: str) -> torch.Tensor:\n from safetensors import safe_open # type: ignore\n by_file = _index_shards(model_dir)\n for shard_rel, keys in by_file.items():\n if key not in keys:\n continue\n fp = os.path.join(model_dir, shard_rel)\n with safe_open(fp, framework=\"pt\", device=\"cpu\") as f: # type: ignore\n return f.get_tensor(key) # type: ignore[attr-defined]\n raise KeyError(f\"tensor {key} not found in shards\")\n\n\ndef _metrics(a: torch.Tensor, b: torch.Tensor) -> Dict[str, float]:\n a32 = a.detach().to(torch.float32, copy=False)\n b32 = b.detach().to(torch.float32, copy=False)\n diff = (a32 - b32).view(-1)\n l2 = float(diff.norm().item())\n max_abs = float(diff.abs().max().item())\n denom = float(max(1e-12, a32.norm().item()))\n rel = float(l2 / denom)\n cos = float((torch.nn.functional.cosine_similarity(a32.view(1, -1), b32.view(1, -1))).item())\n return {\"l2\": l2, \"max_abs\": max_abs, \"rel_l2\": rel, \"cos\": cos}\n\n\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n 
p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--layers\", default=\"0\", help=\"Comma-separated layer indices to compare\")\n args = p.parse_args()\n\n os.makedirs(args.cache_dir, exist_ok=True)\n ckpt = _ensure_snapshot(args.model, args.cache_dir)\n\n # Build local model and load HF weights via our loader\n from specs.config import ModelConfig\n from model.factory import build_causal_lm\n from model.hf_llama_loader import load_hf_llama_weights_into_local\n\n cfg = json.load(open(os.path.join(ckpt, \"config.json\"), \"r\", encoding=\"utf-8\"))\n use_cuda = torch.cuda.is_available()\n mc = ModelConfig(\n d_model=int(cfg.get(\"hidden_size\")),\n n_heads=int(cfg.get(\"num_attention_heads\")),\n n_layers=int(cfg.get(\"num_hidden_layers\")),\n d_ff=int(cfg.get(\"intermediate_size\")),\n vocab_size=int(cfg.get(\"vocab_size\")),\n head_dim=int(cfg.get(\"head_dim\", int(cfg.get(\"hidden_size\")) // int(cfg.get(\"num_attention_heads\")))),\n rope_theta=float(cfg.get(\"rope_parameters\", {}).get(\"rope_theta\", cfg.get(\"rope_theta\", 1e6))),\n dtype=(\"bfloat16\" if use_cuda else \"float32\"),\n attn_impl=\"sdpa\",\n rms_norm_eps=float(cfg.get(\"rms_norm_eps\", 1e-6)),\n rope_scaling_type=(cfg.get(\"rope_scaling\", {}) or {}).get(\"type\"),\n rope_scaling_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"factor\"),\n rope_scaling_original_max_position_embeddings=(cfg.get(\"rope_scaling\", {}) or {}).get(\"original_max_position_embeddings\"),\n rope_scaling_low_freq_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"low_freq_factor\"),\n rope_scaling_high_freq_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"high_freq_factor\"),\n )\n n_kv_heads = int(cfg.get(\"num_key_value_heads\", mc.n_heads))\n tie_we = bool(cfg.get(\"tie_word_embeddings\", True))\n local = build_causal_lm(mc, block=\"llama\", n_kv_heads=n_kv_heads, tie_weights=tie_we)\n load_hf_llama_weights_into_local(local, ckpt)\n local = (local.to(device=\"cuda\", dtype=torch.bfloat16) if use_cuda else local.to(device=\"cpu\", dtype=torch.float32)).eval()\n\n report: Dict[str, Dict[str, float]] = {}\n\n # Embedding and lm_head\n try:\n hf_embed = _read_hf_tensor(ckpt, \"model.embed_tokens.weight\").to(\"cpu\")\n report[\"embed.weight\"] = _metrics(local.embed.weight.data.detach().to(\"cpu\"), hf_embed)\n except Exception:\n report[\"embed.weight\"] = {\"error\": 1.0} # type: ignore\n try:\n hf_lm = _read_hf_tensor(ckpt, \"lm_head.weight\").to(\"cpu\")\n report[\"lm_head.weight\"] = _metrics(local.lm_head.weight.data.detach().to(\"cpu\"), hf_lm)\n except Exception:\n report[\"lm_head.weight\"] = {\"error\": 1.0} # type: ignore\n\n layers = [int(x.strip()) for x in str(args.layers).split(\",\") if x.strip()]\n for li in layers:\n key = f\"layers.{li}\"\n try:\n blk = local.blocks[li]\n except Exception:\n report[key] = {\"error\": 1.0} # type: ignore\n continue\n\n # Attention projections\n for short, hf_name, local_w in (\n (\"q_proj\", f\"model.layers.{li}.self_attn.q_proj.weight\", blk.attn.w_q.weight),\n (\"k_proj\", f\"model.layers.{li}.self_attn.k_proj.weight\", blk.attn.w_k.weight),\n (\"v_proj\", f\"model.layers.{li}.self_attn.v_proj.weight\", blk.attn.w_v.weight),\n (\"o_proj\", f\"model.layers.{li}.self_attn.o_proj.weight\", blk.attn.w_o.weight),\n ):\n try:\n hf_t = _read_hf_tensor(ckpt, hf_name).to(\"cpu\")\n report[f\"{key}.{short}\"] = _metrics(local_w.data.detach().to(\"cpu\"), hf_t)\n except Exception:\n report[f\"{key}.{short}\"] = {\"error\": 1.0} # type: ignore\n\n # 
Layer norms\n for short, hf_name, local_w in (\n (\"n1\", f\"model.layers.{li}.input_layernorm.weight\", blk.n1.weight),\n (\"n2\", f\"model.layers.{li}.post_attention_layernorm.weight\", blk.n2.weight),\n ):\n try:\n hf_t = _read_hf_tensor(ckpt, hf_name).to(\"cpu\")\n report[f\"{key}.{short}\"] = _metrics(local_w.data.detach().to(\"cpu\"), hf_t)\n except Exception:\n report[f\"{key}.{short}\"] = {\"error\": 1.0} # type: ignore\n\n # MLP down (w_out)\n try:\n hf_down = _read_hf_tensor(ckpt, f\"model.layers.{li}.mlp.down_proj.weight\").to(\"cpu\")\n report[f\"{key}.mlp.down\"] = _metrics(blk.mlp.w_out.weight.data.detach().to(\"cpu\"), hf_down)\n except Exception:\n report[f\"{key}.mlp.down\"] = {\"error\": 1.0} # type: ignore\n\n # MLP fused w_in vs concat(gate, up)\n try:\n hf_gate = _read_hf_tensor(ckpt, f\"model.layers.{li}.mlp.gate_proj.weight\").to(\"cpu\")\n hf_up = _read_hf_tensor(ckpt, f\"model.layers.{li}.mlp.up_proj.weight\").to(\"cpu\")\n fused = torch.cat([hf_gate, hf_up], dim=0).contiguous()\n report[f\"{key}.mlp.w_in\"] = _metrics(blk.mlp.w_in.weight.data.detach().to(\"cpu\"), fused)\n except Exception:\n report[f\"{key}.mlp.w_in\"] = {\"error\": 1.0} # type: ignore\n\n print(json.dumps(report, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"138a3eaaa0b0d1529dc8346d14adec02d84cc753d5d0f3625decdd3d1032321a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.diff_hf_local_weights._ensure_snapshot","uri":"program://Program_Conditioned_Adapter/function/scripts.diff_hf_local_weights._ensure_snapshot#L9-L24","kind":"function","name":"_ensure_snapshot","path":"scripts/diff_hf_local_weights.py","language":"python","start_line":9,"end_line":24,"context_start_line":1,"context_end_line":44,"code":"import os\nimport json\nimport argparse\nfrom typing import Dict, List, Tuple\n\nimport torch\n\n\ndef _ensure_snapshot(model_id: str, cache_dir: str) -> str:\n if os.path.isdir(model_id) and os.path.isfile(os.path.join(model_id, \"config.json\")):\n return model_id\n org_name = model_id.strip().split(\"/\")[-2:]\n if len(org_name) == 2:\n org, name = org_name\n dir1 = os.path.join(cache_dir, f\"models--{org}--{name}\", \"snapshots\")\n cands = []\n if os.path.isdir(dir1):\n cands.extend([os.path.join(dir1, d) for d in os.listdir(dir1)])\n cands = [p for p in cands if os.path.isfile(os.path.join(p, \"model.safetensors.index.json\"))]\n if cands:\n cands.sort(key=lambda p: os.path.getmtime(p), reverse=True)\n return cands[0]\n from huggingface_hub import snapshot_download # type: ignore\n return snapshot_download(repo_id=model_id, cache_dir=cache_dir)\n\n\ndef _index_shards(model_dir: str) -> Dict[str, List[str]]:\n index_fp = os.path.join(model_dir, \"model.safetensors.index.json\")\n if not os.path.isfile(index_fp):\n raise FileNotFoundError(f\"Missing index file: {index_fp}\")\n with open(index_fp, \"r\", encoding=\"utf-8\") as fh:\n index_obj = json.load(fh)\n weight_map: Dict[str, str] = index_obj.get(\"weight_map\", {})\n by_file: Dict[str, List[str]] = {}\n for k, fn in weight_map.items():\n by_file.setdefault(fn, []).append(k)\n return by_file\n\n\ndef _read_hf_tensor(model_dir: str, key: str) -> torch.Tensor:\n from safetensors import safe_open # type: ignore\n by_file = _index_shards(model_dir)\n for shard_rel, keys in by_file.items():\n if key not in keys:","source_hash":"138a3eaaa0b0d1529dc8346d14adec02d84cc753d5d0f3625decdd3d1032321a","truncated":false} 
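The fused-MLP comparison above assumes the local w_in stores gate_proj stacked on top of up_proj along the output dimension. A self-contained check of that layout with toy shapes (illustrative dimensions, not Llama's):

import torch

d_model, d_ff = 16, 32
gate = torch.randn(d_ff, d_model)
up = torch.randn(d_ff, d_model)
# Fuse exactly as diff_hf_local_weights.py does: concat along dim 0.
w_in = torch.cat([gate, up], dim=0)
assert w_in.shape == (2 * d_ff, d_model)
# Splitting the fused matrix back recovers the originals exactly.
g2, u2 = w_in[:d_ff], w_in[d_ff:]
assert torch.equal(g2, gate) and torch.equal(u2, up)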
{"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.diff_hf_local_weights._index_shards","uri":"program://Program_Conditioned_Adapter/function/scripts.diff_hf_local_weights._index_shards#L27-L37","kind":"function","name":"_index_shards","path":"scripts/diff_hf_local_weights.py","language":"python","start_line":27,"end_line":37,"context_start_line":7,"context_end_line":57,"code":"\n\ndef _ensure_snapshot(model_id: str, cache_dir: str) -> str:\n if os.path.isdir(model_id) and os.path.isfile(os.path.join(model_id, \"config.json\")):\n return model_id\n org_name = model_id.strip().split(\"/\")[-2:]\n if len(org_name) == 2:\n org, name = org_name\n dir1 = os.path.join(cache_dir, f\"models--{org}--{name}\", \"snapshots\")\n cands = []\n if os.path.isdir(dir1):\n cands.extend([os.path.join(dir1, d) for d in os.listdir(dir1)])\n cands = [p for p in cands if os.path.isfile(os.path.join(p, \"model.safetensors.index.json\"))]\n if cands:\n cands.sort(key=lambda p: os.path.getmtime(p), reverse=True)\n return cands[0]\n from huggingface_hub import snapshot_download # type: ignore\n return snapshot_download(repo_id=model_id, cache_dir=cache_dir)\n\n\ndef _index_shards(model_dir: str) -> Dict[str, List[str]]:\n index_fp = os.path.join(model_dir, \"model.safetensors.index.json\")\n if not os.path.isfile(index_fp):\n raise FileNotFoundError(f\"Missing index file: {index_fp}\")\n with open(index_fp, \"r\", encoding=\"utf-8\") as fh:\n index_obj = json.load(fh)\n weight_map: Dict[str, str] = index_obj.get(\"weight_map\", {})\n by_file: Dict[str, List[str]] = {}\n for k, fn in weight_map.items():\n by_file.setdefault(fn, []).append(k)\n return by_file\n\n\ndef _read_hf_tensor(model_dir: str, key: str) -> torch.Tensor:\n from safetensors import safe_open # type: ignore\n by_file = _index_shards(model_dir)\n for shard_rel, keys in by_file.items():\n if key not in keys:\n continue\n fp = os.path.join(model_dir, shard_rel)\n with safe_open(fp, framework=\"pt\", device=\"cpu\") as f: # type: ignore\n return f.get_tensor(key) # type: ignore[attr-defined]\n raise KeyError(f\"tensor {key} not found in shards\")\n\n\ndef _metrics(a: torch.Tensor, b: torch.Tensor) -> Dict[str, float]:\n a32 = a.detach().to(torch.float32, copy=False)\n b32 = b.detach().to(torch.float32, copy=False)\n diff = (a32 - b32).view(-1)\n l2 = float(diff.norm().item())\n max_abs = float(diff.abs().max().item())","source_hash":"138a3eaaa0b0d1529dc8346d14adec02d84cc753d5d0f3625decdd3d1032321a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.diff_hf_local_weights._read_hf_tensor","uri":"program://Program_Conditioned_Adapter/function/scripts.diff_hf_local_weights._read_hf_tensor#L40-L49","kind":"function","name":"_read_hf_tensor","path":"scripts/diff_hf_local_weights.py","language":"python","start_line":40,"end_line":49,"context_start_line":20,"context_end_line":69,"code":" if cands:\n cands.sort(key=lambda p: os.path.getmtime(p), reverse=True)\n return cands[0]\n from huggingface_hub import snapshot_download # type: ignore\n return snapshot_download(repo_id=model_id, cache_dir=cache_dir)\n\n\ndef _index_shards(model_dir: str) -> Dict[str, List[str]]:\n index_fp = os.path.join(model_dir, \"model.safetensors.index.json\")\n if not os.path.isfile(index_fp):\n raise FileNotFoundError(f\"Missing index file: {index_fp}\")\n with open(index_fp, \"r\", encoding=\"utf-8\") as fh:\n index_obj = json.load(fh)\n weight_map: Dict[str, str] = index_obj.get(\"weight_map\", {})\n by_file: Dict[str, List[str]] = {}\n 
for k, fn in weight_map.items():\n by_file.setdefault(fn, []).append(k)\n return by_file\n\n\ndef _read_hf_tensor(model_dir: str, key: str) -> torch.Tensor:\n from safetensors import safe_open # type: ignore\n by_file = _index_shards(model_dir)\n for shard_rel, keys in by_file.items():\n if key not in keys:\n continue\n fp = os.path.join(model_dir, shard_rel)\n with safe_open(fp, framework=\"pt\", device=\"cpu\") as f: # type: ignore\n return f.get_tensor(key) # type: ignore[attr-defined]\n raise KeyError(f\"tensor {key} not found in shards\")\n\n\ndef _metrics(a: torch.Tensor, b: torch.Tensor) -> Dict[str, float]:\n a32 = a.detach().to(torch.float32, copy=False)\n b32 = b.detach().to(torch.float32, copy=False)\n diff = (a32 - b32).view(-1)\n l2 = float(diff.norm().item())\n max_abs = float(diff.abs().max().item())\n denom = float(max(1e-12, a32.norm().item()))\n rel = float(l2 / denom)\n cos = float((torch.nn.functional.cosine_similarity(a32.view(1, -1), b32.view(1, -1))).item())\n return {\"l2\": l2, \"max_abs\": max_abs, \"rel_l2\": rel, \"cos\": cos}\n\n\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--layers\", default=\"0\", help=\"Comma-separated layer indices to compare\")\n args = p.parse_args()","source_hash":"138a3eaaa0b0d1529dc8346d14adec02d84cc753d5d0f3625decdd3d1032321a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.diff_hf_local_weights._metrics","uri":"program://Program_Conditioned_Adapter/function/scripts.diff_hf_local_weights._metrics#L52-L61","kind":"function","name":"_metrics","path":"scripts/diff_hf_local_weights.py","language":"python","start_line":52,"end_line":61,"context_start_line":32,"context_end_line":81,"code":" index_obj = json.load(fh)\n weight_map: Dict[str, str] = index_obj.get(\"weight_map\", {})\n by_file: Dict[str, List[str]] = {}\n for k, fn in weight_map.items():\n by_file.setdefault(fn, []).append(k)\n return by_file\n\n\ndef _read_hf_tensor(model_dir: str, key: str) -> torch.Tensor:\n from safetensors import safe_open # type: ignore\n by_file = _index_shards(model_dir)\n for shard_rel, keys in by_file.items():\n if key not in keys:\n continue\n fp = os.path.join(model_dir, shard_rel)\n with safe_open(fp, framework=\"pt\", device=\"cpu\") as f: # type: ignore\n return f.get_tensor(key) # type: ignore[attr-defined]\n raise KeyError(f\"tensor {key} not found in shards\")\n\n\ndef _metrics(a: torch.Tensor, b: torch.Tensor) -> Dict[str, float]:\n a32 = a.detach().to(torch.float32, copy=False)\n b32 = b.detach().to(torch.float32, copy=False)\n diff = (a32 - b32).view(-1)\n l2 = float(diff.norm().item())\n max_abs = float(diff.abs().max().item())\n denom = float(max(1e-12, a32.norm().item()))\n rel = float(l2 / denom)\n cos = float((torch.nn.functional.cosine_similarity(a32.view(1, -1), b32.view(1, -1))).item())\n return {\"l2\": l2, \"max_abs\": max_abs, \"rel_l2\": rel, \"cos\": cos}\n\n\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--layers\", default=\"0\", help=\"Comma-separated layer indices to compare\")\n args = p.parse_args()\n\n os.makedirs(args.cache_dir, exist_ok=True)\n ckpt = _ensure_snapshot(args.model, args.cache_dir)\n\n # Build local model and 
load HF weights via our loader\n from specs.config import ModelConfig\n from model.factory import build_causal_lm\n from model.hf_llama_loader import load_hf_llama_weights_into_local\n\n cfg = json.load(open(os.path.join(ckpt, \"config.json\"), \"r\", encoding=\"utf-8\"))\n use_cuda = torch.cuda.is_available()\n mc = ModelConfig(","source_hash":"138a3eaaa0b0d1529dc8346d14adec02d84cc753d5d0f3625decdd3d1032321a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.diff_hf_local_weights.main","uri":"program://Program_Conditioned_Adapter/function/scripts.diff_hf_local_weights.main#L64-L167","kind":"function","name":"main","path":"scripts/diff_hf_local_weights.py","language":"python","start_line":64,"end_line":167,"context_start_line":44,"context_end_line":173,"code":" if key not in keys:\n continue\n fp = os.path.join(model_dir, shard_rel)\n with safe_open(fp, framework=\"pt\", device=\"cpu\") as f: # type: ignore\n return f.get_tensor(key) # type: ignore[attr-defined]\n raise KeyError(f\"tensor {key} not found in shards\")\n\n\ndef _metrics(a: torch.Tensor, b: torch.Tensor) -> Dict[str, float]:\n a32 = a.detach().to(torch.float32, copy=False)\n b32 = b.detach().to(torch.float32, copy=False)\n diff = (a32 - b32).view(-1)\n l2 = float(diff.norm().item())\n max_abs = float(diff.abs().max().item())\n denom = float(max(1e-12, a32.norm().item()))\n rel = float(l2 / denom)\n cos = float((torch.nn.functional.cosine_similarity(a32.view(1, -1), b32.view(1, -1))).item())\n return {\"l2\": l2, \"max_abs\": max_abs, \"rel_l2\": rel, \"cos\": cos}\n\n\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--layers\", default=\"0\", help=\"Comma-separated layer indices to compare\")\n args = p.parse_args()\n\n os.makedirs(args.cache_dir, exist_ok=True)\n ckpt = _ensure_snapshot(args.model, args.cache_dir)\n\n # Build local model and load HF weights via our loader\n from specs.config import ModelConfig\n from model.factory import build_causal_lm\n from model.hf_llama_loader import load_hf_llama_weights_into_local\n\n cfg = json.load(open(os.path.join(ckpt, \"config.json\"), \"r\", encoding=\"utf-8\"))\n use_cuda = torch.cuda.is_available()\n mc = ModelConfig(\n d_model=int(cfg.get(\"hidden_size\")),\n n_heads=int(cfg.get(\"num_attention_heads\")),\n n_layers=int(cfg.get(\"num_hidden_layers\")),\n d_ff=int(cfg.get(\"intermediate_size\")),\n vocab_size=int(cfg.get(\"vocab_size\")),\n head_dim=int(cfg.get(\"head_dim\", int(cfg.get(\"hidden_size\")) // int(cfg.get(\"num_attention_heads\")))),\n rope_theta=float(cfg.get(\"rope_parameters\", {}).get(\"rope_theta\", cfg.get(\"rope_theta\", 1e6))),\n dtype=(\"bfloat16\" if use_cuda else \"float32\"),\n attn_impl=\"sdpa\",\n rms_norm_eps=float(cfg.get(\"rms_norm_eps\", 1e-6)),\n rope_scaling_type=(cfg.get(\"rope_scaling\", {}) or {}).get(\"type\"),\n rope_scaling_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"factor\"),\n rope_scaling_original_max_position_embeddings=(cfg.get(\"rope_scaling\", {}) or {}).get(\"original_max_position_embeddings\"),\n rope_scaling_low_freq_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"low_freq_factor\"),\n rope_scaling_high_freq_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"high_freq_factor\"),\n )\n n_kv_heads = int(cfg.get(\"num_key_value_heads\", mc.n_heads))\n tie_we = bool(cfg.get(\"tie_word_embeddings\", True))\n 
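The same RoPE fallback chain recurs in every script that maps config.json into ModelConfig; isolated as a helper, under the assumption (mirrored from these scripts) that either rope_parameters.rope_theta or a top-level rope_theta is present, defaulting to 1e6:

def rope_settings(cfg: dict):
    # Prefer the nested rope_parameters.rope_theta, then top-level rope_theta, else 1e6.
    theta = float(cfg.get("rope_parameters", {}).get("rope_theta", cfg.get("rope_theta", 1e6)))
    # rope_scaling may be absent or null; normalize to an empty dict before reading fields.
    scaling = cfg.get("rope_scaling", {}) or {}
    return theta, scaling.get("type"), scaling.get("factor")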
local = build_causal_lm(mc, block=\"llama\", n_kv_heads=n_kv_heads, tie_weights=tie_we)\n load_hf_llama_weights_into_local(local, ckpt)\n local = (local.to(device=\"cuda\", dtype=torch.bfloat16) if use_cuda else local.to(device=\"cpu\", dtype=torch.float32)).eval()\n\n report: Dict[str, Dict[str, float]] = {}\n\n # Embedding and lm_head\n try:\n hf_embed = _read_hf_tensor(ckpt, \"model.embed_tokens.weight\").to(\"cpu\")\n report[\"embed.weight\"] = _metrics(local.embed.weight.data.detach().to(\"cpu\"), hf_embed)\n except Exception:\n report[\"embed.weight\"] = {\"error\": 1.0} # type: ignore\n try:\n hf_lm = _read_hf_tensor(ckpt, \"lm_head.weight\").to(\"cpu\")\n report[\"lm_head.weight\"] = _metrics(local.lm_head.weight.data.detach().to(\"cpu\"), hf_lm)\n except Exception:\n report[\"lm_head.weight\"] = {\"error\": 1.0} # type: ignore\n\n layers = [int(x.strip()) for x in str(args.layers).split(\",\") if x.strip()]\n for li in layers:\n key = f\"layers.{li}\"\n try:\n blk = local.blocks[li]\n except Exception:\n report[key] = {\"error\": 1.0} # type: ignore\n continue\n\n # Attention projections\n for short, hf_name, local_w in (\n (\"q_proj\", f\"model.layers.{li}.self_attn.q_proj.weight\", blk.attn.w_q.weight),\n (\"k_proj\", f\"model.layers.{li}.self_attn.k_proj.weight\", blk.attn.w_k.weight),\n (\"v_proj\", f\"model.layers.{li}.self_attn.v_proj.weight\", blk.attn.w_v.weight),\n (\"o_proj\", f\"model.layers.{li}.self_attn.o_proj.weight\", blk.attn.w_o.weight),\n ):\n try:\n hf_t = _read_hf_tensor(ckpt, hf_name).to(\"cpu\")\n report[f\"{key}.{short}\"] = _metrics(local_w.data.detach().to(\"cpu\"), hf_t)\n except Exception:\n report[f\"{key}.{short}\"] = {\"error\": 1.0} # type: ignore\n\n # Layer norms\n for short, hf_name, local_w in (\n (\"n1\", f\"model.layers.{li}.input_layernorm.weight\", blk.n1.weight),\n (\"n2\", f\"model.layers.{li}.post_attention_layernorm.weight\", blk.n2.weight),\n ):\n try:\n hf_t = _read_hf_tensor(ckpt, hf_name).to(\"cpu\")\n report[f\"{key}.{short}\"] = _metrics(local_w.data.detach().to(\"cpu\"), hf_t)\n except Exception:\n report[f\"{key}.{short}\"] = {\"error\": 1.0} # type: ignore\n\n # MLP down (w_out)\n try:\n hf_down = _read_hf_tensor(ckpt, f\"model.layers.{li}.mlp.down_proj.weight\").to(\"cpu\")\n report[f\"{key}.mlp.down\"] = _metrics(blk.mlp.w_out.weight.data.detach().to(\"cpu\"), hf_down)\n except Exception:\n report[f\"{key}.mlp.down\"] = {\"error\": 1.0} # type: ignore\n\n # MLP fused w_in vs concat(gate, up)\n try:\n hf_gate = _read_hf_tensor(ckpt, f\"model.layers.{li}.mlp.gate_proj.weight\").to(\"cpu\")\n hf_up = _read_hf_tensor(ckpt, f\"model.layers.{li}.mlp.up_proj.weight\").to(\"cpu\")\n fused = torch.cat([hf_gate, hf_up], dim=0).contiguous()\n report[f\"{key}.mlp.w_in\"] = _metrics(blk.mlp.w_in.weight.data.detach().to(\"cpu\"), fused)\n except Exception:\n report[f\"{key}.mlp.w_in\"] = {\"error\": 1.0} # type: ignore\n\n print(json.dumps(report, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n\n","source_hash":"138a3eaaa0b0d1529dc8346d14adec02d84cc753d5d0f3625decdd3d1032321a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"py:scripts.verify_local_vs_hf","uri":"program://Program_Conditioned_Adapter/module/scripts.verify_local_vs_hf#L1-L480","kind":"module","name":"scripts.verify_local_vs_hf","path":"scripts/verify_local_vs_hf.py","language":"python","start_line":1,"end_line":480,"context_start_line":1,"context_end_line":480,"code":"import os\nimport sys\nimport json\nimport gc\nimport torch\nfrom typing 
import Tuple\nfrom model.hf_snapshot import ensure_snapshot\nfrom model.runtime_utils import local_logits_last\n\ndef main() -> None:\n import argparse\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--prompt\", default=\"Hello\")\n p.add_argument(\"--max-tokens\", type=int, default=64)\n p.add_argument(\"--gen-new\", type=int, default=32, help=\"Generate this many new tokens for HF/local side-by-side\")\n p.add_argument(\"--mode\", default=\"hf_guided\", choices=[\"independent\", \"hf_guided\", \"single_step\", \"independent_exact\", \"two_pass_exact\"], help=\"Verification mode\")\n p.add_argument(\"--dtype\", default=\"float32\", choices=[\"float32\", \"bfloat16\"], help=\"Computation dtype for both models during verify\")\n p.add_argument(\"--save-logits\", action=\"store_true\", help=\"Save HF/local last-step logits to disk to minimize RAM/VRAM use\")\n p.add_argument(\"--parity-exact\", action=\"store_true\", help=\"Use explicit matmul+float32 softmax attention for exact HF parity\")\n p.add_argument(\"--out-dir\", default=\"\", help=\"Directory to write saved tensors (defaults to cache-dir/trace)\")\n args = p.parse_args()\n\n os.makedirs(args.cache_dir, exist_ok=True)\n # Force stable attention backend for parity\n os.environ.setdefault(\"ATTN_BACKEND\", \"torch\")\n if args.parity_exact:\n os.environ[\"ATTN_PARITY_EXACT\"] = \"1\"\n\n from transformers import AutoTokenizer, AutoModelForCausalLM # type: ignore\n tok = AutoTokenizer.from_pretrained(args.model, use_fast=True, cache_dir=args.cache_dir)\n x = tok(args.prompt, return_tensors=\"pt\")\n\n use_cuda = torch.cuda.is_available()\n req_dtype = torch.bfloat16 if str(args.dtype) == \"bfloat16\" and use_cuda else torch.float32\n hf_device_map = \"auto\" if use_cuda else None\n\n # HF reference (load in lower precision on GPU to reduce RAM)\n hf = AutoModelForCausalLM.from_pretrained(\n args.model,\n torch_dtype=req_dtype,\n device_map=hf_device_map,\n low_cpu_mem_usage=True,\n cache_dir=args.cache_dir,\n )\n hf.eval()\n with torch.no_grad():\n # Ensure inputs are on the same device as the HF model\n try:\n hf_dev = next(hf.parameters()).device\n except Exception:\n hf_dev = torch.device(\"cpu\")\n x_hf = {k: v.to(hf_dev) for k, v in x.items()}\n out_hf = hf(**x_hf)\n logits_hf = out_hf.logits[:, -1, :].to(device=\"cpu\", dtype=torch.float32)\n hf_logits_path = None\n if args.save_logits:\n out_dir = args.out_dir or os.path.join(args.cache_dir, \"trace\")\n try:\n os.makedirs(out_dir, exist_ok=True)\n except Exception:\n pass\n hf_logits_path = os.path.join(out_dir, \"hf_logits.pt\")\n try:\n torch.save(logits_hf, hf_logits_path)\n except Exception:\n hf_logits_path = None\n\n # Also capture a short HF generation for comparison (force pure greedy, no suppression)\n try:\n try:\n gen_cfg = hf.generation_config # type: ignore[attr-defined]\n gen_cfg = gen_cfg.clone()\n # Force deterministic greedy without suppression/penalties\n gen_cfg.do_sample = False\n gen_cfg.temperature = 1.0\n gen_cfg.top_p = 1.0\n gen_cfg.top_k = None\n gen_cfg.repetition_penalty = 1.0\n gen_cfg.no_repeat_ngram_size = 0\n # Neutralize suppression lists if present\n if hasattr(gen_cfg, \"suppress_tokens\"):\n try:\n gen_cfg.suppress_tokens = None # type: ignore[attr-defined]\n except Exception:\n pass\n if hasattr(gen_cfg, \"begin_suppress_tokens\"):\n try:\n gen_cfg.begin_suppress_tokens = None # type: 
ignore[attr-defined]\n except Exception:\n pass\n except Exception:\n gen_cfg = None # best-effort\n gen_hf = hf.generate(\n **x_hf,\n generation_config=gen_cfg,\n max_new_tokens=int(args.gen_new),\n do_sample=False,\n eos_token_id=tok.eos_token_id,\n )\n try:\n in_len = int(x_hf[\"input_ids\"].shape[1])\n seq = gen_hf[0]\n new = seq[in_len:] if (hasattr(seq, \"shape\") and seq.shape[0] > in_len) else seq\n text_hf = tok.decode(new, skip_special_tokens=True)\n except Exception:\n text_hf = tok.decode(gen_hf[0], skip_special_tokens=True)\n except Exception:\n text_hf = \"[error] hf generate failed\"\n\n # Keep HF loaded when it's needed later for stepwise checks\n keep_hf_loaded = args.mode in (\"hf_guided\", \"single_step\", \"independent_exact\", \"two_pass_exact\")\n\n # If not needed later, free HF before building local to avoid peak GPU memory\n if not keep_hf_loaded:\n try:\n del out_hf\n except Exception:\n pass\n try:\n del hf\n except Exception:\n pass\n gc.collect()\n try:\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n except Exception:\n pass\n\n # Local\n ckpt_dir = ensure_snapshot(args.model, args.cache_dir)\n from specs.config import ModelConfig\n from model.factory import build_causal_lm\n from model.hf_llama_loader import load_hf_llama_weights_into_local\n\n cfg = json.load(open(os.path.join(ckpt_dir, \"config.json\"), \"r\", encoding=\"utf-8\"))\n # Derive pad token id from tokenizer or config\n pad_id = tok.pad_token_id\n if pad_id is None:\n try:\n pad_id = int(cfg.get(\"pad_token_id\")) if (cfg.get(\"pad_token_id\") is not None) else None\n except Exception:\n pad_id = None\n mc = ModelConfig(\n d_model=int(cfg.get(\"hidden_size\")),\n n_heads=int(cfg.get(\"num_attention_heads\")),\n n_layers=int(cfg.get(\"num_hidden_layers\")),\n d_ff=int(cfg.get(\"intermediate_size\")),\n vocab_size=int(cfg.get(\"vocab_size\")),\n head_dim=int(cfg.get(\"head_dim\", int(cfg.get(\"hidden_size\")) // int(cfg.get(\"num_attention_heads\")))),\n rope_theta=float(cfg.get(\"rope_parameters\", {}).get(\"rope_theta\", cfg.get(\"rope_theta\", 1e6))),\n dtype=(\"bfloat16\" if (req_dtype == torch.bfloat16) else \"float32\"),\n attn_impl=\"sdpa\",\n rms_norm_eps=float(cfg.get(\"rms_norm_eps\", 1e-6)),\n rope_scaling_type=(cfg.get(\"rope_scaling\", {}) or {}).get(\"type\"),\n rope_scaling_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"factor\"),\n rope_scaling_original_max_position_embeddings=(cfg.get(\"rope_scaling\", {}) or {}).get(\"original_max_position_embeddings\"),\n rope_scaling_low_freq_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"low_freq_factor\"),\n rope_scaling_high_freq_factor=(cfg.get(\"rope_scaling\", {}) or {}).get(\"high_freq_factor\"),\n pad_token_id=(int(pad_id) if pad_id is not None else None),\n )\n n_kv_heads = int(cfg.get(\"num_key_value_heads\", mc.n_heads))\n tie_we = bool(cfg.get(\"tie_word_embeddings\", True))\n local = build_causal_lm(mc, block=\"llama\", n_kv_heads=n_kv_heads, tie_weights=tie_we)\n load_hf_llama_weights_into_local(local, ckpt_dir)\n if use_cuda:\n local = local.to(device=\"cuda\", dtype=req_dtype).eval()\n x_cuda = {k: v.to(\"cuda\") for k, v in x.items()}\n with torch.no_grad():\n out_loc_fwd = local(input_ids=x_cuda[\"input_ids\"], attention_mask=x_cuda.get(\"attention_mask\", None), return_dict=True)\n logits_local = out_loc_fwd[\"logits\"][:, -1, :].to(device=\"cpu\", dtype=torch.float32)\n loc_logits_path = None\n if args.save_logits:\n out_dir = args.out_dir or os.path.join(args.cache_dir, \"trace\")\n try:\n 
os.makedirs(out_dir, exist_ok=True)\n except Exception:\n pass\n loc_logits_path = os.path.join(out_dir, \"local_logits.pt\")\n try:\n torch.save(logits_local, loc_logits_path)\n except Exception:\n loc_logits_path = None\n # Local generation (greedy); exact parity can use stepwise no-cache recompute\n try:\n if args.parity_exact:\n seq = x_cuda[\"input_ids\"].clone()\n attn = x_cuda.get(\"attention_mask\", None)\n for _ in range(int(args.gen_new)):\n out_step = local(input_ids=seq[:, -1:], attention_mask=attn, return_dict=True)\n logits = out_step[\"logits\"][:, -1, :]\n nxt = torch.argmax(logits, dim=-1).view(1, 1)\n seq = torch.cat([seq, nxt], dim=1)\n if attn is not None:\n attn = torch.cat([attn, torch.ones_like(nxt, device=attn.device, dtype=attn.dtype)], dim=1)\n if tok.eos_token_id is not None and int(nxt[0,0].item()) == int(tok.eos_token_id):\n break\n s0 = seq[0].to(\"cpu\")\n in_len = int(x_cuda[\"input_ids\"].shape[1])\n new = s0[in_len:] if (hasattr(s0, \"shape\") and s0.shape[0] > in_len) else s0\n text_local = tok.decode(new, skip_special_tokens=True)\n else:\n out_loc = local.generate(\n input_ids=x_cuda[\"input_ids\"],\n attention_mask=x_cuda.get(\"attention_mask\", None),\n max_new_tokens=int(args.gen_new),\n do_sample=False,\n eos_token_id=tok.eos_token_id,\n return_dict=True,\n )\n seq = out_loc.get(\"sequences\", None)\n if seq is None:\n seq = out_loc # best-effort\n in_len = int(x_cuda[\"input_ids\"].shape[1])\n s0 = seq[0].to(\"cpu\")\n new = s0[in_len:] if (hasattr(s0, \"shape\") and s0.shape[0] > in_len) else s0\n text_local = tok.decode(new, skip_special_tokens=True)\n except Exception:\n text_local = \"[error] local generate failed\"\n else:\n local = local.to(device=\"cpu\", dtype=torch.float32).eval()\n with torch.no_grad():\n out_loc_fwd = local(input_ids=x[\"input_ids\"], attention_mask=x.get(\"attention_mask\", None), return_dict=True)\n logits_local = out_loc_fwd[\"logits\"][:, -1, :]\n loc_logits_path = None\n if args.save_logits:\n out_dir = args.out_dir or os.path.join(args.cache_dir, \"trace\")\n try:\n os.makedirs(out_dir, exist_ok=True)\n except Exception:\n pass\n loc_logits_path = os.path.join(out_dir, \"local_logits.pt\")\n try:\n torch.save(logits_local.to(device=\"cpu\", dtype=torch.float32), loc_logits_path)\n except Exception:\n loc_logits_path = None\n try:\n if args.parity_exact:\n seq = x[\"input_ids\"].clone()\n attn = x.get(\"attention_mask\", None)\n for _ in range(int(args.gen_new)):\n out_step = local(input_ids=seq[:, -1:], attention_mask=attn, return_dict=True)\n logits = out_step[\"logits\"][:, -1, :]\n nxt = torch.argmax(logits, dim=-1).view(1, 1)\n seq = torch.cat([seq, nxt], dim=1)\n if attn is not None:\n attn = torch.cat([attn, torch.ones_like(nxt, dtype=attn.dtype)], dim=1)\n if tok.eos_token_id is not None and int(nxt[0,0].item()) == int(tok.eos_token_id):\n break\n s0 = seq[0]\n in_len = int(x[\"input_ids\"].shape[1])\n new = s0[in_len:] if (hasattr(s0, \"shape\") and s0.shape[0] > in_len) else s0\n text_local = tok.decode(new, skip_special_tokens=True)\n else:\n out_loc = local.generate(\n input_ids=x[\"input_ids\"],\n attention_mask=x.get(\"attention_mask\", None),\n max_new_tokens=int(args.gen_new),\n do_sample=False,\n eos_token_id=tok.eos_token_id,\n return_dict=True,\n )\n seq = out_loc.get(\"sequences\", None)\n if seq is None:\n seq = out_loc\n in_len = int(x[\"input_ids\"].shape[1])\n s0 = seq[0]\n new = s0[in_len:] if (hasattr(s0, \"shape\") and s0.shape[0] > in_len) else s0\n text_local = tok.decode(new, 
skip_special_tokens=True)\n except Exception:\n text_local = \"[error] local generate failed\"\n\n # Guided/single-step parity and two-pass exact (single model in memory)\n guided_report = {}\n if args.mode in (\"hf_guided\", \"single_step\", \"independent_exact\"):\n import os as _os\n _trace = (_os.getenv(\"VERIFY_TRACE\", \"0\") == \"1\")\n with torch.no_grad():\n # Devices for HF/local\n try:\n hf_dev = next(hf.parameters()).device\n except Exception:\n hf_dev = torch.device(\"cpu\")\n loc_dev = (torch.device(\"cuda\") if use_cuda else torch.device(\"cpu\"))\n # Initialize sequences on devices\n seq_cpu = x[\"input_ids\"].to(torch.long)\n seq_hf = seq_cpu.to(hf_dev)\n seq_loc = seq_cpu.to(loc_dev)\n attn_hf = x.get(\"attention_mask\", None)\n attn_hf = attn_hf.to(hf_dev) if attn_hf is not None else None\n attn_loc = x.get(\"attention_mask\", None)\n attn_loc = attn_loc.to(loc_dev) if attn_loc is not None else None\n steps = int(args.gen_new if args.mode in (\"hf_guided\", \"independent_exact\") else 1)\n mismatches = []\n for t in range(steps):\n # HF one-step (no cache)\n out_h = hf(input_ids=seq_hf, attention_mask=attn_hf)\n logits_h = out_h.logits[:, -1, :]\n next_h = torch.argmax(logits_h, dim=-1)\n # Local one-step (no cache, parity_exact attention if requested)\n out_l = local(input_ids=seq_loc, attention_mask=attn_loc, return_dict=True)\n logits_l = out_l[\"logits\"][:, -1, :]\n next_l = torch.argmax(logits_l, dim=-1)\n if _trace:\n try:\n print(f\"[verify] step={t} indep_exact next_h={int(next_h[0].item())} next_l={int(next_l[0].item())}\")\n except Exception:\n pass\n if int(next_h[0].item()) != int(next_l[0].item()):\n mismatches.append({\n \"step\": t,\n \"hf\": int(next_h[0].item()),\n \"local\": int(next_l[0].item()),\n })\n if args.mode == \"independent_exact\":\n break\n # Advance sequences\n if args.mode == \"hf_guided\":\n # append HF token to both\n seq_hf = torch.cat([seq_hf, next_h.view(1, 1)], dim=1)\n seq_loc = torch.cat([seq_loc, next_h.to(device=seq_loc.device).view(1, 1)], dim=1)\n if attn_hf is not None:\n attn_hf = torch.cat([attn_hf, torch.ones_like(next_h).view(1, 1)], dim=1)\n if attn_loc is not None:\n attn_loc = torch.cat([attn_loc, torch.ones_like(next_h).view(1, 1).to(attn_loc.device)], dim=1)\n else:\n # independent exact: each model appends its own token\n seq_hf = torch.cat([seq_hf, next_h.view(1, 1)], dim=1)\n seq_loc = torch.cat([seq_loc, next_l.view(1, 1)], dim=1)\n if attn_hf is not None:\n attn_hf = torch.cat([attn_hf, torch.ones_like(next_h).view(1, 1)], dim=1)\n if attn_loc is not None:\n attn_loc = torch.cat([attn_loc, torch.ones_like(next_l).view(1, 1)], dim=1)\n # Early stop on eos\n eos_id = tok.eos_token_id\n if eos_id is not None and int(next_h[0].item()) == int(eos_id):\n break\n # Decode final guided sequence\n new_tokens = seq_cpu.shape[1]\n out_seq = seq_hf[0].to(\"cpu\")\n text_guided = tok.decode(out_seq[new_tokens:], skip_special_tokens=True)\n guided_report = {\"guided_text\": text_guided, \"mismatches\": mismatches}\n\n if args.mode == \"two_pass_exact\":\n # Pass 1: HF stepwise on current device; record tokens\n import os as _os\n _trace = (_os.getenv(\"VERIFY_TRACE\", \"0\") == \"1\")\n with torch.no_grad():\n try:\n hf_dev = next(hf.parameters()).device\n except Exception:\n hf_dev = torch.device(\"cpu\")\n seq_cpu = x[\"input_ids\"].to(torch.long)\n seq_hf = seq_cpu.to(hf_dev)\n attn_hf = x.get(\"attention_mask\", None)\n attn_hf = attn_hf.to(hf_dev) if attn_hf is not None else None\n hf_tokens: list[int] = []\n for t in 
range(int(args.gen_new)):\n out_h = hf(input_ids=seq_hf, attention_mask=attn_hf)\n logits_h = out_h.logits[:, -1, :]\n next_h = int(torch.argmax(logits_h, dim=-1)[0].item())\n hf_tokens.append(next_h)\n if _trace:\n print(f\"[verify] pass1 step={t} hf_token={next_h}\")\n seq_hf = torch.cat([seq_hf, torch.tensor([[next_h]], device=seq_hf.device)], dim=1)\n if attn_hf is not None:\n attn_hf = torch.cat([attn_hf, torch.ones((1,1), device=attn_hf.device, dtype=attn_hf.dtype)], dim=1)\n eos_id = tok.eos_token_id\n if eos_id is not None and next_h == int(eos_id):\n break\n # Decode HF text\n out_seq = seq_hf[0].to(\"cpu\")\n text_guided = tok.decode(out_seq[seq_cpu.shape[1]:], skip_special_tokens=True)\n guided_report = {\"guided_text\": text_guided, \"mismatches\": []}\n # Free HF completely before local\n try:\n del hf\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n except Exception:\n pass\n # Pass 2: Local stepwise on GPU/CPU; compare tokens at each step\n # Rebuild local (already built above as `local`); if freed earlier, ensure available\n with torch.no_grad():\n # Initialize from original prompt\n if use_cuda:\n seq_loc = x[\"input_ids\"].to(\"cuda\")\n attn_loc = x.get(\"attention_mask\", None)\n attn_loc = attn_loc.to(\"cuda\") if attn_loc is not None else None\n else:\n seq_loc = x[\"input_ids\"].clone()\n attn_loc = x.get(\"attention_mask\", None)\n mismatches: list[dict] = []\n for step, tok_id in enumerate(hf_tokens):\n out_l = local(input_ids=seq_loc, attention_mask=attn_loc, return_dict=True)\n logits_l = out_l[\"logits\"][:, -1, :]\n next_l = int(torch.argmax(logits_l, dim=-1)[0].item())\n if _trace:\n print(f\"[verify] pass2 step={step} hf_token={int(tok_id)} local_next={next_l}\")\n if next_l != int(tok_id):\n mismatches.append({\"step\": step, \"hf\": int(tok_id), \"local\": next_l})\n break\n # Append HF token (teacher-forced) to keep prefixes identical\n t =\n# ... 
truncated ...","source_hash":"3b25fa7e59fa0580e35ab3013aedc7c72da96a1594722b2ecb06cfb22853d1f1","truncated":true}
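
The two_pass_exact mode implemented above reduces to a compact protocol: pass 1 records the reference model's greedy tokens step by step; pass 2 teacher-forces those same tokens into the local model and reports the first step whose argmax disagrees. A minimal sketch follows, assuming each model is a plain callable from input_ids of shape (1, T) to logits of shape (1, T, vocab); the actual script additionally handles attention masks, device placement, dtype, and freeing the HF model between passes.

import torch

def greedy_tokens(model, input_ids, n_new, eos_id=None):
    # Pass 1: stepwise greedy decode with no KV cache; the full prefix is
    # re-run at every step so numerics match a no-cache reference exactly.
    seq = input_ids.clone()
    out = []
    with torch.no_grad():
        for _ in range(int(n_new)):
            logits = model(seq)[:, -1, :]
            nxt = int(torch.argmax(logits, dim=-1)[0].item())
            out.append(nxt)
            seq = torch.cat([seq, torch.tensor([[nxt]], device=seq.device)], dim=1)
            if eos_id is not None and nxt == int(eos_id):
                break
    return out

def first_mismatch(model, input_ids, ref_tokens):
    # Pass 2: teacher-force the reference tokens so both models always see
    # identical prefixes; report the first step where the argmax diverges.
    seq = input_ids.clone()
    with torch.no_grad():
        for step, tok_id in enumerate(ref_tokens):
            logits = model(seq)[:, -1, :]
            nxt = int(torch.argmax(logits, dim=-1)[0].item())
            if nxt != int(tok_id):
                return {"step": step, "ref": int(tok_id), "local": nxt}
            seq = torch.cat([seq, torch.tensor([[tok_id]], device=seq.device)], dim=1)
    return None

Teacher-forcing in pass 2 is what keeps the comparison exact: once a step diverges, every later token would differ anyway, so only the first mismatching step is meaningful.
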
{"repo_id":"Program_Conditioned_Adapter","entity_id":"file:run.py","uri":"program://Program_Conditioned_Adapter/file/run.py","kind":"file","name":"run.py","path":"run.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport argparse\nfrom pathlib import Path\nimport subprocess\nfrom typing import Dict, Any\nimport json\nimport os as _os\nimport importlib\n\nimport numpy as np\nfrom model.hf_snapshot import ensure_snapshot\nfrom modules.retrieval_policy import RetrievalPolicy # type: ignore\nfrom modules.runner_core import select_region, prepare_citations # type: ignore\n\n\n\n\ndef main() -> None:","source_hash":"22e0ac2e1ea477b2b98389629652af634a8290bc1a59f96b58a4505a31fbf152","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:build.py","uri":"program://Program_Conditioned_Adapter/file/build.py","kind":"file","name":"build.py","path":"build.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport argparse\nimport json\nimport hashlib\nimport subprocess\nfrom datetime import datetime, timezone\nfrom typing import Optional, Dict, Tuple, List\nimport sys\nimport random\nimport platform\nimport importlib\n\nimport numpy as np\n\nfrom modules.embedding import ( # type: ignore\n build_program_embedding,\n build_subgraph_embedding_from_program,\n)\nfrom modules.adapter import ( # type: ignore\n generate_lora_from_embedding,\n generate_lora_from_embedding_torch,","source_hash":"de2f270737807a4186a93dca8d809eb69862aadb302a59053827e8ee84cf8444","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:smoke_repo/attention_demo.py","uri":"program://Program_Conditioned_Adapter/file/smoke_repo/attention_demo.py","kind":"file","name":"smoke_repo/attention_demo.py","path":"smoke_repo/attention_demo.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nA tiny, self-contained attention demo used for repo-grounding smoke tests.\n\nThe functions intentionally include clear docstrings and simple math so\nretrieval by the runner can surface these lines in context windows.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Tuple\nimport math\n\n\ndef attention_score(query: float, key: float) -> float:\n \"\"\"\n Compute an unnormalized attention score between a scalar query and key.\n\n This mirrors the dot-product component in scaled dot-product attention for\n the 1-D case.
In higher dimensions, the score is the dot product of the\n query and key vectors.\n \"\"\"","source_hash":"879bae3bd5b3cef532e3c334000865c03b308eae03d4133f0446b5b6014ffdb4","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/embedding.py","uri":"program://Program_Conditioned_Adapter/file/modules/embedding.py","kind":"file","name":"modules/embedding.py","path":"modules/embedding.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport os\nimport re\nimport numpy as np\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\ntry:\n import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\nfrom modules.program_graph import ProgramGraph, Artifact # type: ignore\n\nEMBED_DIM_DEFAULT = 128\nHASH_SEEDS = [1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344]\n\ndef auto_model_dims(model_id: str, cache_dir: Optional[str]) -> Tuple[int, int]:\n try:\n from transformers import AutoConfig # type: ignore\n\n cfg = AutoConfig.from_pretrained(model_id, cache_dir=cache_dir)\n n_layers = int(getattr(cfg, \"num_hidden_layers\", 0) or 0)\n d_model = int(getattr(cfg, \"hidden_size\", 0) or 0)","source_hash":"34d3b52ea84ae1ac09f84f406f99680a8a2c348a2cc6bb772d41cf000edea73d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/runtime.py","uri":"program://Program_Conditioned_Adapter/file/modules/runtime.py","kind":"file","name":"modules/runtime.py","path":"modules/runtime.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple\nimport os\nimport subprocess\n\nimport numpy as np\nimport torch\n\n# Reuse existing module utilities\n# No example-specific imports in core runtime\nfrom model.inspect import detect_target_names_from_model_full\n\n@dataclass\nclass OTFFlags:\n of_sources: str = \"question\" # \"zoom\" | \"question\"\n zoom_symbol: Optional[str] = None\n zoom_radius: int = 1\n include_text: bool = True\n text_max_bytes: int = 250_000","source_hash":"7cf1fff47ef64c55d62db5bbc76e1e5724bfa452ac4dd0aab0a8b78b84144154","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/caches.py","uri":"program://Program_Conditioned_Adapter/file/modules/caches.py","kind":"file","name":"modules/caches.py","path":"modules/caches.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport json\nfrom typing import Dict, Iterable, Iterator, List, Optional, Tuple\n\ndef _ensure_path(p: Optional[str]) -> Optional[str]:\n if not p:\n return None\n pth = os.path.abspath(os.path.expanduser(os.path.expandvars(p)))\n return pth if os.path.exists(pth) else None\n\ndef load_manifest(adapters_dir: str) -> Dict:\n mf = os.path.join(adapters_dir, \"manifest.json\")\n try:\n return json.loads(open(mf, \"r\", encoding=\"utf-8\").read())\n except Exception:\n return {}\n\ndef resolve_cache_path(manifest: Dict, key: str, default_path: str) -> str:\n try:","source_hash":"0a856fc667f212481185738dda8b448f7c217ce3c7f522605de45f24200f615f","truncated":false} 
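
A short usage sketch for the manifest-backed cache resolution in modules/caches.py above. The directory and the "windows" cache key are illustrative, and resolve_cache_path is assumed (its body is truncated above) to return the manifest-pinned path when one exists and the supplied default otherwise.

from modules.caches import load_manifest, resolve_cache_path

adapters_dir = "artifacts/base_adapters"  # illustrative location
manifest = load_manifest(adapters_dir)    # returns {} when manifest.json is absent or unreadable
# "windows" is an illustrative key; the default path is used when the manifest has no entry.
windows_path = resolve_cache_path(manifest, "windows", adapters_dir + "/windows.jsonl")
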
{"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/program_graph.py","uri":"program://Program_Conditioned_Adapter/file/modules/program_graph.py","kind":"file","name":"modules/program_graph.py","path":"modules/program_graph.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Iterable, Protocol, Optional, Tuple, Dict, List\n\n\n# Core types\nEntityId = str\n\n\n@dataclass(frozen=True)\nclass Span:\n start_line: int\n end_line: int # inclusive, 1-based\n\n\n@dataclass(frozen=True)\nclass Entity:\n uri: str\n id: EntityId\n kind: str","source_hash":"bf3058c09f5fd78d8feeace4fe40cfb9c6d563e96d1f3a62421067d9ea0bead6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/context.py","uri":"program://Program_Conditioned_Adapter/file/modules/context.py","kind":"file","name":"modules/context.py","path":"modules/context.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# function-first packer (lift from run_repo_adapter.py)\n\nfrom typing import List, Optional, Tuple\nimport os\nimport torch # type: ignore\n\n\ndef pack_context_heads(program_root: str, files: List[str], tok, budget_tokens: int) -> str:\n lines_out: List[str] = [\"Program snippets:\"]\n used = 0\n for rel in files:\n abs_fp = rel if os.path.isabs(rel) else os.path.abspath(os.path.join(program_root, rel))\n try:\n src_lines = open(abs_fp, \"r\", encoding=\"utf-8\", errors=\"ignore\").read().splitlines()\n except Exception:\n continue\n head_n = min(len(src_lines), 120)\n block = [f\"[ctx] path: {os.path.relpath(abs_fp, program_root)}:1-{head_n}\"] + src_lines[:head_n] + [\"\"]\n text = \"\\n\".join(block) + \"\\n\"\n t = len(tok(text).input_ids)\n if used + t > budget_tokens:","source_hash":"832e3907fe931392f70bd4f4cbdd7f39b18fafc933f21a08ac724b5f97582194","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/priors.py","uri":"program://Program_Conditioned_Adapter/file/modules/priors.py","kind":"file","name":"modules/priors.py","path":"modules/priors.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":1,"code":"# kbann_priors derivation, round_lora utilities (move from run_repo_adapter.py/build_repo_adapter.py)","source_hash":"9c70796ea58c2b80959642f1ca2f16d8f440ff5e286421f865314e6840912f36","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/targets.py","uri":"program://Program_Conditioned_Adapter/file/modules/targets.py","kind":"file","name":"modules/targets.py","path":"modules/targets.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":18,"code":"from typing import Optional, Dict, Tuple\n\ndef parse_target_shapes(arg: Optional[str]) -> Optional[Dict[str, Tuple[int, int]]]:\n if not arg:\n return None\n result: Dict[str, Tuple[int, int]] = {}\n try:\n parts = [p.strip() for p in str(arg).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p or \":\" not in p:\n continue\n name, dims = p.split(\"=\", 1)\n a, b = dims.split(\":\", 1)\n result[name.strip()] = (int(a), int(b))\n return result or None\n except Exception:\n return None\n","source_hash":"0ce757281692921280250e1e33a68a68b4dd9db9013497f1b9ea4450c24718d3","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/capacity.py","uri":"program://Program_Conditioned_Adapter/file/modules/capacity.py","kind":"file","name":"modules/capacity.py","path":"modules/capacity.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\n\n\ndef _parse_weights(spec: str) -> Dict[str, float]:\n # accept both \"program\" and \"repo\" as primary component key\n out: Dict[str, float] = {\"program\": 0.4, \"subgraph\": 0.4, \"question\": 0.2}\n try:\n parts = [p.strip() for p in str(spec).split(\",\") if p.strip()]\n for p in parts:\n if \"=\" not in p:\n continue\n k, v = p.split(\"=\", 1)\n out[str(k).strip()] = float(v)\n except Exception:\n pass\n s = float(sum(max(0.0, v) for v in out.values()))\n if s > 0:","source_hash":"05e33fad6c4d97f0d8b3e970430186b95b71f20b74740aa0b07b48132b21c2a2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/retrieval_policy.py","uri":"program://Program_Conditioned_Adapter/file/modules/retrieval_policy.py","kind":"file","name":"modules/retrieval_policy.py","path":"modules/retrieval_policy.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Dict, Tuple, Iterable, Optional, List, Set\nimport math\n\nfrom .program_graph import ProgramGraph, Entity, Edge, Span\n\n\n@dataclass\nclass RetrievalMix:\n sim: float = 0.6\n struct: float = 0.4\n temp: float = 0.7\n\n @staticmethod\n def parse(spec: Optional[str], temp: Optional[float]) -> \"RetrievalMix\":\n if not spec:\n return RetrievalMix(temp=(temp if isinstance(temp, (int, float)) else 0.7))\n w_sim = 0.0\n w_struct = 0.0","source_hash":"6f111be18049fe358cd574315edf2941ad2bc49294b63aa9e37331dabf2c8308","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/provenance.py","uri":"program://Program_Conditioned_Adapter/file/modules/provenance.py","kind":"file","name":"modules/provenance.py","path":"modules/provenance.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import subprocess\nfrom typing import Optional\n\n\ndef git_commit_sha(repo_root: str) -> Optional[str]:\n try:\n sha = subprocess.check_output(\n [\"git\", \"-C\", repo_root, \"rev-parse\", \"HEAD\"], stderr=subprocess.DEVNULL, text=True\n ).strip()\n return sha if sha else None\n except Exception:\n return None\n\ndef git_tree_sha(repo_root: str) -> Optional[str]:\n \"\"\"Return the HEAD tree SHA if available (pins exact tracked file set).\"\"\"\n try:\n sha = subprocess.check_output(\n [\"git\", \"-C\", repo_root, \"rev-parse\", \"HEAD^{tree}\"], stderr=subprocess.DEVNULL, text=True\n ).strip()\n return sha if sha else None\n except Exception:","source_hash":"1681deb157113b893a10b6d1d63ce8939073f501432c3ab1971b7fcf75942fa3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/adapter.py","uri":"program://Program_Conditioned_Adapter/file/modules/adapter.py","kind":"file","name":"modules/adapter.py","path":"modules/adapter.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport numpy as np\nimport math\nimport os\nimport json\nimport re\nfrom .embedding import 
_stable_hash\n\ndef _make_random_matrix(shape: Tuple[int, int], *, seed: int) -> np.ndarray:\n rng = np.random.default_rng(seed)\n # Xavier uniform\n limit = math.sqrt(6.0 / float(shape[0] + shape[1]))\n return rng.uniform(-limit, limit, size=shape).astype(np.float32)\n\ndef generate_lora_from_embedding(\n z: np.ndarray,\n *,\n d_model: int,\n num_layers: int,\n rank: int = 8,\n seed: int = 0,","source_hash":"b3a3d83adc4e6e7e862319c4ece6e97abf2b17399e2a67e387c23a34a8c89120","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/mixing.py","uri":"program://Program_Conditioned_Adapter/file/modules/mixing.py","kind":"file","name":"modules/mixing.py","path":"modules/mixing.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# delta-cap guard (lift from modular.py)\nimport os\nimport re\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport torch\n\nfrom blocks.targets import targets_map\nfrom blocks.utils import getattr_nested\n\ndef register_hook_mixed_adapters(\n model: Any,\n base_layers: List[Dict[str, Dict[str, np.ndarray]]],\n sub_layers: Optional[List[Dict[str, Dict[str, np.ndarray]]]],\n *,\n alpha_star: float,\n g_sub: float,\n rank: int,\n beta: float,\n target_weights: Optional[Dict[str, float]] = None,","source_hash":"3aaab206d84ec7b5730e0188b04bb9cad54225c7fd167810018dae6d680836c5","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/telemetry.py","uri":"program://Program_Conditioned_Adapter/file/modules/telemetry.py","kind":"file","name":"modules/telemetry.py","path":"modules/telemetry.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":1,"code":"# telemetry meta assembly and write (from run_repo_adapter.py)","source_hash":"fd0ec51c4fcadc9d7c93e0efda60fd8e94ecf12cd65e99e5d50b96ed5decaba1","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/program_state.py","uri":"program://Program_Conditioned_Adapter/file/modules/program_state.py","kind":"file","name":"modules/program_state.py","path":"modules/program_state.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, asdict\nfrom typing import Any, Dict, List, Tuple, Optional\nimport json\nimport hashlib\nimport os\nimport time\n\n\n@dataclass\nclass ProgramState:\n root: str\n candidates_modules: List[str]\n candidates_files: List[str]\n citations: List[Tuple[str, int, int]]\n vec: Optional[List[float]] = None\n vec_weight: float = 1.0\n H: float = 0.0\n behavior_log: List[Dict[str, Any]] = None # type: ignore[assignment]\n","source_hash":"0e4e6fd577a0dfbc60aa6cd008b4b4e7e9713f145038153345ebff4284b53541","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/__init__.py","uri":"program://Program_Conditioned_Adapter/file/modules/__init__.py","kind":"file","name":"modules/__init__.py","path":"modules/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":6,"code":"from .adapter import (\n generate_lora_from_embedding,\n generate_lora_from_embedding_torch,\n save_npz,\n load_adapters_npz\n)","source_hash":"d8ffba35d5c3a727aa4f6bc6c7792cbc4f08535cc436d7447b9fd4cc141d7b57","truncated":false} 
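
The deterministic initializer in modules/adapter.py above is small enough to restate standalone. A sketch follows; the zero-initialized B factor is the common LoRA convention (not shown in the truncated record), chosen so the initial delta A @ B is a no-op, and the shapes are illustrative.

import math
import numpy as np

def xavier_uniform(shape, seed):
    # Same recipe as _make_random_matrix: a seeded generator plus the
    # Xavier-uniform limit sqrt(6 / (fan_in + fan_out)), emitted as float32.
    rng = np.random.default_rng(seed)
    limit = math.sqrt(6.0 / float(shape[0] + shape[1]))
    return rng.uniform(-limit, limit, size=shape).astype(np.float32)

A = xavier_uniform((4096, 8), seed=1337)   # illustrative d_model x rank factor
B = np.zeros((8, 4096), dtype=np.float32)  # zero "B" so the initial delta A @ B is zero
assert not (A @ B).any()

Seeding the generator per matrix, rather than relying on global RNG state, is presumably what makes adapter regeneration reproducible across runs.
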
{"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/runner_core.py","uri":"program://Program_Conditioned_Adapter/file/modules/runner_core.py","kind":"file","name":"modules/runner_core.py","path":"modules/runner_core.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom typing import Dict, List, Any, Tuple\n\nfrom .program_graph import ProgramGraph, Entity\nfrom .retrieval_policy import RetrievalPolicy\nfrom .citations import CitationManager, CitationPolicy\n\n\ndef select_region(query: str, pg: ProgramGraph, policy: RetrievalPolicy, top_k: int = 16) -> List[str]:\n scores = policy.score_entities(query, pg)\n ids = sorted(scores.keys(), key=lambda k: scores.get(k, 0.0), reverse=True)[:max(1, int(top_k))]\n return ids\n\n\ndef prepare_citations(units: List[Dict[str, Any]], region_entity_ids: List[str], pg: ProgramGraph, citations_policy: Dict[str, Any], manifest: Dict[str, Any]) -> List[Dict[str, Any]]:\n cm = CitationManager(\n policy=CitationPolicy(\n enforce=bool(citations_policy.get(\"enforce\", True)),\n per_paragraph=bool(citations_policy.get(\"per_paragraph\", False)),\n repair=bool(citations_policy.get(\"repair\", True)),","source_hash":"b602b689f9597f4953f5e8fb2d98e89141b2eeeb7e2ac5b723a8c919e3ffeac2","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/interpret.py","uri":"program://Program_Conditioned_Adapter/file/modules/interpret.py","kind":"file","name":"modules/interpret.py","path":"modules/interpret.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# ActivationTracer hooks, capture pipeline, write JSON (lift from run_repo_adapter.py)\n\nfrom typing import Dict, Tuple\nimport torch\n\n\n\ndef is_block(name: str, _m: torch.nn.Module) -> bool:\n if not name.startswith(\"model.layers.\"):\n return False\n rest = name[len(\"model.layers.\"):]\n return rest.isdigit() and \".\" not in rest\n\n\ndef block_out_hook(_key: str, _m: torch.nn.Module, _inputs: Tuple[torch.Tensor, ...], output: torch.Tensor) -> torch.Tensor | None:\n try:\n if isinstance(output, torch.Tensor):\n return output\n if isinstance(output, (tuple, list)) and output:\n v = output[0]\n return v if isinstance(v, torch.Tensor) else None","source_hash":"c5a667f6e9c66d400206c5946e5f83a9140f312b84b3ce2df8486feec5d279a9","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/peft.py","uri":"program://Program_Conditioned_Adapter/file/modules/peft.py","kind":"file","name":"modules/peft.py","path":"modules/peft.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from typing import Dict, Any, List, Optional\nimport os\nimport re\nfrom model.inspect import detect_target_names_from_model_full\n\ndef infer_target_names(model_id: str) -> Dict[str, str]:\n names = detect_target_names_from_model_full(model_id, target_regex=None) or {}\n # names maps short -> path within the first layer subtree\n # Example: {\"q_proj\": \"self_attn.q_proj\", \"o_proj\": \"self_attn.o_proj\", ...}\n return names\n\ndef save_peft_like(out_dir: str, adapters: Dict[str, Any], *, r: int, alpha: int, target_modules: List[str], bias: str = \"none\", int8: bool = False, target_paths: Optional[Dict[str, str]] = None) -> None:\n \"\"\"Write a minimal PEFT LoRA config + tensors for quick benchmarking.\n\n Note: This is a best-effort exporter; users may still need to map names 
depending on the model arch.\n \"\"\"\n try:\n import json as _json\n cfg = {\n \"peft_type\": \"LORA\",\n \"r\": int(r),","source_hash":"72e27cf7c5a21b205beb636007c461e0551496d8682fc27ece0a53abf0c30e4a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/runner.py","uri":"program://Program_Conditioned_Adapter/file/modules/runner.py","kind":"file","name":"modules/runner.py","path":"modules/runner.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport json\nimport time\nfrom typing import Optional, Tuple, List, Dict, Any, Set\n\nimport numpy as np\nimport torch\nfrom data.tokenizer import LocalLlamaTokenizer\n\nfrom modules.adapter import (\n load_adapters_npz,\n generate_lora_from_embedding,\n)\nfrom model.llama_bootstrap import build_local_llama_from_snapshot\nfrom model.hf_snapshot import ensure_snapshot\nfrom blocks.targets import targets_map\nfrom blocks.inspect import infer_target_shapes\nfrom model.inspect import detect_target_names_from_model_full\nfrom modules.mixing import (","source_hash":"11be15f8459cb98c8b70b55a10f09709239dcc7bf8ddb18fbfb933e84a9fb858","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:modules/citations.py","uri":"program://Program_Conditioned_Adapter/file/modules/citations.py","kind":"file","name":"modules/citations.py","path":"modules/citations.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Any, Optional, Tuple\nimport hashlib\nimport time\nimport re\n\nfrom .program_graph import ProgramGraph, Span, ResolvedAnchor\n\n\n@dataclass\nclass CitationPolicy:\n enforce: bool = True\n per_paragraph: bool = False\n repair: bool = True\n\n\ndef _extract_identifier_tokens(text: str) -> List[str]:\n toks = re.findall(r\"`([A-Za-z_][A-Za-z0-9_\\.]+)`\", text or \"\")\n toks2 = re.findall(r\"\\b([A-Za-z_][A-Za-z0-9_]*)\\s*\\(\", text or \"\")","source_hash":"0ed4640f87c5d00905e08557e24dc403d0f9b6e41bbc3c74dd7a996b0ed1a3d3","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/dataset_trainer/run_eval_datasets.py","uri":"program://Program_Conditioned_Adapter/file/examples/dataset_trainer/run_eval_datasets.py","kind":"file","name":"examples/dataset_trainer/run_eval_datasets.py","path":"examples/dataset_trainer/run_eval_datasets.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Dict, Any, List, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\n\n@dataclass\nclass EvalPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\tstate_path: Path\n\n","source_hash":"351b6bd1bbaebdc3e50fe58e362d28dc76d73dc2fbd79cc0a344e38af6699749","truncated":false} 
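
A self-contained sketch of the identifier extraction that modules/citations.py above opens with: backticked dotted names plus bare names that precede a call "(". The record truncates before showing how the two lists are combined, so the concatenation here is an assumption.

import re

def extract_identifier_tokens(text):
    # Mirrors _extract_identifier_tokens: backticked dotted identifiers,
    # then bare identifiers immediately followed by an opening paren.
    toks = re.findall(r"`([A-Za-z_][A-Za-z0-9_\.]+)`", text or "")
    toks2 = re.findall(r"\b([A-Za-z_][A-Za-z0-9_]*)\s*\(", text or "")
    return toks + toks2

extract_identifier_tokens("The runner calls `modules.runner_core.select_region` before prepare_citations(...)")
# -> ["modules.runner_core.select_region", "prepare_citations"]
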
{"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/dataset_trainer/run_llama_lora_train_mbpp.py","uri":"program://Program_Conditioned_Adapter/file/examples/dataset_trainer/run_llama_lora_train_mbpp.py","kind":"file","name":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","path":"examples/dataset_trainer/run_llama_lora_train_mbpp.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Dict, Any\n\nimport torch\nimport torch.nn.functional as F\n\n# Minimal dependencies: transformers + peft + datasets\n\n\n@dataclass\nclass TrainPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\n","source_hash":"477173693274c11f5b45f2d87dd0c6d3b5a72a018c2b0a91e6a5fda7e343bed6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/dataset_trainer/run_smoke_example.py","uri":"program://Program_Conditioned_Adapter/file/examples/dataset_trainer/run_smoke_example.py","kind":"file","name":"examples/dataset_trainer/run_smoke_example.py","path":"examples/dataset_trainer/run_smoke_example.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict, Any\nimport math\nimport random\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nROOT = EX_DIR\nPG_BACKEND = \"examples.scripts.dataset_graph:DatasetProgramGraph\"\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n","source_hash":"72b5c19646821abbe0c53b72778f365d64ba91e50a72a0c2ced947a5560e6e48","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/dataset_trainer/run_smoke_complete_example.py","uri":"program://Program_Conditioned_Adapter/file/examples/dataset_trainer/run_smoke_complete_example.py","kind":"file","name":"examples/dataset_trainer/run_smoke_complete_example.py","path":"examples/dataset_trainer/run_smoke_complete_example.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nROOT = EX_DIR\nPG_BACKEND = \"examples.scripts.dataset_graph:DatasetProgramGraph\"\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\ndef _run(cmd: List[str]) -> int:\n\tprint(\"[run]\", \" \".join(cmd))","source_hash":"635c64cf46b701cd565736e7f2bb376fa4d2a50e42f19c4c7e3fcef2eba4c8f6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/dataset_trainer/program_config.py","uri":"program://Program_Conditioned_Adapter/file/examples/dataset_trainer/program_config.py","kind":"file","name":"examples/dataset_trainer/program_config.py","path":"examples/dataset_trainer/program_config.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import 
dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n\trequire_citations: bool = False\n\tcitations_per_paragraph: bool = False\n\tretrieval_policy: str = \"sim:0.5,struct:0.5\"\n\tretrieval_temp: float = 0.6\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n\tadapters_dir: Path\n\tout_dir: Path\n\ttelemetry_path: Path\n\tprogram_state_path: Optional[Path] = None","source_hash":"129f956b9b6cd80b8d3a84c0389fe0a66e0909d562fe3d459925869cdf56eb85","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/dataset_trainer/run_mbpp_test_eval.py","uri":"program://Program_Conditioned_Adapter/file/examples/dataset_trainer/run_mbpp_test_eval.py","kind":"file","name":"examples/dataset_trainer/run_mbpp_test_eval.py","path":"examples/dataset_trainer/run_mbpp_test_eval.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport sys\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict\n\nimport torch\nfrom torch.utils.data import Dataset\n\nROOT = EX_DIR\ntry:\n\tfrom .program_config import load_program_config # type: ignore # noqa: E402\nexcept Exception:\n\tfrom program_config import load_program_config # type: ignore # noqa: E402\n\n\nclass SimpleLmDataset(Dataset):\n\tdef __init__(self, tokenizer, texts: List[str], max_len: int = 128) -> None:\n\t\tself.tok = tokenizer","source_hash":"4a8585424d951009c2013785d3f0e83ddf8f75cbd6510db7aad9b48a641a59cc","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_planning/run_smoke_example.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_planning/run_smoke_example.py","kind":"file","name":"examples/python_repo_grounded_planning/run_smoke_example.py","path":"examples/python_repo_grounded_planning/run_smoke_example.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n# Resolve program_conditioned_adapter directory directly from this file\nEX_DIR = Path(__file__).resolve().parents[2]\nSMOKE_REPO = EX_DIR / \"smoke_repo\"\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n# Program configuration\nfrom .program_config import load_program_config # noqa: E402\n\n\ndef _run(cmd: list[str]) -> int:\n print(\"[run]\", \" \".join(cmd))\n return subprocess.call(cmd)\n\n\ndef main() -> None:\n cfg = load_program_config(str(SMOKE_REPO))","source_hash":"90a757abf38e6a5c96d8296e40920d18910bfd0e26b9576dd1ffe81cc7fb8786","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_planning/program_config.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_planning/program_config.py","kind":"file","name":"examples/python_repo_grounded_planning/program_config.py","path":"examples/python_repo_grounded_planning/program_config.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4,plan:0.2\"\n 
retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n adapters_dir: Path\n knowledge_dir: Path\n program_state_path: Optional[Path] = None\n","source_hash":"b27f3c01fb9e7472f3641d69d5ee00db578c62f3ab0f3ef5b59f063082bf7085","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_planning/smoke_plan.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_planning/smoke_plan.py","kind":"file","name":"examples/python_repo_grounded_planning/smoke_plan.py","path":"examples/python_repo_grounded_planning/smoke_plan.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nimport subprocess\n\nEX_DIR = Path(\"/data/transformer_10/examples/program_conditioned_adapter\")\nSMOKE_PROG = Path(\"/data/transformer_10/examples/program_conditioned_adapter/smoke_repo\")\nART_DIR = EX_DIR / \"examples\" / \"python_repo_grounded_planning\" / \"artifacts\" / \"smoke_planning\"\n\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\nEMIT_MOD = \"examples.python_repo_grounded_planning.emit_planning_knowledge\"\n\n\ndef _run(argv: list[str]) -> int:\n print(\"[run]\", \" \".join(argv))\n return subprocess.call(argv)\n\n\ndef main() -> None:\n ART_DIR.mkdir(parents=True, exist_ok=True)","source_hash":"f4465ac41da44f719cfa99a4831661f80813ad48f5e2f4e6c1b806c7a6f018e4","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_planning/emit_planning_knowledge.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_planning/emit_planning_knowledge.py","kind":"file","name":"examples/python_repo_grounded_planning/emit_planning_knowledge.py","path":"examples/python_repo_grounded_planning/emit_planning_knowledge.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport argparse\nimport json\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Optional, Set\nimport importlib\nimport numpy as np\n\n\ndef _load_symbol(path: str):\n mod, _, attr = path.partition(\":\")\n m = importlib.import_module(mod)\n return getattr(m, attr)\n\n\ndef _write_jsonl(path: Path, rows: List[Dict]) -> None:\n with path.open(\"w\", encoding=\"utf-8\") as f:\n for r in rows:\n f.write(json.dumps(r) + \"\\n\")","source_hash":"1577ce0ce444b6f02d027e228a131a0b738b5ee812804494ea9dad0b712240f0","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/embedding.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/embedding.py","kind":"file","name":"examples/scripts/embedding.py","path":"examples/scripts/embedding.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from typing import Any, Dict, List, Optional, Tuple\nimport os\nimport re\nimport numpy as np\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\ntry:\n import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\n\nfrom examples.scripts.code_graph import CodeGraph\n\ndef build_repo_embedding(\n repo_root: str,\n *,\n dim: int = EMBED_DIM_DEFAULT,\n seed: int = 0,\n include_text: bool = False,\n text_max_bytes: int = 0,\n max_text_tokens: int = 0,\n text_weight: float = 
0.25,","source_hash":"70abd33382a7f11c15e60730fe6f0eadf3675016fd09e534bc3927fa63f91b54","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/mbpp_loader.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/mbpp_loader.py","kind":"file","name":"examples/scripts/mbpp_loader.py","path":"examples/scripts/mbpp_loader.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom typing import List, Tuple\n\n\ndef load_mbpp_texts(max_n: int = 128, split: str = \"train\") -> Tuple[List[str], int]:\n\t\"\"\"\n\tLoad MBPP via Hugging Face datasets and return a list of training texts.\n\tEach text is prompt + solution combined to form a code-aware LM sample.\n\tReturns (texts, total_available).\n\t\"\"\"\n\ttry:\n\t\tfrom datasets import load_dataset # type: ignore\n\texcept Exception:\n\t\treturn [], 0\n\t# Try common configs in order of availability\n\tds = None\n\tfor name in (\"mbpp\",):\n\t\tfor subset in (\"sanitized\", None):\n\t\t\ttry:\n\t\t\t\tif subset is None:","source_hash":"e4cb070c07272a433640e767ea3100b730d7c7d7a487bdd94c82694672add025","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/codegraph_core.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/codegraph_core.py","kind":"file","name":"examples/scripts/codegraph_core.py","path":"examples/scripts/codegraph_core.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport ast\nimport hashlib\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple, Iterable, Set\n\n\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str","source_hash":"27d2c9fbe4b62cf66c684b76a8ffba2ba3dad061314037a111259361fe252990","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/safety_policy_guard.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/safety_policy_guard.py","kind":"file","name":"examples/scripts/safety_policy_guard.py","path":"examples/scripts/safety_policy_guard.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom typing import Any, Dict, List\n\n\ndef enforce(policy: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tApply simple safety gates like 'apply_patch requires tests_green' etc.\n\tReturns {'ok': bool, 'missing': [..]}.\n\t\"\"\"\n\treqs: List[str] = list(policy.get(\"requires\", []))\n\tpassed = []\n\tmissing = []\n\tfor r in reqs:\n\t\tif context.get(r, False):\n\t\t\tpassed.append(r)\n\t\telse:\n\t\t\tmissing.append(r)\n\treturn {\"ok\": len(missing) == 0, \"missing\": missing}\n\n","source_hash":"039ac5716a10a9122527dbe0e9a4365c91d500b4760d2c2b2574a20718c91588","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/benchmark_verifier.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/benchmark_verifier.py","kind":"file","name":"examples/scripts/benchmark_verifier.py","path":"examples/scripts/benchmark_verifier.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport subprocess\nfrom typing import Any, Dict, List, Optional\n\n\ndef run_benchmark_verifier(cmd: Optional[List[str]], env: Optional[Dict[str, str]] = None, timeout_sec: int = 120) -> bool:\n\t\"\"\"\n\tRuns an external verifier command (official checker) and returns True on success.\n\tWhen cmd is None, returns True (no-op) for smoke runs.\n\t\"\"\"\n\tif not cmd:\n\t\treturn True\n\ttry:\n\t\trc = subprocess.call(cmd, env=env, timeout=timeout_sec) # type: ignore\n\texcept Exception:\n\t\treturn False\n\treturn rc == 0\n\n\n","source_hash":"08cf695592dc82fbb11190555a8692e4f08a45c3b3bca0da4b74e9261aa57dbd","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/benchmark_adapter_synth.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/benchmark_adapter_synth.py","kind":"file","name":"examples/scripts/benchmark_adapter_synth.py","path":"examples/scripts/benchmark_adapter_synth.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom typing import List, Dict, Any\n\n\ndef synth_benchmark_heads(adapters_dir: str, datasets: List[str]) -> List[str]:\n\t\"\"\"\n\tSmoke-level synthesis of benchmark-aware adapter heads.\n\tCreates tiny shard stubs under adapters/shards/* so run.py can pick them up.\n\t\"\"\"\n\tadir = Path(adapters_dir)\n\tshards_dir = adir / \"shards\"\n\tshards_dir.mkdir(parents=True, exist_ok=True)\n\tcreated: List[str] = []\n\tfor spec in datasets:\n\t\tds_name = spec.replace(\":\", \"_\").replace(\"/\", \"_\")\n\t\tpath = shards_dir / f\"benchmark_{ds_name}_head.json\"","source_hash":"8342d0ac5598022ffad9e25e34a7c50ec930bd55f84a0db9d5495d3eefb74d9e","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/python_repo_graph.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/python_repo_graph.py","kind":"file","name":"examples/scripts/python_repo_graph.py","path":"examples/scripts/python_repo_graph.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nfrom typing import Iterable, List, Tuple, Dict, Optional, Set\n\nfrom modules.program_graph import (\n Entity,\n Edge,\n Artifact,\n Span,\n ResolvedAnchor,\n EntityId,\n)\nfrom .codegraph_core import CodeGraph, CGEntity, CGEdge\nfrom .repo_graph import RepoGraph, artifact_uri, program_id_for_repo, parse_program_uri\n\n\ndef _entity_uri(program_id: str, cg: CGEntity) -> str:\n if cg.kind == \"module\" or cg.kind == \"test_module\":\n resource = cg.name\n kind = \"module\"","source_hash":"8431b753d44b1a7766f1eec1f83f140a54db66b26eaa0d51a393859e7aabb751","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/repo_graph.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/repo_graph.py","kind":"file","name":"examples/scripts/repo_graph.py","path":"examples/scripts/repo_graph.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport re\nimport hashlib\nfrom typing import Iterable, List, Tuple, Dict, Optional, Set\n\nfrom modules.program_graph import (\n ProgramGraph,\n Entity,\n Edge,\n Artifact,\n Span,\n ResolvedAnchor,\n EntityId,\n)\n\n\ndef program_id_for_repo(repo_root: str) -> str:\n base = os.path.basename(os.path.abspath(repo_root)) or \"repo\"\n return base","source_hash":"910d52dd4d2a09d4d93df32e7ea0dab17810914c5fd1beba6d99bf2add4a0b60","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/tool_budget_allocator.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/tool_budget_allocator.py","kind":"file","name":"examples/scripts/tool_budget_allocator.py","path":"examples/scripts/tool_budget_allocator.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":15,"code":"from __future__ import annotations\n\nfrom typing import Any, Dict\n\n\ndef allocate(budget: Dict[str, Any], expected_gain: float) -> Dict[str, Any]:\n\t\"\"\"\n\tGreedily allocate tokens/time by expected verifier-gain per unit cost (placeholder).\n\t\"\"\"\n\tout = dict(budget)\n\tout[\"allocated\"] = {\"tokens\": int(budget.get(\"tokens\", 64000) * 0.5)}\n\tout[\"expected_gain\"] = float(expected_gain)\n\treturn out\n\n","source_hash":"d16141703bdf7b6026a5c99db975c92fe798868286dc91eaa92586786bf28e71","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/dataset_graph.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/dataset_graph.py","kind":"file","name":"examples/scripts/dataset_graph.py","path":"examples/scripts/dataset_graph.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nfrom typing import Iterable, List, Tuple, Dict, Optional\n\nfrom modules.program_graph import (\n\tProgramGraph,\n\tEntity,\n\tEdge,\n\tArtifact,\n\tSpan,\n\tResolvedAnchor,\n\tEntityId,\n)\nfrom .repo_graph import artifact_uri, program_id_for_repo, parse_program_uri\n\n\nclass DatasetProgramGraph(ProgramGraph):\n\t\"\"\"\n\tMinimal Dataset→ProgramGraph backend.\n\tRepresents benchmark datasets (MBPP, SWE-bench, etc.) 
as entities:","source_hash":"a90d4ac27e2b320e678484e0e3095dd2c0fc4466e6bbbda7709487a7a6738471","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/registry.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/registry.py","kind":"file","name":"examples/scripts/registry.py","path":"examples/scripts/registry.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport json\nimport os\nimport time\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\n\n\ndef _default_registry_path() -> str:\n\tartifacts_dir = examples_dir / \"artifacts\"\n\tartifacts_dir.mkdir(parents=True, exist_ok=True)\n\treturn str(artifacts_dir / \"adapter_registry.json\")\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\tif not os.path.isfile(path):\n\t\treturn {}\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read()) or {}","source_hash":"7e463578e34d6c75dc487a8381b3ce3d5d4ee5e79be22fd32a9f1a5a3c77300a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/pca_core.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/pca_core.py","kind":"file","name":"examples/scripts/pca_core.py","path":"examples/scripts/pca_core.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Tuple, Iterable\n\n\n# Canonical PCA Interface (portable utility helpers)\n\ndef select_subgraph(goal: Dict[str, Any], program_facts: Dict[str, Any]) -> Dict[str, Any]:\n\t\"\"\"\n\tSelect(): question-aware selection of subgraph/segments/windows from the program.\n\tHeuristic: prefer recently changed files, failing spans, or symbols mentioned in goal text.\n\t\"\"\"\n\ttext = (goal.get(\"text\") or goal.get(\"prompt\") or \"\").lower()\n\treturn {\"windows\": [], \"symbols\": [], \"goal_terms\": text.split()[:16]}\n\n\ndef pack_with_anchors(sources: Iterable[Path], windows: List[Tuple[str, int, int]]) -> Dict[str, Any]:\n\t\"\"\"\n\tPack(): deterministic context packaging with anchored snippets (path:line windows).","source_hash":"6761beda60523c17db452378eabd5d869b6c170ac5e3637369cf876bf538c445","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/program_trainer.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/program_trainer.py","kind":"file","name":"examples/scripts/program_trainer.py","path":"examples/scripts/program_trainer.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom typing import Any, Dict\n\n\ndef _read_json(path: str) -> Dict[str, Any]:\n\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\treturn json.loads(fh.read())\n\n\ndef run_program_training(adapters_dir: str, plan_json: str, out_json: str) -> str:\n\t\"\"\"\n\tSmoke-level trainer hook used by examples: simulates a training pass\n\tgrounded to a ProgramGraph-based TrainingPlan by producing a summary\n\tand a trained marker under adapters_dir.\n\t\"\"\"\n\tadapters_dir_abs = os.path.abspath(adapters_dir)","source_hash":"6a30524660eb39f1f7c29bbffef13569864df494a86e5879f0a0168777a0540b","truncated":false} 
{"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/citation_enforcer.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/citation_enforcer.py","kind":"file","name":"examples/scripts/citation_enforcer.py","path":"examples/scripts/citation_enforcer.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":14,"code":"from __future__ import annotations\n\nfrom typing import Any, Dict, List\n\n\ndef require_citations(outputs: Dict[str, Any], min_count: int = 1) -> Dict[str, Any]:\n\t\"\"\"\n\tEnsure outputs contain at least min_count citation anchors.\n\t\"\"\"\n\tcites = outputs.get(\"citations\") or []\n\tok = isinstance(cites, list) and len(cites) >= int(min_count)\n\treturn {\"ok\": ok, \"count\": len(cites) if isinstance(cites, list) else 0}\n\n","source_hash":"5fdb3bc8ad9091a9ded3bef7b0d3ac4d9960b3c93cef3b0ffea3bccded5956d8","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/code_graph.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/code_graph.py","kind":"file","name":"examples/scripts/code_graph.py","path":"examples/scripts/code_graph.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport ast\nimport re\nimport json\nimport time # noqa: F401\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Tuple, Optional\ntry:\n import pathspec # type: ignore\nexcept Exception: # pragma: no cover\n pathspec = None # type: ignore\n\n\n@dataclass\nclass Symbol:\n fqn: str\n name: str\n qualname: str\n kind: str # module|class|function|variable\n module: str\n file: str","source_hash":"e28a996e3c22e28318f33e970701e56c53d2becd271f9615a0269e11078879c6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/scripts/datasets_loader.py","uri":"program://Program_Conditioned_Adapter/file/examples/scripts/datasets_loader.py","kind":"file","name":"examples/scripts/datasets_loader.py","path":"examples/scripts/datasets_loader.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport json\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple, Any, Iterable\n\n\ndef _read_json(path: Path) -> Dict[str, Any]:\n\ttry:\n\t\twith open(path, \"r\", encoding=\"utf-8\") as fh:\n\t\t\treturn json.loads(fh.read())\n\texcept Exception:\n\t\treturn {}\n\n\ndef _load_local_jsonl(fp: Path, text_key: str = \"text\", max_n: int | None = None) -> List[str]:\n\ttexts: List[str] = []\n\tif not fp.exists():\n\t\treturn texts\n\twith open(fp, \"r\", encoding=\"utf-8\") as fh:","source_hash":"a2681e7cd4a6637fe7e308616893d0611160a5134ef5301a3c142c385e994bee","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/emit_repository_knowledge.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/emit_repository_knowledge.py","kind":"file","name":"examples/python_repo_grounded_qa/emit_repository_knowledge.py","path":"examples/python_repo_grounded_qa/emit_repository_knowledge.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport json\nfrom pathlib import Path\nfrom datetime import datetime, timezone\nfrom typing import Dict, Any\n\n\ndef 
_resolve_pg_ctor(pg_backend: str):\n mod, _, attr = pg_backend.partition(\":\")\n if not mod or not attr:\n raise ValueError(f\"Invalid ProgramGraph backend '{pg_backend}', expected 'module:ClassName'\")\n m = __import__(mod, fromlist=[attr])\n return getattr(m, attr)\n\n\ndef emit_repository_knowledge(repo_root: str, out_path: str, pg_backend: str) -> str:\n \"\"\"Emit a consolidated repository_knowledge.json with entities, edges, and artifact spans.\"\"\"\n repo_root_abs = os.path.abspath(repo_root)","source_hash":"788036183675721d2a0538396deab39f9584c71e443b631b06d85b030c5bb4af","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/repo_state.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/repo_state.py","kind":"file","name":"examples/python_repo_grounded_qa/repo_state.py","path":"examples/python_repo_grounded_qa/repo_state.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field, asdict\nfrom typing import Dict, List, Optional, Tuple, Set, Any\nimport os\nimport json\nimport time\nimport math\nimport numpy as np\n\n\ndef _now_ts() -> float:\n try:\n return time.time()\n except Exception:\n return 0.0\n\n\ndef _unit(x: np.ndarray) -> np.ndarray:\n n = float(np.linalg.norm(x))\n return (x / n) if n > 0 else x","source_hash":"01d60c404427d09954052326c123c81742d53a0815cba5904a200f0d8f0a3fae","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/run_smoke_example.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/run_smoke_example.py","kind":"file","name":"examples/python_repo_grounded_qa/run_smoke_example.py","path":"examples/python_repo_grounded_qa/run_smoke_example.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport sys\nimport subprocess\nfrom pathlib import Path\n\n# Resolve program_conditioned_adapter directory directly from this file\nEX_DIR = Path(__file__).resolve().parents[2]\nSMOKE_REPO = EX_DIR / \"smoke_repo\"\n# Backend class name (implemented in this example package)\nPG_BACKEND = \"examples.scripts.python_repo_graph:PythonRepoGraph\"\n# Program configuration loader\nfrom .program_config import load_program_config # noqa: E402\n\n\ndef _run(cmd: list[str]) -> int:\n print(\"[run]\", \" \".join(cmd))\n return subprocess.call(cmd)\n\n","source_hash":"92ed0427a371a73d25260fec5c834daa2157daf6718fb2ceb563e5669b425dc0","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/program_config.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/program_config.py","kind":"file","name":"examples/python_repo_grounded_qa/program_config.py","path":"examples/python_repo_grounded_qa/program_config.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional\n\n\n@dataclass(frozen=True)\nclass ProgramContracts:\n require_citations: bool = True\n citations_per_paragraph: bool = False\n retrieval_policy: str = \"sim:0.6,struct:0.4\"\n retrieval_temp: float = 0.7\n\n\n@dataclass(frozen=True)\nclass ProgramPaths:\n 
adapters_dir: Path\n knowledge_path: Path\n program_state_path: Optional[Path] = None\n","source_hash":"478bd1b12009db72e078bf222582a9c2cdf8cdedc2c722fc22424d4953389d08","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/modules/codegraph_core.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/modules/codegraph_core.py","kind":"file","name":"examples/python_repo_grounded_qa/modules/codegraph_core.py","path":"examples/python_repo_grounded_qa/modules/codegraph_core.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport ast\nimport hashlib\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Tuple, Iterable, Set\n\n\n@dataclass(frozen=True)\nclass FileSpan:\n file: str # absolute path\n start_line: int # 1-based inclusive\n end_line: int # 1-based inclusive\n\n\n@dataclass\nclass CGEntity:\n id: str # stable id (e.g., fqn)\n kind: str # module|function|class|test\n name: str","source_hash":"f2361b36cbe6c447d9c9ffa4981618b154f69706cc8653bd33341c8548a89cf7","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/modules/prompts.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/modules/prompts.py","kind":"file","name":"examples/python_repo_grounded_qa/modules/prompts.py","path":"examples/python_repo_grounded_qa/modules/prompts.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from typing import List\nfrom ..code_graph import CodeGraph\n\ndef build_prompts_for_module(g: CodeGraph, module: str, max_q: int = 3) -> List[str]:\n \"\"\"Construct a few simple, verifiable prompts for a module using symbol names and doc headers.\n\n Prefers questions that can be answered via local context/citations.\n \"\"\"\n prompts: List[str] = []\n # Symbols defined in the module\n defs = list(g.defs_in(module) or [])\n # Ask for an explanation of the module\n prompts.append(f\"Explain the key functions/classes in {module}. Cite path:line for each claim.\")\n # Ask about up to two concrete defs\n for fqn in defs[:2]:\n name = fqn.split(\".\")[-1]\n prompts.append(f\"What does `{name}` do in {module}? Show signature and cite path:line.\")\n # Unique and bounded\n uniq: List[str] = []\n for p in prompts:\n if p not in uniq:","source_hash":"02441a59f7347f3704d9f17e1da09fb5bcbaabbc0806534a9e84a4f7b356d79f","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/modules/verify.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/modules/verify.py","kind":"file","name":"examples/python_repo_grounded_qa/modules/verify.py","path":"examples/python_repo_grounded_qa/modules/verify.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# citation regex check, pytest test runner\n\nfrom typing import Dict, List, Any\nimport subprocess\nfrom ..code_graph import CodeGraph\nimport os\n\n\ndef verify_with_tests(g: CodeGraph, module: str, *, repo_root: str, env: Dict[str, str]) -> bool:\n \"\"\"Run mapped tests for a module if available; return True if all selected tests pass.\n\n Uses CodeGraph's tests mapping (best-effort). 
If no tests mapped, returns True as a soft pass.\n \"\"\"\n nodes = g.pytest_nodes_by_module.get(module, [])\n # Also include module-level mapping\n if not nodes:\n nodes = g.tests_for_module(module)\n if not nodes:\n return True # no tests to run; accept\n # Build pytest command\n cmd = [\"pytest\", \"-q\"]","source_hash":"7b90cee7bbbb8a85b90f2835a6b0b5748ee4bd8fd935a0182c98003f1c7de3eb","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/modules/__init__.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/modules/__init__.py","kind":"file","name":"examples/python_repo_grounded_qa/modules/__init__.py","path":"examples/python_repo_grounded_qa/modules/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":3,"code":"# Example-specific utilities for the Python repo backend\n\n","source_hash":"6e4d8bb3400a9571462e81741b7380439d0bd31b3b1e46320e13dfe59619b7b6","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/modules/selection.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/modules/selection.py","kind":"file","name":"examples/python_repo_grounded_qa/modules/selection.py","path":"examples/python_repo_grounded_qa/modules/selection.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# question-aware selection, zoom-by-symbol, function-first candidate collection, name-match helpers\nimport os\nimport re\nfrom typing import Dict, List, Optional, Tuple\n\nfrom ..code_graph import CodeGraph\n\n\ndef _re_escape(s: str) -> str:\n try:\n import re as _re\n return _re.escape(s)\n except Exception:\n return s\n\n\ndef modules_from_symbols(repo_root: str, seeds: List[str], *, radius: int = 1, top_k: int = 8, ignore: Optional[List[str]] = None) -> Tuple[List[str], List[str]]:\n g = CodeGraph.load_or_build(repo_root, ignore=[s for s in (ignore or []) if s])\n modules_set: Dict[str, bool] = {}\n files_set: Dict[str, bool] = {}\n for s in seeds:","source_hash":"407bd941ac949c3ccae34be6c13c6f7bcfdae0fc90ebae3cea1381b60d17c68d","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:examples/python_repo_grounded_qa/modules/tune.py","uri":"program://Program_Conditioned_Adapter/file/examples/python_repo_grounded_qa/modules/tune.py","kind":"file","name":"examples/python_repo_grounded_qa/modules/tune.py","path":"examples/python_repo_grounded_qa/modules/tune.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from __future__ import annotations\n\nimport os\nimport json\nimport time\nimport hashlib\nimport subprocess\nimport sys\nfrom typing import Dict, List, Tuple, Optional, Set\n\nfrom ..code_graph import CodeGraph\nfrom .prompts import build_prompts_for_module\nfrom .verify import verify_with_tests, extract_citations\nfrom modules.embedding import build_subgraph_embedding_from_graph\nfrom modules.adapter import (\n save_npz,\n generate_lora_from_embedding,\n)\nfrom model.inspect import detect_target_shapes_from_model_full, detect_target_shapes_from_model\n\n","source_hash":"c1e3ec462512de538f0c41140ec4e75fcdbe445da18db02db5814709ced92da6","truncated":false} 
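The citation_enforcer record (shown in full above) is the contract these grounded-QA modules verify against. A minimal sketch; the `outputs` dict is hypothetical, and only its `citations` list is inspected:

```python
# Usage sketch for examples/scripts/citation_enforcer.require_citations.
from examples.scripts.citation_enforcer import require_citations

outputs = {"citations": ["run.py:42", "examples/scripts/pca_core.py:10"]}
print(require_citations(outputs, min_count=1))   # {'ok': True, 'count': 2}
print(require_citations({}, min_count=1))        # {'ok': False, 'count': 0}
```

The soft-pass behavior in modules/verify.py (no mapped tests means accept) composes with this check: a response can pass tests vacuously yet still fail on missing citations.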
{"repo_id":"Program_Conditioned_Adapter","entity_id":"file:scripts/trace_llama_parity.py","uri":"program://Program_Conditioned_Adapter/file/scripts/trace_llama_parity.py","kind":"file","name":"scripts/trace_llama_parity.py","path":"scripts/trace_llama_parity.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport json\nimport argparse\nfrom typing import Dict, Any, List\n\nimport torch\n\n\n@torch.no_grad()\ndef main() -> None:\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--prompt\", default=\"Hello\")\n p.add_argument(\"--device\", default=\"cuda\" if torch.cuda.is_available() else \"cpu\")\n p.add_argument(\"--dtype\", default=\"bfloat16\")\n p.add_argument(\"--max-layers\", type=int, default=8, help=\"Limit layers to trace for brevity\")\n p.add_argument(\"--save-logits\", action=\"store_true\", help=\"Save HF/local logits to disk to minimize RAM/VRAM use\")\n p.add_argument(\"--out-dir\", default=\"\", help=\"Directory to write saved tensors (defaults to cache-dir/trace)\")\n args = p.parse_args()\n","source_hash":"5e465dee5c4e53b58b1b305eb07f5d6468152b1ae2c910110fe4d01ee8b1fbd1","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:scripts/compare_tokenizers.py","uri":"program://Program_Conditioned_Adapter/file/scripts/compare_tokenizers.py","kind":"file","name":"scripts/compare_tokenizers.py","path":"scripts/compare_tokenizers.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport json\nfrom typing import List, Dict\n\n\ndef main() -> None:\n import argparse\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", required=True, help=\"HF model id or local snapshot path\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--samples\", type=int, default=0, help=\"Sample count from a default suite (0 uses all)\")\n p.add_argument(\"--backend\", default=\"local\", choices=[\"hf\",\"local\",\"pure\"], help=\"Which local tokenizer backend to compare against HF\")\n args = p.parse_args()\n\n from model.hf_snapshot import ensure_snapshot\n snap = ensure_snapshot(args.model, args.cache_dir)\n\n # HF reference tokenizer\n try:\n from transformers import AutoTokenizer # type: ignore\n except Exception as e:","source_hash":"573051bcd0f55c9b3d6e603a11bea0d116be369fd5f8e3c2eca701df323b6e37","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:scripts/diff_hf_local_weights.py","uri":"program://Program_Conditioned_Adapter/file/scripts/diff_hf_local_weights.py","kind":"file","name":"scripts/diff_hf_local_weights.py","path":"scripts/diff_hf_local_weights.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport json\nimport argparse\nfrom typing import Dict, List, Tuple\n\nimport torch\n\n\ndef _ensure_snapshot(model_id: str, cache_dir: str) -> str:\n if os.path.isdir(model_id) and os.path.isfile(os.path.join(model_id, \"config.json\")):\n return model_id\n org_name = model_id.strip().split(\"/\")[-2:]\n if len(org_name) == 2:\n org, name = org_name\n dir1 = os.path.join(cache_dir, f\"models--{org}--{name}\", \"snapshots\")\n cands = []\n if os.path.isdir(dir1):\n cands.extend([os.path.join(dir1, d) for d in os.listdir(dir1)])\n cands = [p for 
p in cands if os.path.isfile(os.path.join(p, \"model.safetensors.index.json\"))]\n if cands:\n cands.sort(key=lambda p: os.path.getmtime(p), reverse=True)","source_hash":"138a3eaaa0b0d1529dc8346d14adec02d84cc753d5d0f3625decdd3d1032321a","truncated":false} {"repo_id":"Program_Conditioned_Adapter","entity_id":"file:scripts/verify_local_vs_hf.py","uri":"program://Program_Conditioned_Adapter/file/scripts/verify_local_vs_hf.py","kind":"file","name":"scripts/verify_local_vs_hf.py","path":"scripts/verify_local_vs_hf.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport sys\nimport json\nimport gc\nimport torch\nfrom typing import Tuple\nfrom model.hf_snapshot import ensure_snapshot\nfrom model.runtime_utils import local_logits_last\n\ndef main() -> None:\n import argparse\n p = argparse.ArgumentParser()\n p.add_argument(\"--model\", default=\"meta-llama/Llama-3.1-8B-Instruct\")\n p.add_argument(\"--cache-dir\", default=\"/data/transformer_10/checkpoints\")\n p.add_argument(\"--prompt\", default=\"Hello\")\n p.add_argument(\"--max-tokens\", type=int, default=64)\n p.add_argument(\"--gen-new\", type=int, default=32, help=\"Generate this many new tokens for HF/local side-by-side\")\n p.add_argument(\"--mode\", default=\"hf_guided\", choices=[\"independent\", \"hf_guided\", \"single_step\", \"independent_exact\", \"two_pass_exact\"], help=\"Verification mode\")\n p.add_argument(\"--dtype\", default=\"float32\", choices=[\"float32\", \"bfloat16\"], help=\"Computation dtype for both models during verify\")\n p.add_argument(\"--save-logits\", action=\"store_true\", help=\"Save HF/local last-step logits to disk to minimize RAM/VRAM use\")\n p.add_argument(\"--parity-exact\", action=\"store_true\", help=\"Use explicit matmul+float32 softmax attention for exact HF parity\")","source_hash":"3b25fa7e59fa0580e35ab3013aedc7c72da96a1594722b2ecb06cfb22853d1f1","truncated":false}
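The scripts/ records close the section with the HF-vs-local parity tooling. A minimal parity-check sketch in the spirit of scripts/verify_local_vs_hf.py; the synthetic tensors and the `logits_parity` helper are assumptions (the real script obtains logits via transformers and model.runtime_utils.local_logits_last):

```python
# Parity sketch: compare last-step logits from two implementations in float32.
import torch

def logits_parity(hf_logits: torch.Tensor, local_logits: torch.Tensor, atol: float = 1e-3) -> dict:
    """Report max absolute drift and whether the greedy token agrees."""
    a, b = hf_logits.float(), local_logits.float()
    max_abs = (a - b).abs().max().item()
    same_argmax = bool(a.argmax() == b.argmax())
    return {"max_abs_diff": max_abs, "same_argmax": same_argmax, "ok": max_abs <= atol or same_argmax}

# Example with synthetic data standing in for real model outputs:
x = torch.randn(32000)
print(logits_parity(x, x + 1e-5 * torch.randn_like(x)))
```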