{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "1df5ac03", "metadata": {}, "outputs": [], "source": [ "!pip install -q \\\n", "datasets==4.8.4 \\\n", "groq==1.2.0 \\\n", "openenv-core==0.2.3 \\\n", "sentence-transformers==5.4.1 \\\n", "torch==2.11.0 \\\n", "transformers==5.6.2 \\\n", "trl==1.2.0\n", "\n", "print(\"Dependencies installed successfully!\")" ] }, { "cell_type": "code", "execution_count": null, "id": "26b26888", "metadata": {}, "outputs": [], "source": [ "from __future__ import annotations\n", "\n", "import argparse\n", "import json\n", "import random\n", "import os\n", "import re\n", "import time\n", "from pathlib import Path\n", "from typing import Optional\n", "\n", "try:\n", " from dotenv import load_dotenv\n", " load_dotenv()\n", "except ImportError:\n", " # Keep script runnable even if python-dotenv is not installed.\n", " pass\n", "\n", "\n", "try:\n", " import matplotlib\n", " matplotlib.use(\"Agg\") # non-interactive backend for servers\n", " import matplotlib.pyplot as plt\n", " HAS_PLT = True\n", "except ImportError:\n", " HAS_PLT = False\n", "\n", "HAS_UNSLOTH = False\n", "FastLanguageModel = None\n", "\n", "\n", "try:\n", " from trl import GRPOConfig, GRPOTrainer\n", " HAS_TRL = True\n", " print(\"TRL loaded OK\")\n", "except Exception as e:\n", " print(f\"TRL FAILED: {e}\")\n", " HAS_TRL = False\n", "\n", "try:\n", " from datasets import Dataset\n", " HAS_DATASETS = True\n", "except ImportError:\n", " HAS_DATASETS = False\n", "\n", "try:\n", " from transformers import AutoModelForCausalLM, AutoTokenizer\n", " HAS_TRANSFORMERS = True\n", "except ImportError:\n", " HAS_TRANSFORMERS = False\n", "\n", "# Local imports\n", "from envs.environment import WorkSpaceEnvironment\n", "from models.schemas import WorkSpaceAction" ] }, { "cell_type": "code", "execution_count": null, "id": "12225440", "metadata": {}, "outputs": [], "source": [ "TOPICS_FILE = Path(\"ai_pm_prompts.json\")\n", "OUTPUT_DIR = Path(\"artifacts/grpo_state_based\")" ] }, { "cell_type": "code", "execution_count": null, "id": "8b3d0fd0", "metadata": {}, "outputs": [], "source": [ "HIDDEN_CONSTRAINTS = {\n", " \"Finance\": \"Budget must not exceed $50k.\",\n", " \"Security\": \"Must include biometric 2FA.\",\n", " \"UX\": \"Checkout must be a single click.\",\n", "}\n", "\n", "# ── Action templates the model should learn to produce\n", "ORACLE_ACTIONS = {\n", " \"ask_finance\": json.dumps({\n", " \"action_type\": \"message_expert\", \"target\": \"Finance\",\n", " \"content\": \"What is the hard budget ceiling the PRD must respect for launch?\"\n", " }),\n", " \"ask_security\": json.dumps({\n", " \"action_type\": \"message_expert\", \"target\": \"Security\",\n", " \"content\": \"What authentication controls must the PRD include? Is biometric 2FA required?\"\n", " }),\n", " \"ask_ux\": json.dumps({\n", " \"action_type\": \"message_expert\", \"target\": \"UX\",\n", " \"content\": \"What checkout experience is required? Should we target a single-click flow?\"\n", " }),\n", " \"propose_draft\": json.dumps({\n", " \"action_type\": \"propose_draft\", \"target\": \"All\",\n", " \"content\": (\n", " \"PRD Draft:\\n\"\n", " \"1. Budget: Launch scope capped at $50k.\\n\"\n", " \"2. Security: Biometric 2FA required for login and sensitive actions.\\n\"\n", " \"3. UX: Single-click checkout flow.\"\n", " ),\n", " }),\n", " \"submit_final\": json.dumps({\n", " \"action_type\": \"submit_final\", \"target\": None,\n", " \"content\": (\n", " \"Final PRD:\\n\"\n", " \"1. 
Budget cap: All launch costs must stay at or below $50k.\\n\"\n", " \"2. Security: The app must enforce biometric 2FA for all authentication.\\n\"\n", " \"3. UX: Checkout must be implemented as a true single-click experience.\"\n", " ),\n", " }),\n", "}" ] }, { "cell_type": "markdown", "id": "760766ee", "metadata": {}, "source": [ "### Load Topics" ] }, { "cell_type": "code", "execution_count": null, "id": "65205766", "metadata": {}, "outputs": [], "source": [ "def load_topics(limit: int = 50) -> list[str]:\n", " if TOPICS_FILE.exists():\n", " with TOPICS_FILE.open() as f:\n", " return json.load(f)[:limit]\n", " return [\n", " \"Draft a Mobile App PRD for a FinTech startup targeting emerging markets.\",\n", " \"Build an AI-driven healthcare platform for enterprise customers.\",\n", " \"Create a SaaS analytics tool for regulatory-heavy industries.\",\n", " \"Design a gaming platform for Gen Z users with real-time features.\",\n", " \"Develop a cross-platform product for low-bandwidth regions.\",\n", " ]\n" ] }, { "cell_type": "markdown", "id": "7e76846b", "metadata": {}, "source": [ "### Parse Action (Handle fenced responses like ```json ... ```)" ] }, { "cell_type": "code", "execution_count": null, "id": "6b91f1f9", "metadata": {}, "outputs": [], "source": [ "def parse_action(text: str) -> Optional[WorkSpaceAction]:\n", " \"\"\"Parse a JSON action from model output. Returns None on failure.\"\"\"\n", " text = text.strip()\n", " if text.startswith(\"```\"):\n", " text = re.sub(r\"^```(?:json)?\\s*\", \"\", text)\n", " text = re.sub(r\"\\s*```$\", \"\", text)\n", "\n", " text = text.strip()\n", " try:\n", " # Fast path: entire completion is valid JSON.\n", " return WorkSpaceAction(**json.loads(text))\n", " except Exception:\n", " pass\n", "\n", " # Fallback: find the first JSON object that includes action_type.\n", " try:\n", " idx = text.find(\"{\")\n", " while idx != -1:\n", " depth = 0\n", " for end in range(idx, len(text)):\n", " if text[end] == \"{\":\n", " depth += 1\n", " elif text[end] == \"}\":\n", " depth -= 1\n", " if depth == 0:\n", " candidate = text[idx:end + 1]\n", " if '\"action_type\"' in candidate:\n", " return WorkSpaceAction(**json.loads(candidate))\n", " break\n", " idx = text.find(\"{\", idx + 1)\n", " return None\n", " except Exception:\n", " return None" ] }, { "cell_type": "markdown", "id": "8c6b5cba", "metadata": {}, "source": [ "### Constraint Helpers" ] }, { "cell_type": "code", "execution_count": null, "id": "4d01ee84", "metadata": {}, "outputs": [], "source": [ "def lexical_overlap(a: str, b: str) -> float:\n", " \"\"\"Simple token overlap score in [0,1] for dense content shaping.\"\"\"\n", " toks_a = set(re.findall(r\"[a-z0-9]+\", (a or \"\").lower()))\n", " toks_b = set(re.findall(r\"[a-z0-9]+\", (b or \"\").lower()))\n", " if not toks_a or not toks_b:\n", " return 0.0\n", " inter = len(toks_a & toks_b)\n", " denom = max(1, min(len(toks_a), len(toks_b)))\n", " return inter / denom\n", "\n", "\n", "def format_discovered(env: WorkSpaceEnvironment) -> str:\n", " lines = []\n", " for name, expert in env.state().experts.items():\n", " status = \"✓ DISCOVERED\" if expert.constraint_discovered_by_agent else \"? unknown\"\n", " lines.append(f\" {name}: {status}\")\n", " return \"\\n\".join(lines)\n" ] }, { "cell_type": "markdown", "id": "3473c0ef", "metadata": {}, "source": [ "### AGENT PROMPT" ] }, { "cell_type": "code", "execution_count": null, "id": "76497a28", "metadata": {}, "outputs": [], "source": [ "AGENT_SYSTEM_PROMPT = \"\"\"You are an expert AI Project Manager in a multi-stakeholder negotiation.\n", "\n", "TASK: Produce a final PRD that satisfies ALL three experts — Finance, Security, and UX.\n", "Each expert holds a hidden constraint you must discover through targeted questions.\n", "\n", "STRATEGY:\n", " 1. Message each expert INDIVIDUALLY (not \"All\") to discover their constraint.\n", " 2. Once all constraints are known, propose a draft.\n", " 3. Refine if needed, then submit_final before turn 15.\n", "\n", "ANTI-PATTERNS (will be penalized):\n", " - Broadcasting to \"All\" when gathering requirements → -1.0 penalty\n", " - Repeating a question already answered → -0.5 penalty\n", " - Submitting without discovering constraints → low harmonic mean score\n", "\n", "CURRENT DISCOVERED CONSTRAINTS:\n", "{discovered}\n", "\n", "You are a strict API. Respond with ONLY raw, valid JSON.\n", "DO NOT wrap the JSON in markdown formatting (no ```json).\n", "DO NOT output any conversational text.\n", "End your response immediately after the closing }}.\n", "{{\"action_type\": \"message_expert\" | \"propose_draft\" | \"submit_final\",\n", " \"target\": \"Finance\" | \"Security\" | \"UX\" | \"All\" | null,\n", " \"content\": \"your message\"}}\"\"\"\n" ] }, { "cell_type": "markdown", "id": "58fa5c08", "metadata": {}, "source": [ "### STATE PROMPT FOR DATASET GENERATION\n", "\n", "- Use Qwen-compatible ChatML formatting to improve stop behavior.\n", "- Qwen instruct models are much more likely to terminate with <|im_end|> when prompted in this native format."
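] }, { "cell_type": "markdown", "id": "e1a9c001", "metadata": {}, "source": [ "Optional sanity check below: a minimal sketch (assuming the default base model's tokenizer can be fetched, so network access is required) confirming that `<|im_end|>` encodes to a single special token ID, which the explicit stop-token list built before training relies on." ] }, { "cell_type": "code", "execution_count": null, "id": "e1a9c002", "metadata": {}, "outputs": [], "source": [ "# Minimal sketch (assumption: network access to download the tokenizer).\n", "# ChatML stop behavior only works if <|im_end|> is a single special token.\n", "if HAS_TRANSFORMERS:\n", "    _tok = AutoTokenizer.from_pretrained(\"unsloth/Qwen2.5-3B-Instruct-bnb-4bit\")\n", "    _ids = _tok.encode(\"<|im_end|>\", add_special_tokens=False)\n", "    print(f\"<|im_end|> -> token ids {_ids}\")  # expect exactly one ID\n", "else:\n", "    print(\"transformers not available; skipping ChatML token check\")"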
] }, { "cell_type": "code", "execution_count": null, "id": "4e24ca2e", "metadata": {}, "outputs": [], "source": [ "def build_state_prompt(\n", " topic: str,\n", " turn: int,\n", " feedback_so_far: str,\n", " discovered: str,\n", " conversation_history: str = \"\",\n", ") -> str:\n", " \"\"\"\n", " Build a prompt representing a specific game state.\n", " This is what gets fed to GRPOTrainer as the 'prompt' field.\n", " \"\"\"\n", " system = AGENT_SYSTEM_PROMPT.format(discovered=discovered)\n", "\n", " user_content = (\n", " f\"NEGOTIATION TASK: {topic}\\n\\n\"\n", " f\"TURN: {turn}/15\\n\\n\"\n", " )\n", "\n", " if conversation_history:\n", " user_content += f\"CONVERSATION SO FAR:\\n{conversation_history}\\n\\n\"\n", "\n", " user_content += f\"LATEST FEEDBACK:\\n{feedback_so_far}\\n\\nWhat is your next action?\"\n", "\n", " return (\n", " f\"<|im_start|>system\\n{system}<|im_end|>\\n\"\n", " f\"<|im_start|>user\\n{user_content}<|im_end|>\\n\"\n", " f\"<|im_start|>assistant\\n\"\n", " )" ] }, { "cell_type": "markdown", "id": "7f8cf7ea", "metadata": {}, "source": [ "### State Dataset Builder" ] }, { "cell_type": "code", "execution_count": null, "id": "60877e63", "metadata": {}, "outputs": [], "source": [ "def build_state_dataset(topics: list[str], states_per_topic: int = 5) -> list[dict]:\n", " \"\"\"\n", " Build a dataset of negotiation states using the 'medium' mode environment.\n", " Each record represents one (state → optimal_action) training example.\n", "\n", " We run oracle trajectories through the environment to get realistic\n", " expert feedback, then snapshot the state at each turn.\n", "\n", " This is the key fix: instead of hoping the model learns from full episodes,\n", " we give it explicit training signal at every decision point.\n", " \"\"\"\n", " env = WorkSpaceEnvironment(mode=\"medium\")\n", " records = []\n", "\n", " # Oracle action sequence: discover all three constraints, then draft and submit\n", " oracle_sequence = [\n", " (\"ask_finance\", WorkSpaceAction(\n", " action_type=\"message_expert\", target=\"Finance\",\n", " content=\"What budget ceiling must the PRD respect?\"\n", " )),\n", " (\"ask_security\", WorkSpaceAction(\n", " action_type=\"message_expert\", target=\"Security\",\n", " content=\"What authentication requirements must be included?\"\n", " )),\n", " (\"ask_ux\", WorkSpaceAction(\n", " action_type=\"message_expert\", target=\"UX\",\n", " content=\"What checkout flow is required?\"\n", " )),\n", " (\"propose_draft\", WorkSpaceAction(\n", " action_type=\"propose_draft\", target=\"All\",\n", " content=\"PRD: Budget at or below $50k. Biometric 2FA required. Single-click checkout.\"\n", " )),\n", " (\"submit_final\", WorkSpaceAction(\n", " action_type=\"submit_final\", target=None,\n", " content=\"Final PRD: Budget capped at $50k. Biometric 2FA for auth. Single-click checkout.\"\n", " )),\n", " ]\n", "\n", " for topic in topics:\n", " obs = env.reset(topic)\n", " conversation_history = \"\"\n", " discovered = \" Finance: ? unknown\\n Security: ? unknown\\n UX: ? 
unknown\"\n", "\n", " for step_idx, (action_key, oracle_action) in enumerate(oracle_sequence):\n", " if obs.done:\n", " break\n", "\n", " # Snapshot the state BEFORE taking the action\n", " prompt = build_state_prompt(\n", " topic=topic,\n", " turn=obs.current_turn,\n", " feedback_so_far=obs.feedback,\n", " discovered=discovered,\n", " conversation_history=conversation_history,\n", " )\n", "\n", " records.append({\n", " \"prompt\": prompt,\n", " \"topic\": topic,\n", " \"turn\": obs.current_turn,\n", " \"oracle_action\": ORACLE_ACTIONS[action_key],\n", " # These metadata fields help with debugging and post-analysis\n", " \"step_idx\": step_idx,\n", " \"discovered_before\": discovered,\n", " })\n", "\n", " # Step forward with oracle action to get next state\n", " obs = env.step(oracle_action)\n", " conversation_history += (\n", " f\"Turn {step_idx}: {oracle_action.action_type} → {oracle_action.target}\\n\"\n", " f\"Feedback: {obs.feedback[:120]}...\\n\"\n", " )\n", " discovered = format_discovered(env)\n", "\n", " if step_idx >= states_per_topic - 1:\n", " break\n", "\n", " # Add negative-pattern states (what NOT to do)\n", " records.extend(build_negative_states(topics[:5]))\n", " # Upweight late-stage \"submit_final\" states so policy learns to finish.\n", " late_stage = build_late_stage_states(topics)\n", " records.extend(late_stage)\n", " records.extend(late_stage)\n", " records.extend(late_stage)\n", " random.shuffle(records)\n", "\n", " print(f\"Built {len(records)} training states from {len(topics)} topics\")\n", " return records" ] }, { "cell_type": "markdown", "id": "8db26d85", "metadata": {}, "source": [ "### Late Stage State and Negative Pattern State (what NOT to do)\n", "\n", "- Upweight late-stage \"submit_final\" states so policy learns to finish." 
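] }, { "cell_type": "markdown", "id": "e1a9c003", "metadata": {}, "source": [ "A quick illustrative check, using only names defined above: the next cell runs the oracle `submit_final` action through `parse_action`, the same parsing path the reward function applies to model completions, to confirm the supervision target validates as a `WorkSpaceAction`." ] }, { "cell_type": "code", "execution_count": null, "id": "e1a9c004", "metadata": {}, "outputs": [], "source": [ "# Sanity check: the oracle target reused by the late-stage states must\n", "# survive the same parsing path applied to model completions.\n", "_oracle = parse_action(ORACLE_ACTIONS[\"submit_final\"])\n", "assert _oracle is not None, \"oracle submit_final failed to parse\"\n", "print(_oracle.action_type, \"| target:\", _oracle.target)\n", "print(_oracle.content.splitlines()[0])"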
] }, { "cell_type": "code", "execution_count": null, "id": "a75ccb32", "metadata": {}, "outputs": [], "source": [ "def build_late_stage_states(topics: list[str]) -> list[dict]:\n", " \"\"\"\n", " Inject guaranteed late-stage states.\n", " Forces the model to learn how to synthesize and submit the final PRD.\n", " \"\"\"\n", " late_records = []\n", " for topic in topics:\n", " prompt = build_state_prompt(\n", " topic=topic,\n", " turn=4,\n", " feedback_so_far=\"UX: The checkout must be a single click.\",\n", " discovered=\" Finance: ✓ DISCOVERED\\n Security: ✓ DISCOVERED\\n UX: ✓ DISCOVERED\",\n", " conversation_history=(\n", " \"Turn 0: message_expert → Finance\\nFeedback: The budget cap is $50k.\\n\"\n", " \"Turn 1: message_expert → Security\\nFeedback: Biometric 2FA is strictly required.\\n\"\n", " \"Turn 2: message_expert → UX\\nFeedback: Checkout must be a single click.\\n\"\n", " )\n", " )\n", " late_records.append({\n", " \"prompt\": prompt,\n", " \"topic\": topic,\n", " \"turn\": 4,\n", " \"oracle_action\": ORACLE_ACTIONS[\"submit_final\"],\n", " \"step_idx\": 4,\n", " \"discovered_before\": \" Finance: ✓ DISCOVERED\\n Security: ✓ DISCOVERED\\n UX: ✓ DISCOVERED\",\n", " })\n", " return late_records\n", "\n", "# Negative states that teach the model what NOT to do.\n", "\n", "def build_negative_states(topics: list[str]) -> list[dict]:\n", " \"\"\"\n", " States where the agent is in a bad situation (repeated question, wrong phase).\n", " These teach the model to recover, not just follow the oracle.\n", " \"\"\"\n", " negative_records = []\n", "\n", " for topic in topics:\n", " # State: Finance already answered, agent is about to repeat\n", " prompt = build_state_prompt(\n", " topic=topic,\n", " turn=2,\n", " feedback_so_far=(\n", " \"Finance: As I mentioned, we have a strict $50k budget cap. \"\n", " \"This is the same answer I gave before.\"\n", " ),\n", " discovered=\" Finance: ✓ DISCOVERED\\n Security: ? unknown\\n UX: ? unknown\",\n", " conversation_history=(\n", " \"Turn 0: message_expert → Finance\\n\"\n", " \"Feedback: Finance: The budget cap is $50k. Don't go over it.\\n\"\n", " \"Turn 1: message_expert → Finance\\n\"\n", " \"Feedback: Finance: I already told you — $50k. 
Ask someone else.\\n\"\n", " ),\n", " )\n", " negative_records.append({\n", " \"prompt\": prompt,\n", " \"topic\": topic,\n", " \"turn\": 2,\n", " \"oracle_action\": ORACLE_ACTIONS[\"ask_security\"], # Should pivot to Security\n", " \"step_idx\": -1, # Negative example\n", " \"discovered_before\": \"Finance: ✓ DISCOVERED\",\n", " })\n", "\n", " return negative_records" ] }, { "cell_type": "markdown", "id": "4f25c978", "metadata": {}, "source": [ "### Reward Function\n", "- Formatting Penalty.\n", "- Anti-Pattern Penalties.\n", "- Massive penalty for broadcasting (Reward Hacking).\n", "- Penalty for empty or trivially short drafts/finals (short expert questions are often valid and should not be over-penalized)\n", "- Penalize very long outputs; they correlate with max-length clipping.\n", "- Hard penalty for non-terminated JSON-like responses.\n", "- Strongly discourage invalid action/target combinations\n", "- HEURISTIC STATE GRADING\n", "- Did it try to submit before gathering all constraints?\n", "- ORACLE-GUIDED DENSE SHAPING (This gives non-binary signal and prevents reward plateaus)\n", "- Late turns should avoid endless questioning/proposals\n", "- Keeping the reward in stable range for GRPO" ] }, { "cell_type": "code", "execution_count": null, "id": "b082cf4c", "metadata": {}, "outputs": [], "source": [ "def make_reward_fn():\n", " \"\"\"\n", " Evaluates the model's actions instantly and locally.\n", " No live API calls. No reward hacking loopholes.\n", " \"\"\"\n", " def reward_fn(completions: list[str], prompts: list[str], **kwargs) -> list[float]:\n", " rewards = []\n", " oracle_actions = kwargs.get(\"oracle_action\", [None] * len(completions))\n", " turns = kwargs.get(\"turn\", [None] * len(completions))\n", "\n", " for completion, prompt, oracle_raw, turn in zip(completions, prompts, oracle_actions, turns):\n", " action = parse_action(completion)\n", "\n", " # 1. Formatting Penalty\n", " if action is None:\n", " rewards.append(-0.5)\n", " continue\n", "\n", " reward = 0.0\n", " completion_text = (completion or \"\").strip()\n", "\n", " # ── 2. 
ANTI-PATTERN PENALTIES ──\n", "\n", " # Heavy penalty for broadcasting (closes the reward-hacking shortcut)\n", " if action.target == \"All\":\n", " reward -= 1.0\n", "\n", " # Penalty for empty or trivially short drafts/finals\n", " # (short expert questions are often valid and should not be over-penalized)\n", " if action.action_type in {\"propose_draft\", \"submit_final\"} and len((action.content or \"\").split()) < 5:\n", " reward -= 0.2\n", "\n", " # Penalize very long outputs; they correlate with max-length clipping.\n", " if len((action.content or \"\").split()) > 80:\n", " reward -= 0.2\n", " if len(completion_text) > 320:\n", " reward -= 0.15\n", "\n", " # Encourage strict JSON-only behavior: starts with { and ends with }.\n", " is_strict_json = completion_text.startswith(\"{\") and completion_text.endswith(\"}\")\n", " if is_strict_json:\n", " reward += 0.1\n", " else:\n", " reward -= 0.3\n", "\n", " # Hard penalty for non-terminated JSON-like responses.\n", " # This directly pushes generations away from max-token clipping.\n", " if not completion_text.endswith(\"}\"):\n", " reward -= 0.25\n", "\n", " # Small bonus for compact single-line JSON output.\n", " if is_strict_json and \"\\n\" not in completion_text and len(completion_text) <= 240:\n", " reward += 0.08\n", "\n", " # Strongly discourage invalid action/target combinations.\n", " if action.action_type == \"submit_final\" and action.target is not None:\n", " reward -= 0.6\n", " if action.action_type in {\"message_expert\", \"propose_draft\"} and action.target is None:\n", " reward -= 0.6\n", "\n", " # ── 3. HEURISTIC STATE GRADING (NO API CALLS!) ──\n", "\n", " if action.action_type == \"message_expert\" and action.target != \"All\":\n", " # Did it ask a question it already knows the answer to?\n", " if f\"{action.target}: ✓ DISCOVERED\" in prompt:\n", " reward -= 0.5\n", " else:\n", " reward += 0.33 # Good job doing research!\n", "\n", " elif action.action_type in [\"propose_draft\", \"submit_final\"]:\n", " # Did it try to submit before gathering all constraints?\n", " if \"? unknown\" in prompt:\n", " reward -= 1.0 # Heavy penalty for guessing\n", " else:\n", " # It did the research. Did it actually include the constraints?\n", " text = action.content.lower()\n", " has_finance = \"50\" in text\n", " has_security = \"biometric\" in text\n", " has_ux = \"click\" in text or \"tap\" in text\n", "\n", " if has_finance and has_security and has_ux:\n", " reward += 1.5\n", " else:\n", " reward -= 0.5\n", "\n", " # ── 4. ORACLE-GUIDED DENSE SHAPING ──\n", " # This gives non-binary signal and prevents reward plateaus.\n", " if oracle_raw:\n", " oracle_action = parse_action(oracle_raw)\n", " if oracle_action is not None:\n", " if action.action_type == oracle_action.action_type:\n", " reward += 0.45\n", " else:\n", " reward -= 0.25\n", "\n", " if action.target == oracle_action.target:\n", " reward += 0.35\n", " else:\n", " reward -= 0.2\n", "\n", " overlap = lexical_overlap(action.content, oracle_action.content)\n", " reward += 0.4 * overlap\n", "\n", " # Late turns should avoid endless questioning/proposals.\n", " if isinstance(turn, int):\n", " if turn >= 8 and action.action_type != \"submit_final\":\n", " reward -= 0.35\n", " if turn >= 10 and action.action_type != \"submit_final\":\n", " reward -= 0.6\n", " # Reward timely completion once constraints are all discovered.\n", " if (\n", " action.action_type == \"submit_final\"\n", " and \"? 
unknown\" not in prompt\n", " and turn <= 10\n", " ):\n", " reward += 0.6\n", "\n", " # Keep rewards in a stable range for GRPO.\n", " reward = max(-2.0, min(2.0, reward))\n", "\n", " rewards.append(reward)\n", "\n", " return rewards\n", " return reward_fn" ] }, { "cell_type": "markdown", "id": "276d1887", "metadata": {}, "source": [ "### GRAPH PLOTS" ] }, { "cell_type": "code", "execution_count": null, "id": "fa70239a", "metadata": {}, "outputs": [], "source": [ "def save_training_plots(log_history: list[dict], output_dir: Path):\n", " if not HAS_PLT:\n", " print(\" matplotlib not available — skipping plots\")\n", " return\n", "\n", " output_dir.mkdir(parents=True, exist_ok=True)\n", "\n", " # Loss curve\n", " loss_points = [\n", " (e[\"step\"], e[\"loss\"])\n", " for e in log_history\n", " if \"loss\" in e and \"step\" in e\n", " ]\n", " if loss_points:\n", " xs, ys = zip(*loss_points)\n", " fig, ax = plt.subplots(figsize=(9, 4))\n", " ax.plot(xs, ys, marker=\"o\", linewidth=1.5, color=\"#4C72B0\", markersize=4)\n", " ax.set_xlabel(\"Training Step\", fontsize=12)\n", " ax.set_ylabel(\"GRPO Loss\", fontsize=12)\n", " ax.set_title(\n", " \"Project Polymath — GRPO Training Loss\\n\"\n", " \"(State-Based: each step = one negotiation decision)\",\n", " fontsize=12\n", " )\n", " ax.grid(True, alpha=0.3)\n", " plt.tight_layout()\n", " plt.savefig(output_dir / \"loss_curve.png\", dpi=160)\n", " plt.close()\n", " print(f\" Saved: {output_dir}/loss_curve.png\")\n", "\n", " # Reward curve (from log history if available)\n", " reward_points = [\n", " (e[\"step\"], e.get(\"reward\", e.get(\"mean_reward\", None)))\n", " for e in log_history\n", " if \"step\" in e and (\"reward\" in e or \"mean_reward\" in e)\n", " ]\n", " reward_points = [(s, r) for s, r in reward_points if r is not None]\n", "\n", " if reward_points:\n", " xs, ys = zip(*reward_points)\n", " fig, ax = plt.subplots(figsize=(9, 4))\n", " ax.plot(xs, ys, marker=\"s\", linewidth=1.5, color=\"#55A868\", markersize=4)\n", " ax.set_xlabel(\"Training Step\", fontsize=12)\n", " ax.set_ylabel(\"Mean Reward\", fontsize=12)\n", " ax.set_title(\n", " \"Project Polymath — Mean Reward During GRPO Training\\n\"\n", " \"(Shaped state-based reward: format, anti-pattern, and oracle-matching terms)\",\n", " fontsize=12\n", " )\n", " ax.grid(True, alpha=0.3)\n", " plt.tight_layout()\n", " plt.savefig(output_dir / \"reward_curve.png\", dpi=160)\n", " plt.close()\n", " print(f\" Saved: {output_dir}/reward_curve.png\")" ] }, { "cell_type": "markdown", "id": "57034d95", "metadata": {}, "source": [ "### Main Function" ] }, { "cell_type": "code", "execution_count": null, "id": "01e33d44", "metadata": {}, "outputs": [], "source": [ "def main():\n", " parser = argparse.ArgumentParser(description=\"State-Based GRPO — Project Polymath\")\n", "\n", " # Model\n", " parser.add_argument(\"--model\", default=\"unsloth/Qwen2.5-3B-Instruct-bnb-4bit\",\n", " help=\"Base model to train\")\n", " parser.add_argument(\"--use-unsloth\", action=\"store_true\",\n", " help=\"Use Unsloth for 2x faster training (recommended on GPU)\")\n", "\n", " # Dataset\n", " parser.add_argument(\"--states\", type=int, default=40,\n", " help=\"Number of negotiation states to train on\")\n", " parser.add_argument(\"--states-per-topic\", type=int, default=5,\n", " help=\"States to extract per topic (1-5)\")\n", " parser.add_argument(\"--topics-limit\", type=int, default=20,\n", " help=\"Max topics to use from ai_pm_prompts.json\")\n", "\n", " # GRPO hyperparams\n", " parser.add_argument(\"--group-size\", 
type=int, default=8,\n", " help=\"G: completions per prompt for GRPO advantage (default: 8)\")\n", " parser.add_argument(\"--epochs\", type=float, default=3.0)\n", " parser.add_argument(\"--lr\", type=float, default=5e-6,\n", " help=\"Learning rate (lower = safer, 5e-6 recommended for GRPO)\")\n", " parser.add_argument(\"--max-new-tokens\", type=int, default=40,\n", " help=\"Max generated tokens per sampled completion (default: 40)\")\n", " parser.add_argument(\"--temperature\", type=float, default=0.9,\n", " help=\"Sampling temperature for GRPO rollouts\")\n", " parser.add_argument(\"--top-p\", type=float, default=0.9,\n", " help=\"Nucleus sampling p for GRPO rollouts\")\n", " parser.add_argument(\"--batch-size\", type=int, default=1)\n", " parser.add_argument(\"--grad-accum\", type=int, default=4)\n", " parser.add_argument(\"--max-seq-length\", type=int, default=2048)\n", "\n", " # Output\n", " parser.add_argument(\"--output-dir\", default=str(OUTPUT_DIR))\n", " parser.add_argument(\"--dry-run\", action=\"store_true\",\n", " help=\"Build dataset and verify reward fn, skip actual training\")\n", "\n", " # parse_known_args keeps main() runnable inside notebooks, where the kernel\n", " # injects extra argv flags that plain parse_args() would reject.\n", " args, _ = parser.parse_known_args()\n", "\n", " # (TRL availability is checked after the --dry-run exit below, so dry runs\n", " # work without it.)\n", "\n", " output_dir = Path(args.output_dir)\n", " output_dir.mkdir(parents=True, exist_ok=True)\n", "\n", " # Build dataset\n", " print(\"\\n[1/4] Loading state dataset...\")\n", " topics = load_topics(limit=args.topics_limit)\n", " dataset_path = output_dir / \"state_dataset.jsonl\"\n", "\n", " # CACHING LOGIC for dataset\n", " if dataset_path.exists():\n", " print(f\" [CACHE HIT] Found existing dataset! Loading instantly from {dataset_path}...\")\n", " records = []\n", " with dataset_path.open(\"r\") as f:\n", " for line in f:\n", " if line.strip():\n", " records.append(json.loads(line))\n", " records = records[:args.states]\n", " else:\n", " print(\" [CACHE MISS] No dataset found. Generating from scratch (this may take a minute)...\")\n", " records = build_state_dataset(topics, states_per_topic=args.states_per_topic)\n", " records = records[:args.states]\n", "\n", " with dataset_path.open(\"w\") as f:\n", " for r in records:\n", " f.write(json.dumps(r, ensure_ascii=True) + \"\\n\")\n", " print(f\" Saved {len(records)} states → {dataset_path}\")\n", "\n", " dataset = Dataset.from_list([{\n", " \"prompt\": r[\"prompt\"],\n", " \"topic\": r[\"topic\"],\n", " \"turn\": r[\"turn\"],\n", " \"oracle_action\": r[\"oracle_action\"],\n", " \"step_idx\": r[\"step_idx\"],\n", " } for r in records])\n", "\n", " # Verify reward function\n", " print(\"\\n[2/4] Verifying reward function on 6 samples...\")\n", " reward_fn = make_reward_fn()\n", " prompt_with_finance_discovered = build_state_prompt(\n", " topic=topics[0],\n", " turn=1,\n", " feedback_so_far=\"Finance: Budget cap is $50k.\",\n", " discovered=\" Finance: ✓ DISCOVERED\\n Security: ? unknown\\n UX: ? 
unknown\",\n", " )\n", "\n", " test_completions = [\n", " ORACLE_ACTIONS[\"ask_finance\"], # Should score ~0.33\n", " ORACLE_ACTIONS[\"ask_security\"], # Should score ~0.33 \n", " '{\"action_type\": \"message_expert\", \"target\": \"Finance\", \"content\": \"What is the budget?\"}', # Should score -0.5 (repeat)\n", " '{\"action_type\": \"message_expert\", \"target\": \"All\", \"content\": \"What do you all need?\"}', # Should score -1.0\n", " \"this is not JSON at all\", # Should score -0.5\n", " ORACLE_ACTIONS[\"submit_final\"], # Should score +1.5 (all constraints in content)\n", " ]\n", "\n", " test_rewards = reward_fn(\n", " completions=test_completions,\n", " prompts=[\n", " \"\", # oracle ask_finance — unknown state\n", " prompt_with_finance_discovered, # ask_security — good pivot\n", " prompt_with_finance_discovered, # ask Finance again — repeat penalty\n", " \"\", # broadcast — penalty\n", " \"\", # bad JSON\n", " build_state_prompt( # submit after all discovered\n", " topic=topics[0], turn=4,\n", " feedback_so_far=\"All experts responded.\",\n", " discovered=\" Finance: ✓ DISCOVERED\\n Security: ✓ DISCOVERED\\n UX: ✓ DISCOVERED\",\n", " ),\n", " ],\n", " )\n", "\n", " labels = [\n", " \"Oracle ask_finance\",\n", " \"Oracle ask_security\",\n", " \"Repeat question to discovered expert\",\n", " \"Broadcast to All\",\n", " \"Malformed JSON\",\n", " \"Submit final with all constraints\",\n", " ]\n", " for label, reward in zip(labels, test_rewards):\n", " print(f\" • {label}: reward={reward:.3f}\")\n", "\n", " ask_finance_r, ask_security_r, repeat_r, broadcast_r, malformed_r, submit_r = test_rewards\n", " checks = [\n", " (\"oracle ask_finance is positive\", ask_finance_r > 0.0),\n", " (\"oracle ask_security is positive\", ask_security_r > 0.0),\n", " (\"repeat < oracle ask_finance\", repeat_r < ask_finance_r),\n", " (\"broadcast <= repeat\", broadcast_r <= repeat_r),\n", " (\"malformed JSON is negative\", malformed_r < 0.0),\n", " (\"submit_final > oracle ask_finance\", submit_r > ask_finance_r),\n", " ]\n", " all_ok = True\n", " for name, passed in checks:\n", " status = \"✓\" if passed else \"✗\"\n", " print(f\" {status} invariant: {name}\")\n", " all_ok = all_ok and passed\n", " if not all_ok:\n", " raise RuntimeError(\"Reward verification invariants failed. Check reward shaping logic.\")\n", "\n", " if args.dry_run:\n", " print(\"\\n[DRY RUN] Dataset and reward function verified. 
Skipping training.\")\n", " print(\" Run without --dry-run on GPU to train.\")\n", " return\n", "\n", " # TRL is checked here, after the dry-run exit, so --dry-run works without it.\n", " if not HAS_TRL:\n", " raise RuntimeError(\"TRL is required for actual training on the GPU.\")\n", " # Load model\n", " print(f\"\\n[3/4] Loading model: {args.model}\")\n", "\n", " if args.use_unsloth:\n", " try:\n", " from unsloth import FastLanguageModel\n", " HAS_UNSLOTH = True\n", " except Exception as e:\n", " raise RuntimeError(f\"Unsloth failed to load: {e}\\nRun without --use-unsloth instead.\")\n", " model, tokenizer = FastLanguageModel.from_pretrained(\n", " model_name=args.model,\n", " max_seq_length=args.max_seq_length,\n", " load_in_4bit=True,\n", " dtype=None,\n", " )\n", " model = FastLanguageModel.get_peft_model(\n", " model,\n", " r=16,\n", " lora_alpha=32,\n", " lora_dropout=0.0,\n", " target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n", " \"gate_proj\", \"up_proj\", \"down_proj\"],\n", " use_gradient_checkpointing=\"unsloth\",\n", " )\n", "\n", " if tokenizer.pad_token is None:\n", " tokenizer.pad_token = tokenizer.eos_token\n", " im_end_id = tokenizer.convert_tokens_to_ids(\"<|im_end|>\")\n", " if isinstance(im_end_id, int) and im_end_id >= 0:\n", " tokenizer.eos_token = \"<|im_end|>\"\n", " model.config.pad_token_id = tokenizer.pad_token_id\n", " if tokenizer.eos_token_id is not None:\n", " model.config.eos_token_id = tokenizer.eos_token_id\n", " model.generation_config.eos_token_id = tokenizer.eos_token_id\n", " model.generation_config.pad_token_id = tokenizer.pad_token_id\n", " print(\" Unsloth LoRA loaded (4-bit quantization)\")\n", " else:\n", " if not HAS_TRANSFORMERS:\n", " raise RuntimeError(\"pip install transformers\")\n", " tokenizer = AutoTokenizer.from_pretrained(args.model)\n", " if tokenizer.pad_token is None:\n", " tokenizer.pad_token = tokenizer.eos_token\n", "\n", " # Qwen chat models typically terminate on <|im_end|>.\n", " im_end_id = tokenizer.convert_tokens_to_ids(\"<|im_end|>\")\n", " if isinstance(im_end_id, int) and im_end_id >= 0:\n", " tokenizer.eos_token = \"<|im_end|>\"\n", "\n", " model = AutoModelForCausalLM.from_pretrained(args.model)\n", "\n", " # Keep model/generation config aligned with the tokenizer.\n", " model.config.pad_token_id = tokenizer.pad_token_id\n", " if tokenizer.eos_token_id is not None:\n", " model.config.eos_token_id = tokenizer.eos_token_id\n", " model.generation_config.eos_token_id = tokenizer.eos_token_id\n", " model.generation_config.pad_token_id = tokenizer.pad_token_id\n", "\n", " print(\" Standard transformers model loaded\")\n", "\n", " # GRPO Training\n", " print(f\"\\n[4/4] Starting GRPO training...\")\n", " print(f\" States: {len(records)} | Group size (G): {args.group_size}\")\n", " print(f\" Epochs: {args.epochs} | LR: {args.lr}\")\n", " print(f\" Total updates: ~{int(len(records) * args.epochs / args.batch_size)}\")\n", "\n", " # Build explicit stop-token list for GRPO sampling.\n", " eos_ids = []\n", " if tokenizer.eos_token_id is not None:\n", " eos_ids.append(int(tokenizer.eos_token_id))\n", " im_end_id = tokenizer.convert_tokens_to_ids(\"<|im_end|>\")\n", " if isinstance(im_end_id, int) and im_end_id >= 0:\n", " eos_ids.append(int(im_end_id))\n", " close_brace_id = tokenizer.convert_tokens_to_ids(\"}\")\n", " if isinstance(close_brace_id, int) and close_brace_id >= 0:\n", " eos_ids.append(int(close_brace_id))\n", " # Preserve order, remove duplicates.\n", " eos_ids = list(dict.fromkeys(eos_ids))\n", "\n", " generation_kwargs = {\n", " \"eos_token_id\": eos_ids 
if len(eos_ids) > 1 else (eos_ids[0] if eos_ids else None),\n", " \"pad_token_id\": tokenizer.pad_token_id,\n", " }\n", " # Remove None values.\n", " generation_kwargs = {k: v for k, v in generation_kwargs.items() if v is not None}\n", "\n", " config = GRPOConfig(\n", " output_dir=str(output_dir),\n", "\n", " # GRPO-specific\n", " num_generations=args.group_size, # G: sample this many completions per prompt\n", " max_completion_length=args.max_new_tokens,\n", " temperature=args.temperature,\n", " top_p=args.top_p,\n", " top_k=40,\n", " repetition_penalty=1.05,\n", " generation_kwargs=generation_kwargs,\n", " mask_truncated_completions=True,\n", "\n", " # Standard training\n", " learning_rate=args.lr,\n", " num_train_epochs=args.epochs,\n", " per_device_train_batch_size=max(1, args.batch_size),\n", " gradient_accumulation_steps=args.grad_accum,\n", "\n", " # Logging\n", " logging_steps=1,\n", " save_strategy=\"epoch\",\n", " report_to=[],\n", " )\n", "\n", " trainer = GRPOTrainer(\n", " model=model,\n", " processing_class=tokenizer,\n", " args=config,\n", " reward_funcs=reward_fn, \n", " train_dataset=dataset,\n", " )\n", "\n", " trainer.train()\n", "\n", " # Saving everything\n", " trainer.save_model(str(output_dir / \"final_model\"))\n", " tokenizer.save_pretrained(str(output_dir / \"final_model\"))\n", " print(f\"\\n Model saved → {output_dir}/final_model\")\n", "\n", " # Saving metrics\n", " metrics_path = output_dir / \"grpo_metrics.json\"\n", " with metrics_path.open(\"w\") as f:\n", " json.dump(trainer.state.log_history, f, indent=2)\n", " print(f\" Metrics saved → {metrics_path}\")\n", "\n", " # Saving plots\n", " save_training_plots(trainer.state.log_history, output_dir)\n", "\n", " # Summary\n", " log = trainer.state.log_history\n", " losses = [e[\"loss\"] for e in log if \"loss\" in e]\n", " if losses:\n", " print(f\"\\n Initial loss: {losses[0]:.4f}\")\n", " print(f\" Final loss: {losses[-1]:.4f}\")\n", " print(f\" Improvement: {((losses[0] - losses[-1]) / losses[0] * 100):.1f}%\")\n", "\n", " print(f\"\\n{'='*60}\")\n", " print(f\" GRPO TRAINING COMPLETE\")\n", " print(f\" Model: {output_dir}/final_model\")\n", " print(f\" Plots: {output_dir}/loss_curve.png\")\n", " print(f\" {output_dir}/reward_curve.png\")\n", " print(f\" Metrics: {output_dir}/grpo_metrics.json\")\n", " print(f\"{'='*60}\")\n", "\n", "\n", "if __name__ == \"__main__\":\n", " main()" ] } ], "metadata": { "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 5 }