# NOTE: the original export carried a "Spaces: Sleeping" Hugging Face page
# banner here; it is page residue, not part of this module.
"""
Student Agent for Text Adventure Games

This is your submission file. Implement the StudentAgent class to play
text adventure games using the MCP server you also implement.

Your agent should:
1. Connect to the MCP server via the provided client
2. Use the ReAct pattern (Thought -> Action -> Observation)
3. Call MCP tools to interact with the game
4. Maximize the game score within the step limit

Required method:
    async def run(self, client, game, max_steps, seed, verbose) -> RunResult

The 'client' is a FastMCP Client already connected to your MCP server.
Use it to call tools like: await client.call_tool("play_action", {"action": "look"})

Tips:
- Start by looking around and understanding your environment
- Keep track of visited locations to avoid loops
- Pick up useful items (lamp, sword, etc.)
- The seed parameter should be used to set your LLM's seed for reproducibility
"""
# Standard library
import json
import os
import re
import urllib.request
from collections import deque
from dataclasses import dataclass, field
from typing import Optional

# Third party
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Load environment variables from a local .env file, if present.
load_dotenv()
# =============================================================================
# LLM Configuration - DO NOT MODIFY
# =============================================================================

# Backend selection: "hf" (HuggingFace Inference API, default) or "ollama".
LLM_BACKEND = os.getenv("LLM_BACKEND", "hf").lower()

# HF model (default backend)
LLM_MODEL = os.getenv("HF_MODEL", "Qwen/Qwen2.5-72B-Instruct")

# Ollama model (local backend)
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "qwen2.5:3b")
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://127.0.0.1:11434/api/chat")

# Initialize the LLM client (uses HF_TOKEN from environment).  Left as None
# when no token is set so call_llm() can raise a clear error on the HF path.
_hf_token = os.getenv("HF_TOKEN")
LLM_CLIENT = InferenceClient(token=_hf_token) if _hf_token else None
def call_llm(prompt: str, system_prompt: str, seed: int, max_tokens: int = 300) -> str:
    """
    Call the LLM with the given prompt. Use this function in your agent.

    Args:
        prompt: The user prompt (current game state, history, etc.)
        system_prompt: The system prompt (instructions for the agent)
        seed: Random seed for reproducibility
        max_tokens: Maximum tokens in response (default: 300)

    Returns:
        The LLM's response text

    Example:
        response = call_llm(
            prompt="You are in a forest. What do you do?",
            system_prompt=SYSTEM_PROMPT,
            seed=42,
        )
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt},
    ]

    if LLM_BACKEND == "ollama":
        # Local Ollama backend: plain HTTP POST to the /api/chat endpoint.
        body = json.dumps(
            {
                "model": OLLAMA_MODEL,
                "messages": messages,
                "stream": False,
                "options": {
                    "temperature": 0.0,
                    "num_predict": max_tokens,
                    "seed": seed,
                },
            }
        ).encode("utf-8")
        request = urllib.request.Request(
            OLLAMA_URL,
            data=body,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        with urllib.request.urlopen(request, timeout=120) as resp:
            parsed = json.loads(resp.read().decode("utf-8"))
        return parsed.get("message", {}).get("content", "")

    # Default backend: HuggingFace Inference API (requires HF_TOKEN).
    if not LLM_CLIENT:
        raise ValueError("HF_TOKEN not found. Set it in your .env file or use LLM_BACKEND=ollama.")
    completion = LLM_CLIENT.chat.completions.create(
        model=LLM_MODEL,
        messages=messages,
        temperature=0.0,  # Deterministic for reproducibility
        max_tokens=max_tokens,
        seed=seed,
    )
    return completion.choices[0].message.content
@dataclass
class RunResult:
    """Result of running the agent. Do not modify this class.

    Attributes:
        final_score: Score achieved when the run ended.
        max_score: Maximum achievable score for the game.
        moves: Number of game moves taken.
        locations_visited: Names of distinct locations seen during the run.
        game_completed: True if the game reached a terminal state.
        error: Error message if the run aborted, else None.
        history: (thought, action, observation) tuples, one per step.
    """

    # @dataclass is required: the class is constructed with keyword arguments
    # and uses field(default_factory=list); without the decorator construction
    # raises TypeError and `history` would be a bare Field object.
    final_score: int
    max_score: int
    moves: int
    locations_visited: set[str]
    game_completed: bool
    error: Optional[str] = None
    history: list[tuple[str, str, str]] = field(default_factory=list)
# =============================================================================
# System Prompt - Customize this for your agent
# =============================================================================

# Instructions sent as the system message on every LLM call: tool inventory,
# the strict THOUGHT/TOOL/ARGS response format, and anti-looping policy.
SYSTEM_PROMPT = """You are playing a classic text adventure game.
GOAL: Explore the world, solve puzzles, and maximize your score.
AVAILABLE TOOLS (use via MCP):
- play_action: Execute a game command (north, take lamp, open mailbox, etc.)
- memory: Get state summary + recent history + loop diagnostics
- inventory: Check what you're carrying
- get_map: Get explored locations and frontier directions
- get_stats: Get compact state JSON (score, moves, done, loop signals)
- remember: Save a short note as key/value
- recall: Retrieve saved notes
VALID GAME COMMANDS for play_action:
- Movement: north, south, east, west, up, down, enter, exit
- Objects: take <item>, drop <item>, open <thing>, close <thing>, examine <thing>
- Other: look, inventory, read <thing>, turn on lamp
RESPOND IN THIS EXACT FORMAT (no markdown):
THOUGHT: <your reasoning about what to do next>
TOOL: <tool_name>
ARGS: <JSON arguments, e.g., {"action": "look"}>
Example:
THOUGHT: I should look around to see where I am.
TOOL: play_action
ARGS: {"action": "look"}
POLICY:
1) Prefer play_action on most turns.
2) If score/reward is stagnant or location repeats, prioritize unexplored movement/frontier.
3) Avoid repeating the same action in the same location unless new evidence appears.
4) Use memory/get_map/get_stats only when needed to break uncertainty.
"""
| # ============================================================================= | |
| # Student Agent - IMPLEMENT THIS CLASS | |
| # ============================================================================= | |
class StudentAgent:
    """ReAct agent that plays text adventure games through an MCP server.

    Each step the agent: builds a prompt from recent history and cached
    map/inventory snapshots, asks the LLM for a THOUGHT/TOOL/ARGS reply,
    validates and normalizes the tool call, executes it via the MCP client,
    then updates score/location bookkeeping and anti-loop counters.
    """

    def __init__(self):
        """Initialize per-run bookkeeping state."""
        self.history: list[dict] = []  # recent step records fed back into the prompt
        self.recent_actions: deque[str] = deque(maxlen=8)  # sliding window of last game actions
        self.location_action_counts: dict[tuple[str, str], int] = {}  # (location, action) -> times tried
        self.score: int = 0
        self.max_score: int = 350  # sensible default; overwritten from get_stats when available
        self.last_observation: str = ""
        self.non_play_streak: int = 0  # consecutive non-play_action tool calls
        self.cached_map: str = ""
        self.cached_inventory: str = ""
        self.note_counter: int = 0  # unique suffix for remembered clue keys

    async def run(
        self,
        client,  # FastMCP Client connected to your MCP server
        game: str,
        max_steps: int,
        seed: int,
        verbose: bool = False,
    ) -> "RunResult":
        """
        Run the agent for a game session.

        Args:
            client: FastMCP Client connected to your MCP server
            game: Name of the game being played (e.g., "zork1")
            max_steps: Maximum number of steps to take
            seed: Random seed for reproducibility (use for LLM calls)
            verbose: Whether to print detailed output

        Returns:
            RunResult with final score and statistics
        """
        locations_visited = set()
        history: list[tuple[str, str, str]] = []
        final_score = 0
        moves = 0
        game_completed = False
        error = None
        print(f"Starting game '{game}' with seed {seed}, using LLM model '{LLM_MODEL}'.")
        try:
            tools = await client.list_tools()
            tool_names = {t.name for t in tools}

            async def call_tool(tool: str, args: dict) -> str:
                # Thin wrapper: invoke an MCP tool and flatten the result to text.
                result = await client.call_tool(tool, args)
                return self._extract_result(result)

            # Bootstrap: look around, grab initial stats and snapshots.
            observation = await call_tool("play_action", {"action": "look"})
            self.last_observation = observation
            stats = await self._get_stats(client, tool_names)
            self._update_state_from_stats(stats)
            location = stats.get("location") or self._extract_location(observation)
            if location:
                locations_visited.add(location)
            if "get_map" in tool_names:
                self.cached_map = await call_tool("get_map", {})
            if "inventory" in tool_names:
                self.cached_inventory = await call_tool("inventory", {})
            if verbose:
                print(f"\n{observation}")

            for step in range(1, max_steps + 1):
                location = stats.get("location") or self._extract_location(observation)
                no_progress = int(stats.get("no_progress_streak", 0) or 0)
                # Refresh cached snapshots periodically, or sooner when stuck.
                if "get_map" in tool_names and (step % 6 == 0 or no_progress >= 3):
                    self.cached_map = await call_tool("get_map", {})
                if "inventory" in tool_names and step % 12 == 0:
                    self.cached_inventory = await call_tool("inventory", {})

                prompt = self._build_prompt(
                    observation=observation,
                    location=location,
                    step=step,
                    max_steps=max_steps,
                    stats=stats,
                    map_snapshot=self.cached_map,
                    inventory_snapshot=self.cached_inventory,
                )
                response = call_llm(
                    prompt=prompt,
                    system_prompt=SYSTEM_PROMPT,
                    seed=seed + (step * 31),  # distinct but reproducible seed per step
                )
                thought, tool_name, tool_args = self._parse_response(response)
                tool_name, tool_args = self._validate_tool_call(tool_name, tool_args, tool_names)

                # Track planning-call streaks; force a real game action after
                # two consecutive non-play tool calls.
                if tool_name != "play_action":
                    self.non_play_streak += 1
                else:
                    self.non_play_streak = 0
                if self.non_play_streak >= 2:
                    tool_name = "play_action"
                    tool_args = {
                        "action": self._next_exploration_action(
                            current_location=location,
                            map_snapshot=self.cached_map,
                        )
                    }
                    self.non_play_streak = 0

                if tool_name == "play_action":
                    action = tool_args.get("action", "look")
                    action = self._normalize_action(action)
                    action = self._anti_loop_override(action, location, stats)
                    tool_args = {"action": action}
                    self.recent_actions.append(action)
                    moves += 1

                if verbose:
                    print(f"\n--- Step {step} ---")
                    print(f"[THOUGHT] {thought}")
                    print(f"[TOOL] {tool_name}({tool_args})")

                try:
                    observation = await call_tool(tool_name, tool_args)
                except Exception as tool_exc:
                    # Fall back to a safe exploration move; guard the retry so
                    # a second failure doesn't abort the whole run.
                    observation = f"Tool error: {tool_exc}"
                    tool_name = "play_action"
                    fallback_action = self._next_exploration_action(location, self.cached_map)
                    tool_args = {"action": fallback_action}
                    try:
                        observation = await call_tool(tool_name, tool_args)
                    except Exception as fallback_exc:
                        observation = f"Tool error: {fallback_exc}"

                self.last_observation = observation
                stats = await self._get_stats(client, tool_names)
                self._update_state_from_stats(stats)
                location = stats.get("location") or self._extract_location(observation)
                if location:
                    locations_visited.add(location)
                final_score = int(stats.get("score", self.score) or self.score)
                moves = int(stats.get("moves", moves) or moves)
                self.max_score = int(stats.get("max_score", self.max_score) or self.max_score)
                game_completed = bool(stats.get("done", False)) or self._is_game_over(observation)

                if tool_name == "play_action":
                    loc_key = location or "Unknown"
                    act_key = tool_args.get("action", "look")
                    key = (loc_key, act_key)
                    self.location_action_counts[key] = self.location_action_counts.get(key, 0) + 1

                await self._maybe_store_note(client, tool_names, location, observation)

                self.history.append(
                    {
                        "step": step,
                        "thought": thought,
                        "tool": tool_name,
                        "args": tool_args,
                        "observation": observation[:220],
                        "score": final_score,
                    }
                )
                # Keep the prompt history bounded.
                if len(self.history) > 18:
                    self.history = self.history[-18:]
                history.append((thought, f"{tool_name}({tool_args})", observation[:120]))

                if verbose:
                    print(f"[RESULT] {observation[:220]}...")
                    print(
                        f"[STATE] score={final_score}/{self.max_score} "
                        f"moves={moves} loc={location}"
                    )
                if game_completed:
                    print(f"Game completed at step {step} with score {final_score}.")
                    break
        except Exception as exc:
            print(f"Error during agent run: {exc}")
            error = str(exc)
        if final_score == 0:
            print("Agent failed to score any points. Consider improving your action selection and exploration strategy.")
            final_score = self.score
        return RunResult(
            final_score=final_score,
            max_score=self.max_score,
            moves=moves,
            locations_visited=locations_visited,
            game_completed=game_completed,
            error=error,
            history=history,
        )

    def _build_prompt(
        self,
        observation: str,
        location: str,
        step: int,
        max_steps: int,
        stats: dict,
        map_snapshot: str,
        inventory_snapshot: str,
    ) -> str:
        """Build the per-step user prompt: state, recent decisions, snapshots,
        and anti-loop guidance for the LLM."""
        recent_lines = []
        for item in self.history[-5:]:
            recent_lines.append(
                f"- {item['tool']} {item['args']} => score {item['score']} => {item['observation']}"
            )
        if not recent_lines:
            recent_lines = ["- (none)"]
        frontier_hint = self._extract_frontier_from_map(map_snapshot)
        no_progress = int(stats.get("no_progress_streak", 0) or 0)
        prompt = (
            f"Game: current run\n"
            f"Step: {step}/{max_steps}\n"
            f"Location: {location}\n"
            f"Score: {stats.get('score', self.score)}/{stats.get('max_score', self.max_score)}\n"
            f"Loop signals: no_progress={stats.get('no_progress_streak', 0)}, "
            f"same_location={stats.get('same_location_streak', 0)}, "
            f"repeat_action={stats.get('repeated_action_streak', 0)}\n\n"
            f"Recent decisions:\n" + "\n".join(recent_lines) + "\n\n"
            f"Inventory snapshot:\n{inventory_snapshot[:280] if inventory_snapshot else '(unknown)'}\n\n"
            f"Map/frontier snapshot:\n{map_snapshot[:520] if map_snapshot else '(unknown)'}\n\n"
            f"Current observation:\n{observation}\n\n"
            f"Guidance:\n"
            f"- Prefer play_action now unless a planning query is necessary.\n"
            f"- If no_progress >= 3, prioritize an unexplored movement from frontier ({', '.join(frontier_hint)}).\n"
            f"- Avoid repeating recent actions: {', '.join(list(self.recent_actions)[-4:])}.\n"
            f"- If you mention a clue in THOUGHT, keep it concise.\n"
        )
        if no_progress >= 3:
            prompt += "\nYou appear stuck: choose a different movement or interaction than recent attempts.\n"
        return prompt

    def _parse_response(self, response: str) -> tuple[str, str, dict]:
        """
        Parse LLM response to extract thought, tool name, and arguments.

        Falls back to play_action/"look" for anything unparseable.

        Returns:
            Tuple of (thought, tool_name, args_dict)
        """
        thought = "No thought"
        tool_name = "play_action"
        args = {"action": "look"}
        thought_match = re.search(r"THOUGHT\s*:\s*(.+)", response, flags=re.IGNORECASE)
        if thought_match:
            thought = thought_match.group(1).strip()
        tool_match = re.search(r"TOOL\s*:\s*([^\n]+)", response, flags=re.IGNORECASE)
        if tool_match:
            tool_name = tool_match.group(1).strip().lower()
            tool_name = re.sub(r"[^a-zA-Z0-9_]+", "", tool_name)
        args_match = re.search(r"ARGS\s*:\s*(\{.*\})", response, flags=re.IGNORECASE | re.DOTALL)
        if args_match:
            raw_args = args_match.group(1).strip()
            try:
                args = json.loads(raw_args)
            except json.JSONDecodeError:
                # Common LLM slip: single quotes instead of JSON double quotes.
                raw_args = raw_args.replace("'", '"')
                try:
                    args = json.loads(raw_args)
                except json.JSONDecodeError:
                    # Last resort: pull out just the "action" value.
                    action_match = re.search(r'"action"\s*:\s*"([^"]+)"', raw_args)
                    if action_match:
                        args = {"action": action_match.group(1)}
                    else:
                        args = {"action": "look"}
        if not isinstance(args, dict):
            args = {"action": "look"}
        return thought, tool_name, args

    def _call_llm(self, prompt: str, system_prompt: str, seed: int) -> str:
        """
        Call the LLM with the given prompt.
        This is a convenience wrapper - you can also use call_llm() directly.
        """
        return call_llm(prompt, system_prompt, seed)

    async def _get_stats(self, client, tool_names: set[str]) -> dict:
        """Fetch state stats from the get_stats tool; return minimal local
        state when the tool is absent or the call fails."""
        if "get_stats" not in tool_names:
            return {
                "score": self.score,
                "max_score": self.max_score,
                "moves": 0,
                "done": False,
            }
        try:
            result = await client.call_tool("get_stats", {})
            text = self._extract_result(result)
            return self._parse_stats(text)
        except Exception:
            return {
                "score": self.score,
                "max_score": self.max_score,
                "moves": 0,
                "done": False,
            }

    def _parse_stats(self, text: str) -> dict:
        """Parse a stats payload: JSON first, then regex scavenging of known
        keys when the payload is not valid JSON."""
        text = text.strip()
        try:
            return json.loads(text)
        except Exception:
            data: dict[str, object] = {}
            for key in [
                "score", "max_score", "moves", "reward", "no_progress_streak",
                "same_location_streak", "repeated_action_streak", "unique_locations",
                "unique_recent_observations",
            ]:
                match = re.search(rf'"{key}"\s*:\s*(-?\d+)', text)
                if match:
                    data[key] = int(match.group(1))
            for key in ["game", "location"]:
                match = re.search(rf'"{key}"\s*:\s*"([^"]*)"', text)
                if match:
                    data[key] = match.group(1)
            done_match = re.search(r'"done"\s*:\s*(true|false)', text, flags=re.IGNORECASE)
            if done_match:
                data["done"] = done_match.group(1).lower() == "true"
            return data

    def _update_state_from_stats(self, stats: dict) -> None:
        """Mirror score/max_score from a stats dict into agent state."""
        if not stats:
            return
        self.score = int(stats.get("score", self.score) or self.score)
        self.max_score = int(stats.get("max_score", self.max_score) or self.max_score)

    def _validate_tool_call(self, tool_name: str, tool_args: dict, valid_tools: set[str]) -> tuple[str, dict]:
        """Map tool-name aliases to real tools, coerce unknown tools to
        play_action, and sanitize arguments per tool."""
        aliases = {
            "action": "play_action",
            "act": "play_action",
            "play": "play_action",
            "map": "get_map",
            "stats": "get_stats",
            "state": "memory",
            "inv": "inventory",
            "store": "remember",
            "notes": "recall",
        }
        tool_name = aliases.get(tool_name, tool_name)
        if tool_name not in valid_tools:
            tool_name = "play_action"
        if not isinstance(tool_args, dict):
            tool_args = {}
        if tool_name == "play_action":
            action = tool_args.get("action", "look")
            tool_args = {"action": self._normalize_action(action)}
        elif tool_name == "remember":
            key = str(tool_args.get("key", "note")).strip() or "note"
            value = str(tool_args.get("value", "")).strip() or "unknown"
            tool_args = {"key": key[:64], "value": value[:220]}
        elif tool_name == "recall":
            key = str(tool_args.get("key", "")).strip()
            tool_args = {"key": key}
        else:
            tool_args = {}
        return tool_name, tool_args

    def _normalize_action(self, action: str) -> str:
        """Lowercase, strip markdown noise, collapse whitespace, and rewrite
        verbs the parser doesn't know into canonical game verbs."""
        action = str(action).lower().strip()
        action = action.replace("**", "").replace("`", "")
        action = " ".join(action.split())
        invalid_verb_map = {
            "check": "examine",
            "inspect": "examine",
            "search": "look",
            "grab": "take",
            "pick": "take",
            "investigate": "examine",
        }
        words = action.split()
        if words and words[0] in invalid_verb_map:
            words[0] = invalid_verb_map[words[0]]
            action = " ".join(words)
        return action or "look"

    def _anti_loop_override(self, action: str, location: str, stats: dict) -> str:
        """Replace a looping action with an exploration move when loop
        signals (repeat counts, stagnant progress) fire."""
        loc = location or "Unknown"
        key = (loc, action)
        no_progress = int(stats.get("no_progress_streak", 0) or 0)
        repeated_action_streak = int(stats.get("repeated_action_streak", 0) or 0)
        if self.location_action_counts.get(key, 0) >= 2 and no_progress >= 2:
            return self._next_exploration_action(loc, self.cached_map)
        if repeated_action_streak >= 2 and len(self.recent_actions) >= 2:
            if action == self.recent_actions[-1]:
                return self._next_exploration_action(loc, self.cached_map)
        if no_progress >= 4 and action in {"look", "inventory", "wait"}:
            return self._next_exploration_action(loc, self.cached_map)
        return action

    def _next_exploration_action(self, current_location: str, map_snapshot: str) -> str:
        """Pick an untried frontier direction, else the least-used fallback
        action for this location."""
        frontier = self._extract_frontier_from_map(map_snapshot)
        recent = set(list(self.recent_actions)[-4:])
        for direction in frontier:
            if direction not in recent:
                return direction
        fallback = [
            "north", "south", "east", "west", "up", "down",
            "enter", "exit", "examine room", "look",
        ]
        loc = current_location or "Unknown"
        for action in fallback:
            if self.location_action_counts.get((loc, action), 0) < 2:
                return action
        return "look"

    def _extract_frontier_from_map(self, map_snapshot: str) -> list[str]:
        """Extract untried directions from a get_map snapshot; default to the
        four compass directions when none are listed."""
        if not map_snapshot:
            return ["north", "south", "east", "west"]
        match = re.search(
            r"Frontier directions not yet tried here:\s*(.+)",
            map_snapshot,
            flags=re.IGNORECASE,
        )
        if not match:
            return ["north", "south", "east", "west"]
        dirs = [d.strip().lower() for d in match.group(1).split(",") if d.strip()]
        return dirs or ["north", "south", "east", "west"]

    def _extract_result(self, result) -> str:
        """Flatten a FastMCP tool result (content list, list, or scalar) to
        plain text."""
        if hasattr(result, "content") and result.content:
            item = result.content[0]
            if hasattr(item, "text"):
                return item.text
            return str(item)
        if isinstance(result, list) and result:
            first = result[0]
            if hasattr(first, "text"):
                return first.text
            return str(first)
        return str(result)

    def _extract_location(self, observation: str) -> str:
        """Heuristic: treat the first non-empty line of an observation as the
        location name (classic adventure games print it first)."""
        lines = (observation or "").strip().split("\n")
        if not lines:
            return "Unknown"
        first = lines[0].strip()
        return first[:120] if first else "Unknown"

    def _is_game_over(self, text: str) -> bool:
        """Detect terminal-state phrases in an observation."""
        lower = (text or "").lower()
        endings = [
            "game over",
            "you have died",
            "you are dead",
            "*** you have died ***",
            "[game_over]",
        ]
        return any(token in lower for token in endings)

    async def _maybe_store_note(self, client, tool_names: set[str], location: str, observation: str) -> None:
        """Persist a short clue note via the remember tool when the
        observation contains puzzle-relevant keywords. Best-effort: failures
        are swallowed."""
        if "remember" not in tool_names:
            return
        text = (observation or "").lower()
        keywords = ["locked", "key", "door", "treasure", "cannot", "need", "dark"]
        if not any(k in text for k in keywords):
            return
        note_text = " ".join((observation or "").strip().split())[:170]
        key = f"clue_{self.note_counter}_{(location or 'unknown')[:20]}"
        self.note_counter += 1
        try:
            await client.call_tool("remember", {"key": key, "value": note_text})
        except Exception:
            return
# =============================================================================
# For local testing
# =============================================================================
async def test_agent():
    """Run the agent against a local MCP server as a quick smoke test."""
    from fastmcp import Client

    # Path to your MCP server
    server_path = "mcp_server.py"
    agent = StudentAgent()
    async with Client(server_path) as client:
        result = await agent.run(
            client=client,
            game="zork1",
            max_steps=10,
            seed=42,
            verbose=True,
        )
        print(f"\nFinal Score: {result.final_score}")
        print(f"Moves: {result.moves}")
        print(f"Locations: {result.locations_visited}")


if __name__ == "__main__":
    import asyncio

    asyncio.run(test_agent())